diff --git a/go.mod b/go.mod
index 3f502a6f..0c194e96 100644
--- a/go.mod
+++ b/go.mod
@@ -2,26 +2,19 @@ module github.com/cruise-automation/daytona
 
 require (
 	cloud.google.com/go v0.40.0
-	github.com/aws/aws-sdk-go v1.19.39
+	github.com/aws/aws-sdk-go v1.25.37
 	github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f
 	github.com/cenkalti/backoff v2.2.1+incompatible
-	github.com/frankban/quicktest v1.4.1 // indirect
-	github.com/google/go-cmp v0.3.1 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.1
 	github.com/hashicorp/go-gcp-common v0.5.0
-	github.com/hashicorp/golang-lru v0.5.3 // indirect
-	github.com/hashicorp/vault/api v1.0.5-0.20190909201928-35325e2c3262
+	github.com/hashicorp/vault v1.3.3
+	github.com/hashicorp/vault-plugin-secrets-kv v0.5.4
+	github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820
+	github.com/hashicorp/vault/sdk v0.1.14-0.20200305172021-03a3749f220d
 	github.com/mitchellh/go-homedir v1.1.0
-	github.com/pierrec/lz4 v2.2.6+incompatible // indirect
-	github.com/stretchr/objx v0.2.0 // indirect
 	github.com/stretchr/testify v1.3.0
-	golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect
-	golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 // indirect
 	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
-	golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // indirect
 	google.golang.org/api v0.6.0
-	google.golang.org/appengine v1.6.0 // indirect
-	google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 // indirect
 )
 
 go 1.13
diff --git a/go.sum b/go.sum
index cd5d24ff..3786583a 100644
--- a/go.sum
+++ b/go.sum
@@ -1,53 +1,190 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA=
+cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
 cloud.google.com/go v0.40.0 h1:FjSY7bOj+WzJe6TZRVtXI2b9kAYvtNg4lMbcH2+MUkk=
 cloud.google.com/go v0.40.0/go.mod h1:Tk58MuI9rbLMKlAjeO/bDnteAx7tX2gJIXw4T5Jwlro=
+code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk=
+code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI=
+contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc=
+contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
 git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/Azure/azure-sdk-for-go v29.0.0+incompatible h1:CYPU39ULbGjQBo3gXIqiWouK0C4F+Pt2Zx5CqGvqknE=
+github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v11.7.1+incompatible h1:M2YZIajBBVekV86x0rr1443Lc1F/Ylxb9w+5EtSyX3Q=
+github.com/Azure/go-autorest v11.7.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E=
+github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
+github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Microsoft/go-winio v0.4.13 h1:Hmi80lzZuI/CaYmlJp/b+FjZdRZhKu9c2mDVqKlLWVs=
+github.com/Microsoft/go-winio v0.4.13/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
+github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE=
+github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
+github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
+github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD16bhpKNAanfcDDVU+J0NXqsgHIvGbbe/sy+r6Rs0=
+github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
+github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
+github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
+github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
+github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e h1:h0gP0hBU6DsA5IQduhLWGOEfIUKzJS5hhXQBSgHuF/g=
+github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco=
+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/aws/aws-sdk-go v1.19.39 h1:pIez14zQWSd/TER2Scohm7aCEG2TgoyXSOX6srOKt6o=
 github.com/aws/aws-sdk-go v1.19.39/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.37 h1:gBtB/F3dophWpsUQKN/Kni+JzYEH2mGHF4hWNtfED1w=
+github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
+github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
 github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f h1:ZMEzE7R0WNqgbHplzSBaYJhJi5AZWTCK9baU0ebzG6g=
 github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y=
 github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f h1:gJzxrodnNd/CtPXjO3WYiakyNzHg3rtAi7rO74ejHYU=
+github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE=
+github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 h1:CWU8piLyqoi9qXEUwzOh5KFKGgmSU5ZhktJyYcq6ryQ=
+github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381 h1:rdRS5BT13Iae9ssvcslol66gfOOXjaLYwqerEn/cl9s=
+github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
+github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
+github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/coreos/go-oidc v2.0.0+incompatible h1:+RStIopZ8wooMx+Vs5Bt8zMXxV1ABl5LbakNExNmZIg=
+github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a h1:yJ2kD1BvM28M4gt31MuDr0ROKsW+v6zBk9G0Bcr8qAY=
+github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
+github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
+github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=
+github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
+github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
 github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg=
 github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 h1:D2LrfOPgGHQprIxmsTpxtzhpmF66HoM6rXSmcqaX7h8=
+github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI=
+github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56 h1:VzbudKn/nvxYKOdzgkEBS6SSreRjAgoJ+ZeS4wPFkgc=
+github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-asn1-ber/asn1-ber v1.3.1 h1:gvPdv/Hr++TRFCl0UbPFHC54P9N9jgsRPnmnr419Uck=
+github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
+github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-ldap/ldap v3.0.2+incompatible h1:kD5HQcAzlQ7yrhfn+h+MSABeAy/jAJhvIJ/QDllP44g=
 github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
+github.com/go-ldap/ldap/v3 v3.1.3 h1:RIgdpHXJpsUqUK5WXwKyVsESrGFqo5BRWPk3RR4/ogQ=
+github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=
+github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
+github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
+github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
 github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
+github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df h1:fwXmhM0OqixzJDOGgTSyNH9eEDij9uGTXwsyWXvyR0A=
+github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
+github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -57,50 +194,116 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a/go.mod h1:o93WzqysX0jP/10Y13hfL6aq9RoUvGaVdkrH5awMksE=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
+github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
 github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY=
+github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
+github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
 github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hashicorp/consul-template v0.22.0/go.mod h1:lHrykBIcPobCuEcIMLJryKxDyk2lUMnQWmffOEONH0k=
+github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4=
 github.com/hashicorp/go-gcp-common v0.5.0 h1:kkIQTjNTopn4eXQ1+lCiHYZXUtgIZvbc6YtAQkMnTos=
 github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw=
 github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
+github.com/hashicorp/go-hclog v0.8.0 h1:z3ollgGRg8RjfJH6UVBaG54R70GFd++QOkvnJH3VSBY=
 github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc=
+github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
+github.com/hashicorp/go-memdb v1.0.2 h1:AIjzJlwIxz2inhZqRJZfe6D15lPeF0/cZyS1BVlnlHg=
+github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTgIUQ0t/t32M=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
 github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE=
 github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
+github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4=
+github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE=
 github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw=
+github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8=
 github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
 github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
 github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
 github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
@@ -109,80 +312,275 @@ github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8
 github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/vault/api v1.0.5-0.20190909201928-35325e2c3262 h1:En9ph/axtiRQX8mt4+fVBOgNfEmndq5c+K0fUBviFTQ=
-github.com/hashicorp/vault/api v1.0.5-0.20190909201928-35325e2c3262/go.mod h1:LGTA4eiQKhPGTBgi6fCuAT5n0S3CJBHa7cpUotrLxjw=
-github.com/hashicorp/vault/sdk v0.1.14-0.20190909201848-e0fbf9b652e2 h1:b65cSyZqljnCPzzsUXvR4P0eXypo1xahQyG809+IySk=
-github.com/hashicorp/vault/sdk v0.1.14-0.20190909201848-e0fbf9b652e2/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.1.4 h1:gkyML/r71w3FL8gUi74Vk76avkj/9lYAY9lvg0OcoGs=
+github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf h1:U/40PQvWkaXCDdK9QHKf1pVDVcA+NIDVbzzonFGkgIA=
+github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf/go.mod h1:BDngVi1f4UA6aJq9WYTgxhfWSE1+42xshvstLU2fRGk=
+github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
+github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17 h1:p+2EISNdFCnD9R+B4xCiqSn429MCFtvM41aHJDJ6qW4=
+github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
+github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
+github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab h1:WzGMwlO1DvaC93SvVOBOKtn+nXGEDXapyJuaRV3/VaY=
+github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.8.3 h1:MWYcmct5EtKz0efYooPcL0yNkem+7kWxqXDi/UIh+8k=
+github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k=
+github.com/hashicorp/vault v1.3.3 h1:v4EKsws7d6h+1fzIN4ql27gbfWaAxQDtDi9lpXi6BzM=
+github.com/hashicorp/vault v1.3.3/go.mod h1:V096mtgbO7SmNd6uoIH4Fq0CvVHcb8Kq760cmgGjlH4=
+github.com/hashicorp/vault-plugin-auth-alicloud v0.5.3 h1:csNfo6K5iXqL4jBvIuCsO+OKtjaSe5Tl6uJ/pZ7DZVU=
+github.com/hashicorp/vault-plugin-auth-alicloud v0.5.3/go.mod h1:BcdV0SALJOcVOopZHMvBsiwagmfeqEjQaEbv/hxJtQ8=
+github.com/hashicorp/vault-plugin-auth-azure v0.5.3 h1:6TeV+8VrnAZcscsF42wvKefsCNpNAbfpENnKiS+7fUk=
+github.com/hashicorp/vault-plugin-auth-azure v0.5.3/go.mod h1:g92t4Rrvzar1rLQauGUIT6zpcjoeRvL9v37blH+RUKc=
+github.com/hashicorp/vault-plugin-auth-centrify v0.5.3 h1:VNQn7xk7po3MJDkxb5naeorcE78rlccOtREJ/sB7YKA=
+github.com/hashicorp/vault-plugin-auth-centrify v0.5.3/go.mod h1:G/iY7Pwsjnz2W0l/HFy2ckCCekNobiLM5gSM7q5ddOs=
+github.com/hashicorp/vault-plugin-auth-cf v0.5.2 h1:e/m+jTaX/6LRiAh7OBtoMWSiyyHKbCHCDuAh49UO4dg=
+github.com/hashicorp/vault-plugin-auth-cf v0.5.2/go.mod h1:XwR+dCpvjiOJaD8pvzdzmC4VOa4DKSyO0hah5XATql8=
+github.com/hashicorp/vault-plugin-auth-gcp v0.5.1/go.mod h1:eLj92eX8MPI4vY1jaazVLF2sVbSAJ3LRHLRhF/pUmlI=
+github.com/hashicorp/vault-plugin-auth-gcp v0.5.3 h1:6j3hXCDsy0rYkUPus/5fuev5SV/SeR+jZcf/MMlE4sM=
+github.com/hashicorp/vault-plugin-auth-gcp v0.5.3/go.mod h1:vHJ9elo4Tzyao9TyFIvWNY/xbKP4xRUVmnP8B9y3XpA=
+github.com/hashicorp/vault-plugin-auth-jwt v0.5.3 h1:Yvk2RQrkho04I/6MX3rp3MxOR3XICKyT/rDBmDCtVEM=
+github.com/hashicorp/vault-plugin-auth-jwt v0.5.3/go.mod h1:RLxLoG18aAirJBBIAhfq9DmwzkqtqtFt2JzT9d5M4OA=
+github.com/hashicorp/vault-plugin-auth-kubernetes v0.5.3 h1:StvNUePTdfJpNAHAZ8goYseSYcY1rJ/vVBXjSuWzFUk=
+github.com/hashicorp/vault-plugin-auth-kubernetes v0.5.3/go.mod h1:SSMiBiFzKofmMvzQSdCAZGGJe0N+f7DAS3BS9N8BFTM=
+github.com/hashicorp/vault-plugin-auth-oci v0.5.2 h1:VW/K4DJ5ian2nSG1ItMNM3gzh7SFRlxuzAe3ZU8PcS0=
+github.com/hashicorp/vault-plugin-auth-oci v0.5.2/go.mod h1:B/rjKHzVRFhb8t9lge/ZPJcJ7mZTdQaLk/GGpF1go5w=
+github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.2 h1:tNa6Y5Fb6cv5JJNa1aslZSoGLuIVrzJFM1QLGnYlOzc=
+github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.2/go.mod h1:46f64oFKnX1VTnlkCgHV7gahU889OUM7GXwUHBEmX2E=
+github.com/hashicorp/vault-plugin-secrets-ad v0.6.3 h1:XGCIA9kIrruybpvLToqldHo6UFSK4JKCgijdMYWfUT4=
+github.com/hashicorp/vault-plugin-secrets-ad v0.6.3/go.mod h1:1bdbzcF5G9AUjNMCKc/KY9QWnup+SaKlGeKtXXu7uz4=
+github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.3 h1:F+TYtKJMMbJRXZ4CorX6zVk54VB7SOBE4Ti/WzLqpvE=
+github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.3/go.mod h1:PmIRuec+84Hcoi4f3f0c02r+1a4kn+AcVK10lIk2TtM=
+github.com/hashicorp/vault-plugin-secrets-azure v0.5.4 h1:fZHDdAWF3jDUbWfm2VmldfcXQ8H13pHS4rlJUc8HQLQ=
+github.com/hashicorp/vault-plugin-secrets-azure v0.5.4/go.mod h1:ahEywZP+iS2MFtO2ZtAScXDPQ87bSvhZ0HQSvnDkPV4=
+github.com/hashicorp/vault-plugin-secrets-gcp v0.5.4 h1:e+hA3KmpzXmFZ0HY7SxVVmyk/gtRwpEOd3WTQkLqhmY=
+github.com/hashicorp/vault-plugin-secrets-gcp v0.5.4/go.mod h1:cfTII8p/QTIxyu25C2VUxsOmJ6WssLGOz4hEscTX9JM=
+github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.3 h1:NgTkAZFMKoVEdcG0kLPFyejNqpiq1cu4BnOaKUumPeI=
+github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.3/go.mod h1:dfCKMtMBTNoHwVrEy0BWD+ZUQuHDvP/FrTjy5bw7XH4=
+github.com/hashicorp/vault-plugin-secrets-kv v0.5.3/go.mod h1:sNv78emGvFOrQ7WdRvT4lTmYsxD55IFaLHDjsm/PgSo=
+github.com/hashicorp/vault-plugin-secrets-kv v0.5.4 h1:DG6SJttyUGp61PMh5sj0EwAMtFTIIAaiE1a4+RKIgtg=
+github.com/hashicorp/vault-plugin-secrets-kv v0.5.4/go.mod h1:B/Cybh5aVF7LNAMHwVBxY8t7r2eL0C6HVGgTyP4nKK4=
+github.com/hashicorp/vault/api v1.0.1/go.mod h1:AV/+M5VPDpB90arloVX0rVDUIHkONiwz5Uza9HRtpUE=
+github.com/hashicorp/vault/api v1.0.5-0.20190730042357-746c0b111519/go.mod h1:i9PKqwFko/s/aihU1uuHGh/FaQS+Xcgvd9dvnfAvQb0=
+github.com/hashicorp/vault/api v1.0.5-0.20200117231345-460d63e36490/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo=
+github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820 h1:biZidYDDEWnuOI9mXnJre8lwHKhb5ym85aSXk3oz/dc=
+github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
+github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU=
+github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
+github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
+github.com/hashicorp/vault/sdk v0.1.14-0.20200117231345-460d63e36490/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
+github.com/hashicorp/vault/sdk v0.1.14-0.20200215195600-2ca765f0a500/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
+github.com/hashicorp/vault/sdk v0.1.14-0.20200215224050-f6547fa8e820/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
+github.com/hashicorp/vault/sdk v0.1.14-0.20200305172021-03a3749f220d h1:Uyra+poga+ulm5m+XNBUUm/eUZ0e6RBVT5jxBcb7fVY=
+github.com/hashicorp/vault/sdk v0.1.14-0.20200305172021-03a3749f220d/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 h1:3K3KcD4S6/Y2hevi70EzUTNKOS3cryQyhUnkjE6Tz0w=
+github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
+github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 h1:mex1izRBCD+7WjieGgRdy7e651vD/lvB1bD9vNE/3K4=
+github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2/go.mod h1:xkfESuHriIekR+4RoV+fu91j/CfnYM29Zi2tMFw5iD4=
+github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f h1:E87tDTVS5W65euzixn7clSzK66puSt1H4I5SC0EmHH4=
+github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f/go.mod h1:3J2qVK16Lq8V+wfiL2lPeDZ7UWMxk5LemerHa1p6N00=
+github.com/jefferai/jsonx v1.0.0 h1:Xoz0ZbmkpBvED5W9W1B5B/zc3Oiq7oXqiW7iRV3B6EI=
+github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo8vFvoQ=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA=
+github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f h1:Gsc9mVHLRqBjMgdQCghN9NObCcRncDqxJvBvEaIIQEo=
+github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw=
+github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI=
 github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU=
+github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5 h1:uA3b4GgZMZxAJsTkd+CVQ85b7KBlD7HLpd/FfTNlGN0=
+github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5/go.mod h1:+pmbihVqjC3GPdfWv1V2TnRSuVvwrWLKfEP/MZVB/Wc=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI=
+github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8 h1:1CO5wil3HuiVLrUQ2ovSTO+6AfNOA5EMkHHVyHE9IwA=
+github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8/go.mod h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8=
 github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
 github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
+github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
 github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
 github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
 github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/oracle/oci-go-sdk v7.0.0+incompatible h1:oj5ESjXwwkFRdhZSnPlShvLWYdt/IZ65RQxveYM3maA=
+github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
+github.com/ory/dockertest v3.3.4+incompatible h1:VrpM6Gqg7CrPm3bL4Wm1skO+zFWLbh7/Xb5kGEbJRh8=
+github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
+github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso=
+github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
+github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.2.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=
 github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI=
+github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E=
+github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU=
+github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d h1:PinQItctnaL2LtkaSM678+ZLLy5TajwOeXzWvYC7tII=
+github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
 github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
 github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/shirou/gopsutil v2.19.9+incompatible h1:IrPVlK4nfwW10DF7pW+7YJKws9NkgNzWozwwWv9FsgY=
+github.com/shirou/gopsutil v2.19.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U=
+github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRNFYlvKwSr5zff2v+uPHaffZ6/M4k=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -191,13 +589,41 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
+github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
+github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/etcd v0.0.0-20190412021913-f29b1ada1971/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
 go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
 go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr
v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= +golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -213,24 +639,35 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= @@ -242,27 +679,41 @@ golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -270,28 +721,41 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190718200317-82a3ea8a504c/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.6.0 h1:2tJEkRfnZL5g1GeBUlITh/rqT5HG3sFcoVCUUxmgJ2g= google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.0 h1:Tfd7cKwKbFRsI8RMAD3oqqw7JPFRrvFlOsfbgVkjOOw= google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 h1:wuGevabY6r+ivPNagjUXGGxF+GqgMd+dBhjsxW4q9u4= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto 
v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= @@ -300,28 +764,58 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible 
h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190409092523-d687e77c8ae9 h1:c9UEl5z8gk1DGh/g3snETZ+a52YeR9VdbX/3BQ4PHas= +k8s.io/api v0.0.0-20190409092523-d687e77c8ae9/go.mod h1:FQEUn50aaytlU65qqBn/w+5ugllHwrBzKm7DzbnXdzE= +k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b h1:fVkKJL9FIpA8LSJyHVM00MP45q1WJ7+af77vcxmQP4g= +k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b/go.mod h1:FW86P8YXVLsbuplGMZeb20J3jYHscrDqw4jELaFJvRU= +k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= +k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +layeh.com/radius v0.0.0-20190322222518-890bc1058917 h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU= +layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/pkg/secrets/helpers_test.go b/pkg/secrets/helpers_test.go new file mode 100644 index 00000000..47c6e140 --- /dev/null +++ b/pkg/secrets/helpers_test.go @@ -0,0 +1,48 @@ +package secrets + +import ( + "io/ioutil" + "os" + "testing" + + kv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func startVaultTestCluster(t *testing.T) (*vault.TestCluster, *api.Client) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + }) + + cluster.Start() + + core := cluster.Cores[0] + vault.TestWaitActive(t, core.Core) + client := core.Client + + err := client.Sys().Mount("kv", &api.MountInput{ + Type: "kv-v2", + }) + if err != nil { + t.Fatal(err) + } + + return cluster, client +} + +func generateTestFile(prefix string) (*os.File, error) { + file, err := ioutil.TempFile(os.TempDir(), prefix) + if err != nil { + return nil, err + } + return file, nil +} diff --git a/pkg/secrets/reader.go b/pkg/secrets/reader.go index dcd9b056..ccd9c133 100644 --- a/pkg/secrets/reader.go +++ b/pkg/secrets/reader.go @@ -18,6 +18,7 @@ package secrets import ( "context" + "log" "github.com/hashicorp/vault/api" ) @@ -90,6 +91,7 @@ func (pr 
*ParallelReader) worker() { case <-pr.ctx.Done(): return case keyPath := <-pr.keyPathInChan: + log.Printf("Reading path %s", keyPath) secret, err := pr.logicalClient.Read(keyPath) pr.secretOutChan <- &SecretResult{ KeyPath: keyPath, @@ -98,4 +100,4 @@ func (pr *ParallelReader) worker() { } } } -} \ No newline at end of file +} diff --git a/pkg/secrets/secrets.go b/pkg/secrets/secrets.go index 54e16138..fe2804a4 100644 --- a/pkg/secrets/secrets.go +++ b/pkg/secrets/secrets.go @@ -65,7 +65,7 @@ func SecretFetcher(client *api.Client, config cfg.Config) { for _, env := range envs { // VAULT_SECRET_WHATEVER=secret/application/thing // VAULT_SECRETS_WHATEVER=secret/application/things - // envKey=secretPath + // envKey=apex pair := strings.Split(env, "=") envKey := pair[0] apex := os.Getenv(envKey) @@ -90,6 +90,8 @@ func SecretFetcher(client *api.Client, config cfg.Config) { continue } + log.Printf("Found %s, attempting to read secrets from %s", envKey, apex) + // look for a corresponding secretDestinationPrefix key. // sometimes these can be cased inconsistently so we have to attempt normalization. // e.g. VAULT_SECRET_APPLICATIONA --> DAYTONA_SECRET_DESTINATION_applicationa @@ -208,6 +210,13 @@ func (sd *SecretDefinition) addSecrets(client *api.Client, secretResult *SecretR } secretData := secret.Data + // kv version 2 read responses nest the secret payload beneath a "data" key + // this makes a best effort to extract it + nestedData, ok := secretData["data"].(map[string]interface{}) + if ok { + secretData = nestedData + } + // Return last error encountered during processing, if any var lastErr error @@ -228,6 +237,20 @@ func (sd *SecretDefinition) addSecrets(client *api.Client, secretResult *SecretR } func (sd *SecretDefinition) Walk(client *api.Client) error { paths := make([]string, 0) + // kv version 2 secret backends list keys using a + // /metadata/ path convention + // this extracts the mount and path values so 'metadata' + // can be replaced by 'data' when the paths are re-assembled + // below and added to the path slice + var isKV2 bool + var mount, sPath string + kv2Split := strings.Split(sd.secretApex, "metadata") + if len(kv2Split) == 2 { + isKV2 = true + mount = kv2Split[0] + sPath = kv2Split[1] + } + list, err := client.Logical().List(sd.secretApex) if err != nil { return fmt.Errorf("there was a problem listing %s: %s", sd.secretApex, err) @@ -246,7 +269,11 @@ func (sd *SecretDefinition) Walk(client *api.Client) error { if !ok { return fmt.Errorf("non-string secret name: %#v", key) } - paths = append(paths, path.Join(sd.secretApex, key)) + apex := sd.secretApex + if isKV2 { + apex = mount + "data" + sPath + } + paths = append(paths, path.Join(apex, key)) } sd.paths = paths return nil diff --git a/pkg/secrets/secrets_test.go b/pkg/secrets/secrets_test.go index 7b4c6ee5..5caace31 100644 --- a/pkg/secrets/secrets_test.go +++ b/pkg/secrets/secrets_test.go @@ -318,3 +318,71 @@ func TestSecretAWalk(t *testing.T) { assert.Equal(t, "bbbb", destSecrets["credentials_api_b"]) assert.Equal(t, "password", destSecrets["other"]) } + +func TestKV2(t *testing.T) { + cluster, client := startVaultTestCluster(t) + defer cluster.Cleanup() + + var config cfg.Config + + _, err := client.Logical().Write("kv/data/single", map[string]interface{}{ + "data": map[string]interface{}{ + "value": "just a regular ole value", + }, + }) + if err != nil { + t.Fatal(err) + } + + for i := 1; i < 4; i++ { + _, err := client.Logical().Write(fmt.Sprintf("kv/data/multiple/thing%v", i), map[string]interface{}{ + "data": map[string]interface{}{ + 
"value": fmt.Sprintf("%v", i), + }, + }) + if err != nil { + t.Fatal(err) + } + } + + testFiles := make([]*os.File, 0, 2) + testFileNames := []string{"secret-kv2-multiple-dest-", "secret-kv2-single-"} + + for _, fileName := range testFileNames { + f, err := generateTestFile(fileName) + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + testFiles = append(testFiles, f) + } + + os.Setenv("VAULT_SECRETS_THING", "kv/metadata/multiple") + os.Setenv("DAYTONA_SECRET_DESTINATION_THING", testFiles[0].Name()) + os.Setenv("VAULT_SECRET_SINGLE", "kv/data/single") + os.Setenv("DAYTONA_SECRET_DESTINATION_SINGLE", testFiles[1].Name()) + defer os.Unsetenv("VAULT_SECRETS_THING") + defer os.Unsetenv("DAYTONA_SECRET_DESTINATION_THING") + defer os.Unsetenv("VAULT_SECRET_SINGLE") + defer os.Unsetenv("DAYTONA_SECRET_DESTINATION_SINGLE") + + SecretFetcher(client, config) + + type expected struct { + Name string + Payload string + } + + expectedData := []expected{ + expected{Name: testFiles[0].Name(), Payload: `{"thing1":"1","thing2":"2","thing3":"3"}`}, + expected{Name: testFiles[1].Name(), Payload: "just a regular ole value"}, + } + + for _, expected := range expectedData { + data, err := ioutil.ReadFile(expected.Name) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, expected.Payload, string(data)) + } +} diff --git a/vendor/github.com/Jeffail/gabs/LICENSE b/vendor/github.com/Jeffail/gabs/LICENSE new file mode 100644 index 00000000..99a62c62 --- /dev/null +++ b/vendor/github.com/Jeffail/gabs/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Ashley Jeffs + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Jeffail/gabs/README.md b/vendor/github.com/Jeffail/gabs/README.md new file mode 100644 index 00000000..a58193fd --- /dev/null +++ b/vendor/github.com/Jeffail/gabs/README.md @@ -0,0 +1,315 @@ +![Gabs](gabs_logo.png "Gabs") + +Gabs is a small utility for dealing with dynamic or unknown JSON structures in +golang. It's pretty much just a helpful wrapper around the golang +`json.Marshal/json.Unmarshal` behaviour and `map[string]interface{}` objects. +It does nothing spectacular except for being fabulous. + +https://godoc.org/github.com/Jeffail/gabs + +## How to install: + +``` bash +go get github.com/Jeffail/gabs +``` + +## How to use + +### Parsing and searching JSON + +``` go +... 
+ +import "github.com/Jeffail/gabs" + +jsonParsed, err := gabs.ParseJSON([]byte(`{ + "outter":{ + "inner":{ + "value1":10, + "value2":22 + }, + "alsoInner":{ + "value1":20 + } + } +}`)) + +var value float64 +var ok bool + +value, ok = jsonParsed.Path("outter.inner.value1").Data().(float64) +// value == 10.0, ok == true + +value, ok = jsonParsed.Search("outter", "inner", "value1").Data().(float64) +// value == 10.0, ok == true + +value, ok = jsonParsed.Path("does.not.exist").Data().(float64) +// value == 0.0, ok == false + +exists := jsonParsed.Exists("outter", "inner", "value1") +// exists == true + +exists := jsonParsed.Exists("does", "not", "exist") +// exists == false + +exists := jsonParsed.ExistsP("does.not.exist") +// exists == false + +... +``` + +### Iterating objects + +``` go +... + +jsonParsed, _ := gabs.ParseJSON([]byte(`{"object":{ "first": 1, "second": 2, "third": 3 }}`)) + +// S is shorthand for Search +children, _ := jsonParsed.S("object").ChildrenMap() +for key, child := range children { + fmt.Printf("key: %v, value: %v\n", key, child.Data().(string)) +} + +... +``` + +### Iterating arrays + +``` go +... + +jsonParsed, _ := gabs.ParseJSON([]byte(`{"array":[ "first", "second", "third" ]}`)) + +// S is shorthand for Search +children, _ := jsonParsed.S("array").Children() +for _, child := range children { + fmt.Println(child.Data().(string)) +} + +... +``` + +Will print: + +``` +first +second +third +``` + +Children() will return all children of an array in order. This also works on +objects, however, the children will be returned in a random order. + +### Searching through arrays + +If your JSON structure contains arrays you can still search the fields of the +objects within the array, this returns a JSON array containing the results for +each element. + +``` go +... + +jsonParsed, _ := gabs.ParseJSON([]byte(`{"array":[ {"value":1}, {"value":2}, {"value":3} ]}`)) +fmt.Println(jsonParsed.Path("array.value").String()) + +... +``` + +Will print: + +``` +[1,2,3] +``` + +### Generating JSON + +``` go +... + +jsonObj := gabs.New() +// or gabs.Consume(jsonObject) to work on an existing map[string]interface{} + +jsonObj.Set(10, "outter", "inner", "value") +jsonObj.SetP(20, "outter.inner.value2") +jsonObj.Set(30, "outter", "inner2", "value3") + +fmt.Println(jsonObj.String()) + +... +``` + +Will print: + +``` +{"outter":{"inner":{"value":10,"value2":20},"inner2":{"value3":30}}} +``` + +To pretty-print: + +``` go +... + +fmt.Println(jsonObj.StringIndent("", " ")) + +... +``` + +Will print: + +``` +{ + "outter": { + "inner": { + "value": 10, + "value2": 20 + }, + "inner2": { + "value3": 30 + } + } +} +``` + +### Generating Arrays + +``` go +... + +jsonObj := gabs.New() + +jsonObj.Array("foo", "array") +// Or .ArrayP("foo.array") + +jsonObj.ArrayAppend(10, "foo", "array") +jsonObj.ArrayAppend(20, "foo", "array") +jsonObj.ArrayAppend(30, "foo", "array") + +fmt.Println(jsonObj.String()) + +... +``` + +Will print: + +``` +{"foo":{"array":[10,20,30]}} +``` + +Working with arrays by index: + +``` go +... + +jsonObj := gabs.New() + +// Create an array with the length of 3 +jsonObj.ArrayOfSize(3, "foo") + +jsonObj.S("foo").SetIndex("test1", 0) +jsonObj.S("foo").SetIndex("test2", 1) + +// Create an embedded array with the length of 3 +jsonObj.S("foo").ArrayOfSizeI(3, 2) + +jsonObj.S("foo").Index(2).SetIndex(1, 0) +jsonObj.S("foo").Index(2).SetIndex(2, 1) +jsonObj.S("foo").Index(2).SetIndex(3, 2) + +fmt.Println(jsonObj.String()) + +... 
+``` + +Will print: + +``` +{"foo":["test1","test2",[1,2,3]]} +``` + +### Converting back to JSON + +This is the easiest part: + +``` go +... + +jsonParsedObj, _ := gabs.ParseJSON([]byte(`{ + "outter":{ + "values":{ + "first":10, + "second":11 + } + }, + "outter2":"hello world" +}`)) + +jsonOutput := jsonParsedObj.String() +// Becomes `{"outter":{"values":{"first":10,"second":11}},"outter2":"hello world"}` + +... +``` + +And to serialize a specific segment is as simple as: + +``` go +... + +jsonParsedObj := gabs.ParseJSON([]byte(`{ + "outter":{ + "values":{ + "first":10, + "second":11 + } + }, + "outter2":"hello world" +}`)) + +jsonOutput := jsonParsedObj.Search("outter").String() +// Becomes `{"values":{"first":10,"second":11}}` + +... +``` + +### Merge two containers + +You can merge a JSON structure into an existing one, where collisions will be +converted into a JSON array. + +``` go +jsonParsed1, _ := ParseJSON([]byte(`{"outter": {"value1": "one"}}`)) +jsonParsed2, _ := ParseJSON([]byte(`{"outter": {"inner": {"value3": "three"}}, "outter2": {"value2": "two"}}`)) + +jsonParsed1.Merge(jsonParsed2) +// Becomes `{"outter":{"inner":{"value3":"three"},"value1":"one"},"outter2":{"value2":"two"}}` +``` + +Arrays are merged: + +``` go +jsonParsed1, _ := ParseJSON([]byte(`{"array": ["one"]}`)) +jsonParsed2, _ := ParseJSON([]byte(`{"array": ["two"]}`)) + +jsonParsed1.Merge(jsonParsed2) +// Becomes `{"array":["one", "two"]}` +``` + +### Parsing Numbers + +Gabs uses the `json` package under the bonnet, which by default will parse all +number values into `float64`. If you need to parse `Int` values then you should +use a `json.Decoder` (https://golang.org/pkg/encoding/json/#Decoder): + +``` go +sample := []byte(`{"test":{"int":10, "float":6.66}}`) +dec := json.NewDecoder(bytes.NewReader(sample)) +dec.UseNumber() + +val, err := gabs.ParseJSONDecoder(dec) +if err != nil { + t.Errorf("Failed to parse: %v", err) + return +} + +intValue, err := val.Path("test.int").Data().(json.Number).Int64() +``` diff --git a/vendor/github.com/Jeffail/gabs/gabs.go b/vendor/github.com/Jeffail/gabs/gabs.go new file mode 100644 index 00000000..a21a79d7 --- /dev/null +++ b/vendor/github.com/Jeffail/gabs/gabs.go @@ -0,0 +1,581 @@ +/* +Copyright (c) 2014 Ashley Jeffs + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +// Package gabs implements a simplified wrapper around creating and parsing JSON. 
+package gabs + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "strings" +) + +//-------------------------------------------------------------------------------------------------- + +var ( + // ErrOutOfBounds - Index out of bounds. + ErrOutOfBounds = errors.New("out of bounds") + + // ErrNotObjOrArray - The target is not an object or array type. + ErrNotObjOrArray = errors.New("not an object or array") + + // ErrNotObj - The target is not an object type. + ErrNotObj = errors.New("not an object") + + // ErrNotArray - The target is not an array type. + ErrNotArray = errors.New("not an array") + + // ErrPathCollision - Creating a path failed because an element collided with an existing value. + ErrPathCollision = errors.New("encountered value collision whilst building path") + + // ErrInvalidInputObj - The input value was not a map[string]interface{}. + ErrInvalidInputObj = errors.New("invalid input object") + + // ErrInvalidInputText - The input data could not be parsed. + ErrInvalidInputText = errors.New("input text could not be parsed") + + // ErrInvalidPath - The filepath was not valid. + ErrInvalidPath = errors.New("invalid file path") + + // ErrInvalidBuffer - The input buffer contained an invalid JSON string + ErrInvalidBuffer = errors.New("input buffer contained invalid JSON") +) + +//-------------------------------------------------------------------------------------------------- + +// Container - an internal structure that holds a reference to the core interface map of the parsed +// json. Use this container to move context. +type Container struct { + object interface{} +} + +// Data - Return the contained data as an interface{}. +func (g *Container) Data() interface{} { + if g == nil { + return nil + } + return g.object +} + +//-------------------------------------------------------------------------------------------------- + +// Path - Search for a value using dot notation. +func (g *Container) Path(path string) *Container { + return g.Search(strings.Split(path, ".")...) +} + +// Search - Attempt to find and return an object within the JSON structure by specifying the +// hierarchy of field names to locate the target. If the search encounters an array and has not +// reached the end target then it will iterate each object of the array for the target and return +// all of the results in a JSON array. +func (g *Container) Search(hierarchy ...string) *Container { + var object interface{} + + object = g.Data() + for target := 0; target < len(hierarchy); target++ { + if mmap, ok := object.(map[string]interface{}); ok { + object, ok = mmap[hierarchy[target]] + if !ok { + return nil + } + } else if marray, ok := object.([]interface{}); ok { + tmpArray := []interface{}{} + for _, val := range marray { + tmpGabs := &Container{val} + res := tmpGabs.Search(hierarchy[target:]...) + if res != nil { + tmpArray = append(tmpArray, res.Data()) + } + } + if len(tmpArray) == 0 { + return nil + } + return &Container{tmpArray} + } else { + return nil + } + } + return &Container{object} +} + +// S - Shorthand method, does the same thing as Search. +func (g *Container) S(hierarchy ...string) *Container { + return g.Search(hierarchy...) +} + +// Exists - Checks whether a path exists. +func (g *Container) Exists(hierarchy ...string) bool { + return g.Search(hierarchy...) != nil +} + +// ExistsP - Checks whether a dot notation path exists. +func (g *Container) ExistsP(path string) bool { + return g.Exists(strings.Split(path, ".")...) 
+} + +// Index - Attempt to find and return an object within a JSON array by index. +func (g *Container) Index(index int) *Container { + if array, ok := g.Data().([]interface{}); ok { + if index >= len(array) { + return &Container{nil} + } + return &Container{array[index]} + } + return &Container{nil} +} + +// Children - Return a slice of all the children of the array. This also works for objects, however, +// the children returned for an object will NOT be in order and you lose the names of the returned +// objects this way. +func (g *Container) Children() ([]*Container, error) { + if array, ok := g.Data().([]interface{}); ok { + children := make([]*Container, len(array)) + for i := 0; i < len(array); i++ { + children[i] = &Container{array[i]} + } + return children, nil + } + if mmap, ok := g.Data().(map[string]interface{}); ok { + children := []*Container{} + for _, obj := range mmap { + children = append(children, &Container{obj}) + } + return children, nil + } + return nil, ErrNotObjOrArray +} + +// ChildrenMap - Return a map of all the children of an object. +func (g *Container) ChildrenMap() (map[string]*Container, error) { + if mmap, ok := g.Data().(map[string]interface{}); ok { + children := map[string]*Container{} + for name, obj := range mmap { + children[name] = &Container{obj} + } + return children, nil + } + return nil, ErrNotObj +} + +//-------------------------------------------------------------------------------------------------- + +// Set - Set the value of a field at a JSON path, any parts of the path that do not exist will be +// constructed, and if a collision occurs with a non object type whilst iterating the path an error +// is returned. +func (g *Container) Set(value interface{}, path ...string) (*Container, error) { + if len(path) == 0 { + g.object = value + return g, nil + } + var object interface{} + if g.object == nil { + g.object = map[string]interface{}{} + } + object = g.object + for target := 0; target < len(path); target++ { + if mmap, ok := object.(map[string]interface{}); ok { + if target == len(path)-1 { + mmap[path[target]] = value + } else if mmap[path[target]] == nil { + mmap[path[target]] = map[string]interface{}{} + } + object = mmap[path[target]] + } else { + return &Container{nil}, ErrPathCollision + } + } + return &Container{object}, nil +} + +// SetP - Does the same as Set, but using a dot notation JSON path. +func (g *Container) SetP(value interface{}, path string) (*Container, error) { + return g.Set(value, strings.Split(path, ".")...) +} + +// SetIndex - Set a value of an array element based on the index. +func (g *Container) SetIndex(value interface{}, index int) (*Container, error) { + if array, ok := g.Data().([]interface{}); ok { + if index >= len(array) { + return &Container{nil}, ErrOutOfBounds + } + array[index] = value + return &Container{array[index]}, nil + } + return &Container{nil}, ErrNotArray +} + +// Object - Create a new JSON object at a path. Returns an error if the path contains a collision +// with a non object type. +func (g *Container) Object(path ...string) (*Container, error) { + return g.Set(map[string]interface{}{}, path...) +} + +// ObjectP - Does the same as Object, but using a dot notation JSON path. +func (g *Container) ObjectP(path string) (*Container, error) { + return g.Object(strings.Split(path, ".")...) +} + +// ObjectI - Create a new JSON object at an array index. Returns an error if the object is not an +// array or the index is out of bounds. 
+func (g *Container) ObjectI(index int) (*Container, error) { + return g.SetIndex(map[string]interface{}{}, index) +} + +// Array - Create a new JSON array at a path. Returns an error if the path contains a collision with +// a non object type. +func (g *Container) Array(path ...string) (*Container, error) { + return g.Set([]interface{}{}, path...) +} + +// ArrayP - Does the same as Array, but using a dot notation JSON path. +func (g *Container) ArrayP(path string) (*Container, error) { + return g.Array(strings.Split(path, ".")...) +} + +// ArrayI - Create a new JSON array at an array index. Returns an error if the object is not an +// array or the index is out of bounds. +func (g *Container) ArrayI(index int) (*Container, error) { + return g.SetIndex([]interface{}{}, index) +} + +// ArrayOfSize - Create a new JSON array of a particular size at a path. Returns an error if the +// path contains a collision with a non object type. +func (g *Container) ArrayOfSize(size int, path ...string) (*Container, error) { + a := make([]interface{}, size) + return g.Set(a, path...) +} + +// ArrayOfSizeP - Does the same as ArrayOfSize, but using a dot notation JSON path. +func (g *Container) ArrayOfSizeP(size int, path string) (*Container, error) { + return g.ArrayOfSize(size, strings.Split(path, ".")...) +} + +// ArrayOfSizeI - Create a new JSON array of a particular size at an array index. Returns an error +// if the object is not an array or the index is out of bounds. +func (g *Container) ArrayOfSizeI(size, index int) (*Container, error) { + a := make([]interface{}, size) + return g.SetIndex(a, index) +} + +// Delete - Delete an element at a JSON path, an error is returned if the element does not exist. +func (g *Container) Delete(path ...string) error { + var object interface{} + + if g.object == nil { + return ErrNotObj + } + object = g.object + for target := 0; target < len(path); target++ { + if mmap, ok := object.(map[string]interface{}); ok { + if target == len(path)-1 { + if _, ok := mmap[path[target]]; ok { + delete(mmap, path[target]) + } else { + return ErrNotObj + } + } + object = mmap[path[target]] + } else { + return ErrNotObj + } + } + return nil +} + +// DeleteP - Does the same as Delete, but using a dot notation JSON path. +func (g *Container) DeleteP(path string) error { + return g.Delete(strings.Split(path, ".")...) +} + +// Merge - Merges two gabs-containers +func (g *Container) Merge(toMerge *Container) error { + var recursiveFnc func(map[string]interface{}, []string) error + recursiveFnc = func(mmap map[string]interface{}, path []string) error { + for key, value := range mmap { + newPath := append(path, key) + if g.Exists(newPath...) { + target := g.Search(newPath...) + switch t := value.(type) { + case map[string]interface{}: + switch targetV := target.Data().(type) { + case map[string]interface{}: + if err := recursiveFnc(t, newPath); err != nil { + return err + } + case []interface{}: + g.Set(append(targetV, t), newPath...) + default: + newSlice := append([]interface{}{}, targetV) + g.Set(append(newSlice, t), newPath...) + } + case []interface{}: + for _, valueOfSlice := range t { + if err := g.ArrayAppend(valueOfSlice, newPath...); err != nil { + return err + } + } + default: + switch targetV := target.Data().(type) { + case []interface{}: + g.Set(append(targetV, t), newPath...) + default: + newSlice := append([]interface{}{}, targetV) + g.Set(append(newSlice, t), newPath...) + } + } + } else { + // path doesn't exist. 
So set the value + if _, err := g.Set(value, newPath...); err != nil { + return err + } + } + } + return nil + } + if mmap, ok := toMerge.Data().(map[string]interface{}); ok { + return recursiveFnc(mmap, []string{}) + } + return nil +} + +//-------------------------------------------------------------------------------------------------- + +/* +Array modification/search - Keeping these options simple right now, no need for anything more +complicated since you can just cast to []interface{}, modify and then reassign with Set. +*/ + +// ArrayAppend - Append a value onto a JSON array. If the target is not a JSON array then it will be +// converted into one, with its contents as the first element of the array. +func (g *Container) ArrayAppend(value interface{}, path ...string) error { + if array, ok := g.Search(path...).Data().([]interface{}); ok { + array = append(array, value) + _, err := g.Set(array, path...) + return err + } + + newArray := []interface{}{} + if d := g.Search(path...).Data(); d != nil { + newArray = append(newArray, d) + } + newArray = append(newArray, value) + + _, err := g.Set(newArray, path...) + return err +} + +// ArrayAppendP - Append a value onto a JSON array using a dot notation JSON path. +func (g *Container) ArrayAppendP(value interface{}, path string) error { + return g.ArrayAppend(value, strings.Split(path, ".")...) +} + +// ArrayRemove - Remove an element from a JSON array. +func (g *Container) ArrayRemove(index int, path ...string) error { + if index < 0 { + return ErrOutOfBounds + } + array, ok := g.Search(path...).Data().([]interface{}) + if !ok { + return ErrNotArray + } + if index < len(array) { + array = append(array[:index], array[index+1:]...) + } else { + return ErrOutOfBounds + } + _, err := g.Set(array, path...) + return err +} + +// ArrayRemoveP - Remove an element from a JSON array using a dot notation JSON path. +func (g *Container) ArrayRemoveP(index int, path string) error { + return g.ArrayRemove(index, strings.Split(path, ".")...) +} + +// ArrayElement - Access an element from a JSON array. +func (g *Container) ArrayElement(index int, path ...string) (*Container, error) { + if index < 0 { + return &Container{nil}, ErrOutOfBounds + } + array, ok := g.Search(path...).Data().([]interface{}) + if !ok { + return &Container{nil}, ErrNotArray + } + if index < len(array) { + return &Container{array[index]}, nil + } + return &Container{nil}, ErrOutOfBounds +} + +// ArrayElementP - Access an element from a JSON array using a dot notation JSON path. +func (g *Container) ArrayElementP(index int, path string) (*Container, error) { + return g.ArrayElement(index, strings.Split(path, ".")...) +} + +// ArrayCount - Count the number of elements in a JSON array. +func (g *Container) ArrayCount(path ...string) (int, error) { + if array, ok := g.Search(path...).Data().([]interface{}); ok { + return len(array), nil + } + return 0, ErrNotArray +} + +// ArrayCountP - Count the number of elements in a JSON array using a dot notation JSON path. +func (g *Container) ArrayCountP(path string) (int, error) { + return g.ArrayCount(strings.Split(path, ".")...) +} + +//-------------------------------------------------------------------------------------------------- + +// Bytes - Converts the contained object back to a JSON []byte blob. 
+func (g *Container) Bytes() []byte { + if g.Data() != nil { + if bytes, err := json.Marshal(g.object); err == nil { + return bytes + } + } + return []byte("{}") +} + +// BytesIndent - Converts the contained object to a JSON []byte blob formatted with prefix, indent. +func (g *Container) BytesIndent(prefix string, indent string) []byte { + if g.object != nil { + if bytes, err := json.MarshalIndent(g.object, prefix, indent); err == nil { + return bytes + } + } + return []byte("{}") +} + +// String - Converts the contained object to a JSON formatted string. +func (g *Container) String() string { + return string(g.Bytes()) +} + +// StringIndent - Converts the contained object back to a JSON formatted string with prefix, indent. +func (g *Container) StringIndent(prefix string, indent string) string { + return string(g.BytesIndent(prefix, indent)) +} + +// EncodeOpt is a functional option for the EncodeJSON method. +type EncodeOpt func(e *json.Encoder) + +// EncodeOptHTMLEscape sets the encoder to escape the JSON for html. +func EncodeOptHTMLEscape(doEscape bool) EncodeOpt { + return func(e *json.Encoder) { + e.SetEscapeHTML(doEscape) + } +} + +// EncodeOptIndent sets the encoder to indent the JSON output. +func EncodeOptIndent(prefix string, indent string) EncodeOpt { + return func(e *json.Encoder) { + e.SetIndent(prefix, indent) + } +} + +// EncodeJSON - Encodes the contained object back to a JSON formatted []byte +// using a variant list of modifier functions for the encoder being used. +// Functions for modifying the output are prefixed with EncodeOpt, e.g. +// EncodeOptHTMLEscape. +func (g *Container) EncodeJSON(encodeOpts ...EncodeOpt) []byte { + var b bytes.Buffer + encoder := json.NewEncoder(&b) + encoder.SetEscapeHTML(false) // Do not escape by default. + for _, opt := range encodeOpts { + opt(encoder) + } + if err := encoder.Encode(g.object); err != nil { + return []byte("{}") + } + result := b.Bytes() + if len(result) > 0 { + result = result[:len(result)-1] + } + return result +} + +// New - Create a new gabs JSON object. +func New() *Container { + return &Container{map[string]interface{}{}} +} + +// Consume - Gobble up an already converted JSON object, or a fresh map[string]interface{} object. +func Consume(root interface{}) (*Container, error) { + return &Container{root}, nil +} + +// ParseJSON - Convert a string into a representation of the parsed JSON. +func ParseJSON(sample []byte) (*Container, error) { + var gabs Container + + if err := json.Unmarshal(sample, &gabs.object); err != nil { + return nil, err + } + + return &gabs, nil +} + +// ParseJSONDecoder - Convert a json.Decoder into a representation of the parsed JSON. +func ParseJSONDecoder(decoder *json.Decoder) (*Container, error) { + var gabs Container + + if err := decoder.Decode(&gabs.object); err != nil { + return nil, err + } + + return &gabs, nil +} + +// ParseJSONFile - Read a file and convert into a representation of the parsed JSON. +func ParseJSONFile(path string) (*Container, error) { + if len(path) > 0 { + cBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + container, err := ParseJSON(cBytes) + if err != nil { + return nil, err + } + + return container, nil + } + return nil, ErrInvalidPath +} + +// ParseJSONBuffer - Read the contents of a buffer into a representation of the parsed JSON. 
+func ParseJSONBuffer(buffer io.Reader) (*Container, error) { + var gabs Container + jsonDecoder := json.NewDecoder(buffer) + if err := jsonDecoder.Decode(&gabs.object); err != nil { + return nil, err + } + + return &gabs, nil +} + +//-------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Jeffail/gabs/gabs_logo.png b/vendor/github.com/Jeffail/gabs/gabs_logo.png new file mode 100644 index 00000000..b6c1fad9 Binary files /dev/null and b/vendor/github.com/Jeffail/gabs/gabs_logo.png differ diff --git a/vendor/github.com/NYTimes/gziphandler/.gitignore b/vendor/github.com/NYTimes/gziphandler/.gitignore new file mode 100644 index 00000000..1377554e --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/vendor/github.com/NYTimes/gziphandler/.travis.yml b/vendor/github.com/NYTimes/gziphandler/.travis.yml new file mode 100644 index 00000000..94dfae36 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.x + - tip +env: + - GO111MODULE=on +install: + - go mod download +script: + - go test -race -v diff --git a/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md b/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..cdbca194 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +--- +layout: code-of-conduct +version: v1.0 +--- + +This code of conduct outlines our expectations for participants within the **NYTimes/gziphandler** community, as well as steps to reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community. + +Our open source community strives to: + +* **Be friendly and patient.** +* **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability. +* **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language. +* **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one. +* **Be careful in the words that we choose**: we are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable. +* **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. 
Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes. + +## Definitions + +Harassment includes, but is not limited to: + +- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation +- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment +- Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity. You must address people by the name they give you when not addressing them by their username or handle +- Physical contact and simulated physical contact (eg, textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop +- Threats of violence, both physical and psychological +- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm +- Deliberate intimidation +- Stalking or following +- Harassing photography or recording, including logging online activity for harassment purposes +- Sustained disruption of discussion +- Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour +- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others +- Continued one-on-one communication after requests to cease +- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse +- Publication of non-harassing private communication + +Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding: + +- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’ +- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you” +- Refusal to explain or debate social justice concepts +- Communicating in a ‘tone’ you don’t find congenial +- Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions + + +### Diversity Statement + +We encourage everyone to participate and are committed to building a community for all. Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong. + +Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected +characteristics above, including participants with disabilities. 
+ +### Reporting Issues + +If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via **code@nytimes.com**. All reports will be handled with discretion. In your report please include: + +- Your contact information. +- Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please +include them as well. Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link. +- Any additional information that may be helpful. + +After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. If the complaint originates from a member of the response team, it will be handled by a different member of the response team. We will respect confidentiality requests for the purpose of protecting victims of abuse. + +### Attribution & Acknowledgements + +We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established code of conducts and diversity statements as our inspiration: + +* [Django](https://www.djangoproject.com/conduct/reporting/) +* [Python](https://www.python.org/community/diversity/) +* [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct) +* [Contributor Covenant](http://contributor-covenant.org/) +* [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/) +* [Citizen Code of Conduct](http://citizencodeofconduct.org/) + +This Code of Conduct was based on https://github.com/todogroup/opencodeofconduct diff --git a/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md b/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md new file mode 100644 index 00000000..b89a9eb4 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md @@ -0,0 +1,30 @@ +# Contributing to NYTimes/gziphandler + +This is an open source project started by handful of developers at The New York Times and open to the entire Go community. + +We really appreciate your help! + +## Filing issues + +When filing an issue, make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +## Contributing code + +Before submitting changes, please follow these guidelines: + +1. Check the open issues and pull requests for existing discussions. +2. Open an issue to discuss a new feature. +3. Write tests. +4. Make sure code follows the ['Go Code Review Comments'](https://github.com/golang/go/wiki/CodeReviewComments). +5. Make sure your changes pass `go test`. +6. Make sure the entire test suite passes locally and on Travis CI. +7. Open a Pull Request. +8. [Squash your commits](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) after receiving feedback and add a [great commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). + +Unless otherwise noted, the gziphandler source files are distributed under the Apache 2.0-style license found in the LICENSE.md file. 
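Before the license text below, a brief aside on the gabs container API vendored above: the following is a minimal, illustrative sketch (not part of the vendored sources) that exercises only functions defined in that file — `ParseJSON`, `String`, `EncodeJSON`, and the `EncodeOptIndent` functional option — under the assumption of the v1 import path `github.com/Jeffail/gabs`.

```go
package main

import (
	"fmt"

	"github.com/Jeffail/gabs"
)

func main() {
	// ParseJSON returns a *gabs.Container wrapping the decoded document.
	c, err := gabs.ParseJSON([]byte(`{"service":"daytona","replicas":3}`))
	if err != nil {
		panic(err)
	}

	// String re-marshals the contained object to compact JSON,
	// falling back to "{}" if marshalling fails.
	fmt.Println(c.String())

	// EncodeJSON leaves HTML escaping off by default; behavior is tuned
	// with EncodeOpt functional options such as EncodeOptIndent.
	fmt.Println(string(c.EncodeJSON(gabs.EncodeOptIndent("", "  "))))
}
```

The `EncodeOpt` options keep the encoder configurable without growing the `EncodeJSON` signature — the same functional-option shape the gziphandler code further below uses for its `config`.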
diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE b/vendor/github.com/NYTimes/gziphandler/LICENSE new file mode 100644 index 00000000..df6192d3 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/NYTimes/gziphandler/README.md b/vendor/github.com/NYTimes/gziphandler/README.md new file mode 100644 index 00000000..6259acac --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/README.md @@ -0,0 +1,56 @@ +Gzip Handler +============ + +This is a tiny Go package which wraps HTTP handlers to transparently gzip the +response body, for clients which support it. Although it's usually simpler to +leave that to a reverse proxy (like nginx or Varnish), this package is useful +when that's undesirable. + +## Install +```bash +go get -u github.com/NYTimes/gziphandler +``` + +## Usage + +Call `GzipHandler` with any handler (an object which implements the +`http.Handler` interface), and it'll return a new handler which gzips the +response. For example: + +```go +package main + +import ( + "io" + "net/http" + "github.com/NYTimes/gziphandler" +) + +func main() { + withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "Hello, World") + }) + + withGz := gziphandler.GzipHandler(withoutGz) + + http.Handle("/", withGz) + http.ListenAndServe("0.0.0.0:8000", nil) +} +``` + + +## Documentation + +The docs can be found at [godoc.org][docs], as usual. + + +## License + +[Apache 2.0][license]. + + + + +[docs]: https://godoc.org/github.com/NYTimes/gziphandler +[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE diff --git a/vendor/github.com/NYTimes/gziphandler/go.mod b/vendor/github.com/NYTimes/gziphandler/go.mod new file mode 100644 index 00000000..80190127 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/go.mod @@ -0,0 +1,5 @@ +module github.com/NYTimes/gziphandler + +go 1.11 + +require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/NYTimes/gziphandler/go.sum b/vendor/github.com/NYTimes/gziphandler/go.sum new file mode 100644 index 00000000..4347755a --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/go.sum @@ -0,0 +1,7 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go new file mode 100644 index 00000000..c112bbdf --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/gzip.go @@ -0,0 +1,532 @@ +package gziphandler // import "github.com/NYTimes/gziphandler" + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + "mime" + "net" + "net/http" + "strconv" + "strings" + "sync" +) + +const ( + vary = "Vary" + acceptEncoding = "Accept-Encoding" + contentEncoding = 
"Content-Encoding" + contentType = "Content-Type" + contentLength = "Content-Length" +) + +type codings map[string]float64 + +const ( + // DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set. + // This is actually kind of ambiguous in RFC 2616, so hopefully it's correct. + // The examples seem to indicate that it is. + DefaultQValue = 1.0 + + // DefaultMinSize is the default minimum size until we enable gzip compression. + // 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer. + // If you take a file that is 1300 bytes and compress it to 800 bytes, it’s still transmitted in that same 1500 byte packet regardless, so you’ve gained nothing. + // That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value. + DefaultMinSize = 1400 +) + +// gzipWriterPools stores a sync.Pool for each compression level for reuse of +// gzip.Writers. Use poolIndex to covert a compression level to an index into +// gzipWriterPools. +var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool + +func init() { + for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ { + addLevelPool(i) + } + addLevelPool(gzip.DefaultCompression) +} + +// poolIndex maps a compression level to its index into gzipWriterPools. It +// assumes that level is a valid gzip compression level. +func poolIndex(level int) int { + // gzip.DefaultCompression == -1, so we need to treat it special. + if level == gzip.DefaultCompression { + return gzip.BestCompression - gzip.BestSpeed + 1 + } + return level - gzip.BestSpeed +} + +func addLevelPool(level int) { + gzipWriterPools[poolIndex(level)] = &sync.Pool{ + New: func() interface{} { + // NewWriterLevel only returns error on a bad level, we are guaranteeing + // that this will be a valid level so it is okay to ignore the returned + // error. + w, _ := gzip.NewWriterLevel(nil, level) + return w + }, + } +} + +// GzipResponseWriter provides an http.ResponseWriter interface, which gzips +// bytes before writing them to the underlying response. This doesn't close the +// writers, so don't forget to do that. +// It can be configured to skip response smaller than minSize. +type GzipResponseWriter struct { + http.ResponseWriter + index int // Index for gzipWriterPools. + gw *gzip.Writer + + code int // Saves the WriteHeader value. + + minSize int // Specifed the minimum response size to gzip. If the response length is bigger than this value, it is compressed. + buf []byte // Holds the first part of the write before reaching the minSize or the end of the write. + ignore bool // If true, then we immediately passthru writes to the underlying ResponseWriter. + + contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty. +} + +type GzipResponseWriterWithCloseNotify struct { + *GzipResponseWriter +} + +func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool { + return w.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +// Write appends data to the gzip writer. +func (w *GzipResponseWriter) Write(b []byte) (int, error) { + // GZIP responseWriter is initialized. Use the GZIP responseWriter. + if w.gw != nil { + return w.gw.Write(b) + } + + // If we have already decided not to use GZIP, immediately passthrough. 
+ if w.ignore { + return w.ResponseWriter.Write(b) + } + + // Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter. + // On the first write, w.buf changes from nil to a valid slice + w.buf = append(w.buf, b...) + + var ( + cl, _ = strconv.Atoi(w.Header().Get(contentLength)) + ct = w.Header().Get(contentType) + ce = w.Header().Get(contentEncoding) + ) + // Only continue if they didn't already choose an encoding or a known unhandled content length or type. + if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) { + // If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data. + if len(w.buf) < w.minSize && cl == 0 { + return len(b), nil + } + // If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue. + if cl >= w.minSize || len(w.buf) >= w.minSize { + // If a Content-Type wasn't specified, infer it from the current buffer. + if ct == "" { + ct = http.DetectContentType(w.buf) + w.Header().Set(contentType, ct) + } + // If the Content-Type is acceptable to GZIP, initialize the GZIP writer. + if handleContentType(w.contentTypes, ct) { + if err := w.startGzip(); err != nil { + return 0, err + } + return len(b), nil + } + } + } + // If we got here, we should not GZIP this response. + if err := w.startPlain(); err != nil { + return 0, err + } + return len(b), nil +} + +// startGzip initializes a GZIP writer and writes the buffer. +func (w *GzipResponseWriter) startGzip() error { + // Set the GZIP header. + w.Header().Set(contentEncoding, "gzip") + + // if the Content-Length is already set, then calls to Write on gzip + // will fail to set the Content-Length header since its already set + // See: https://github.com/golang/go/issues/14975. + w.Header().Del(contentLength) + + // Write the header to gzip response. + if w.code != 0 { + w.ResponseWriter.WriteHeader(w.code) + // Ensure that no other WriteHeader's happen + w.code = 0 + } + + // Initialize and flush the buffer into the gzip response if there are any bytes. + // If there aren't any, we shouldn't initialize it yet because on Close it will + // write the gzip header even if nothing was ever written. + if len(w.buf) > 0 { + // Initialize the GZIP response. + w.init() + n, err := w.gw.Write(w.buf) + + // This should never happen (per io.Writer docs), but if the write didn't + // accept the entire buffer but returned no specific error, we have no clue + // what's going on, so abort just to be safe. + if err == nil && n < len(w.buf) { + err = io.ErrShortWrite + } + return err + } + return nil +} + +// startPlain writes to sent bytes and buffer the underlying ResponseWriter without gzip. +func (w *GzipResponseWriter) startPlain() error { + if w.code != 0 { + w.ResponseWriter.WriteHeader(w.code) + // Ensure that no other WriteHeader's happen + w.code = 0 + } + w.ignore = true + // If Write was never called then don't call Write on the underlying ResponseWriter. + if w.buf == nil { + return nil + } + n, err := w.ResponseWriter.Write(w.buf) + w.buf = nil + // This should never happen (per io.Writer docs), but if the write didn't + // accept the entire buffer but returned no specific error, we have no clue + // what's going on, so abort just to be safe. + if err == nil && n < len(w.buf) { + err = io.ErrShortWrite + } + return err +} + +// WriteHeader just saves the response code until close or GZIP effective writes. 
+func (w *GzipResponseWriter) WriteHeader(code int) { + if w.code == 0 { + w.code = code + } +} + +// init graps a new gzip writer from the gzipWriterPool and writes the correct +// content encoding header. +func (w *GzipResponseWriter) init() { + // Bytes written during ServeHTTP are redirected to this gzip writer + // before being written to the underlying response. + gzw := gzipWriterPools[w.index].Get().(*gzip.Writer) + gzw.Reset(w.ResponseWriter) + w.gw = gzw +} + +// Close will close the gzip.Writer and will put it back in the gzipWriterPool. +func (w *GzipResponseWriter) Close() error { + if w.ignore { + return nil + } + + if w.gw == nil { + // GZIP not triggered yet, write out regular response. + err := w.startPlain() + // Returns the error if any at write. + if err != nil { + err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error()) + } + return err + } + + err := w.gw.Close() + gzipWriterPools[w.index].Put(w.gw) + w.gw = nil + return err +} + +// Flush flushes the underlying *gzip.Writer and then the underlying +// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter +// an http.Flusher. +func (w *GzipResponseWriter) Flush() { + if w.gw == nil && !w.ignore { + // Only flush once startGzip or startPlain has been called. + // + // Flush is thus a no-op until we're certain whether a plain + // or gzipped response will be served. + return + } + + if w.gw != nil { + w.gw.Flush() + } + + if fw, ok := w.ResponseWriter.(http.Flusher); ok { + fw.Flush() + } +} + +// Hijack implements http.Hijacker. If the underlying ResponseWriter is a +// Hijacker, its Hijack method is returned. Otherwise an error is returned. +func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if hj, ok := w.ResponseWriter.(http.Hijacker); ok { + return hj.Hijack() + } + return nil, nil, fmt.Errorf("http.Hijacker interface is not supported") +} + +// verify Hijacker interface implementation +var _ http.Hijacker = &GzipResponseWriter{} + +// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in +// an error case it panics rather than returning an error. +func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler { + wrap, err := NewGzipLevelHandler(level) + if err != nil { + panic(err) + } + return wrap +} + +// NewGzipLevelHandler returns a wrapper function (often known as middleware) +// which can be used to wrap an HTTP handler to transparently gzip the response +// body if the client supports it (via the Accept-Encoding header). Responses will +// be encoded at the given gzip compression level. An error will be returned only +// if an invalid gzip compression level is given, so if one can ensure the level +// is valid, the returned error can be safely ignored. +func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) { + return NewGzipLevelAndMinSize(level, DefaultMinSize) +} + +// NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller +// specify the minimum size before compression. 
+func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) { + return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize)) +} + +func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) { + c := &config{ + level: gzip.DefaultCompression, + minSize: DefaultMinSize, + } + + for _, o := range opts { + o(c) + } + + if err := c.validate(); err != nil { + return nil, err + } + + return func(h http.Handler) http.Handler { + index := poolIndex(c.level) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add(vary, acceptEncoding) + if acceptsGzip(r) { + gw := &GzipResponseWriter{ + ResponseWriter: w, + index: index, + minSize: c.minSize, + contentTypes: c.contentTypes, + } + defer gw.Close() + + if _, ok := w.(http.CloseNotifier); ok { + gwcn := GzipResponseWriterWithCloseNotify{gw} + h.ServeHTTP(gwcn, r) + } else { + h.ServeHTTP(gw, r) + } + + } else { + h.ServeHTTP(w, r) + } + }) + }, nil +} + +// Parsed representation of one of the inputs to ContentTypes. +// See https://golang.org/pkg/mime/#ParseMediaType +type parsedContentType struct { + mediaType string + params map[string]string +} + +// equals returns whether this content type matches another content type. +func (pct parsedContentType) equals(mediaType string, params map[string]string) bool { + if pct.mediaType != mediaType { + return false + } + // if pct has no params, don't care about other's params + if len(pct.params) == 0 { + return true + } + + // if pct has any params, they must be identical to other's. + if len(pct.params) != len(params) { + return false + } + for k, v := range pct.params { + if w, ok := params[k]; !ok || v != w { + return false + } + } + return true +} + +// Used for functional configuration. +type config struct { + minSize int + level int + contentTypes []parsedContentType +} + +func (c *config) validate() error { + if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) { + return fmt.Errorf("invalid compression level requested: %d", c.level) + } + + if c.minSize < 0 { + return fmt.Errorf("minimum size must be more than zero") + } + + return nil +} + +type option func(c *config) + +func MinSize(size int) option { + return func(c *config) { + c.minSize = size + } +} + +func CompressionLevel(level int) option { + return func(c *config) { + c.level = level + } +} + +// ContentTypes specifies a list of content types to compare +// the Content-Type header to before compressing. If none +// match, the response will be returned as-is. +// +// Content types are compared in a case-insensitive, whitespace-ignored +// manner. +// +// A MIME type without any other directive will match a content type +// that has the same MIME type, regardless of that content type's other +// directives. I.e., "text/html" will match both "text/html" and +// "text/html; charset=utf-8". +// +// A MIME type with any other directive will only match a content type +// that has the same MIME type and other directives. I.e., +// "text/html; charset=utf-8" will only match "text/html; charset=utf-8". +// +// By default, responses are gzipped regardless of +// Content-Type. 
+func ContentTypes(types []string) option { + return func(c *config) { + c.contentTypes = []parsedContentType{} + for _, v := range types { + mediaType, params, err := mime.ParseMediaType(v) + if err == nil { + c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params}) + } + } + } +} + +// GzipHandler wraps an HTTP handler, to transparently gzip the response body if +// the client supports it (via the Accept-Encoding header). This will compress at +// the default compression level. +func GzipHandler(h http.Handler) http.Handler { + wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression) + return wrapper(h) +} + +// acceptsGzip returns true if the given HTTP request indicates that it will +// accept a gzipped response. +func acceptsGzip(r *http.Request) bool { + acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding)) + return acceptedEncodings["gzip"] > 0.0 +} + +// returns true if we've been configured to compress the specific content type. +func handleContentType(contentTypes []parsedContentType, ct string) bool { + // If contentTypes is empty we handle all content types. + if len(contentTypes) == 0 { + return true + } + + mediaType, params, err := mime.ParseMediaType(ct) + if err != nil { + return false + } + + for _, c := range contentTypes { + if c.equals(mediaType, params) { + return true + } + } + + return false +} + +// parseEncodings attempts to parse a list of codings, per RFC 2616, as might +// appear in an Accept-Encoding header. It returns a map of content-codings to +// quality values, and an error containing the errors encountered. It's probably +// safe to ignore those, because silently ignoring errors is how the internet +// works. +// +// See: http://tools.ietf.org/html/rfc2616#section-14.3. +func parseEncodings(s string) (codings, error) { + c := make(codings) + var e []string + + for _, ss := range strings.Split(s, ",") { + coding, qvalue, err := parseCoding(ss) + + if err != nil { + e = append(e, err.Error()) + } else { + c[coding] = qvalue + } + } + + // TODO (adammck): Use a proper multi-error struct, so the individual errors + // can be extracted if anyone cares. + if len(e) > 0 { + return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", ")) + } + + return c, nil +} + +// parseCoding parses a single conding (content-coding with an optional qvalue), +// as might appear in an Accept-Encoding header. It attempts to forgive minor +// formatting errors. +func parseCoding(s string) (coding string, qvalue float64, err error) { + for n, part := range strings.Split(s, ";") { + part = strings.TrimSpace(part) + qvalue = DefaultQValue + + if n == 0 { + coding = strings.ToLower(part) + } else if strings.HasPrefix(part, "q=") { + qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64) + + if qvalue < 0.0 { + qvalue = 0.0 + } else if qvalue > 1.0 { + qvalue = 1.0 + } + } + } + + if coding == "" { + err = fmt.Errorf("empty content-coding") + } + + return +} diff --git a/vendor/github.com/NYTimes/gziphandler/gzip_go18.go b/vendor/github.com/NYTimes/gziphandler/gzip_go18.go new file mode 100644 index 00000000..fa9665b7 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/gzip_go18.go @@ -0,0 +1,43 @@ +// +build go1.8 + +package gziphandler + +import "net/http" + +// Push initiates an HTTP/2 server push. +// Push returns ErrNotSupported if the client has disabled push or if push +// is not supported on the underlying connection. 
+func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error { + pusher, ok := w.ResponseWriter.(http.Pusher) + if ok && pusher != nil { + return pusher.Push(target, setAcceptEncodingForPushOptions(opts)) + } + return http.ErrNotSupported +} + +// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers. +func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions { + + if opts == nil { + opts = &http.PushOptions{ + Header: http.Header{ + acceptEncoding: []string{"gzip"}, + }, + } + return opts + } + + if opts.Header == nil { + opts.Header = http.Header{ + acceptEncoding: []string{"gzip"}, + } + return opts + } + + if encoding := opts.Header.Get(acceptEncoding); encoding == "" { + opts.Header.Add(acceptEncoding, "gzip") + return opts + } + + return opts +} diff --git a/vendor/github.com/StackExchange/wmi/LICENSE b/vendor/github.com/StackExchange/wmi/LICENSE new file mode 100644 index 00000000..ae80b672 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Stack Exchange + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/StackExchange/wmi/README.md b/vendor/github.com/StackExchange/wmi/README.md new file mode 100644 index 00000000..426d1a46 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/README.md @@ -0,0 +1,6 @@ +wmi +=== + +Package wmi provides a WQL interface to Windows WMI. + +Note: It interfaces with WMI on the local machine, therefore it only runs on Windows. diff --git a/vendor/github.com/StackExchange/wmi/swbemservices.go b/vendor/github.com/StackExchange/wmi/swbemservices.go new file mode 100644 index 00000000..9765a53f --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/swbemservices.go @@ -0,0 +1,260 @@ +// +build windows + +package wmi + +import ( + "fmt" + "reflect" + "runtime" + "sync" + + "github.com/go-ole/go-ole" + "github.com/go-ole/go-ole/oleutil" +) + +// SWbemServices is used to access wmi. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx +type SWbemServices struct { + //TODO: track namespace. 
Not sure if we can re connect to a different namespace using the same instance + cWMIClient *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method + sWbemLocatorIUnknown *ole.IUnknown + sWbemLocatorIDispatch *ole.IDispatch + queries chan *queryRequest + closeError chan error + lQueryorClose sync.Mutex +} + +type queryRequest struct { + query string + dst interface{} + args []interface{} + finished chan error +} + +// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI +func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) { + //fmt.Println("InitializeSWbemServices: Starting") + //TODO: implement connectServerArgs as optional argument for init with connectServer call + s := new(SWbemServices) + s.cWMIClient = c + s.queries = make(chan *queryRequest) + initError := make(chan error) + go s.process(initError) + + err, ok := <-initError + if ok { + return nil, err //Send error to caller + } + //fmt.Println("InitializeSWbemServices: Finished") + return s, nil +} + +// Close will clear and release all of the SWbemServices resources +func (s *SWbemServices) Close() error { + s.lQueryorClose.Lock() + if s == nil || s.sWbemLocatorIDispatch == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices is not Initialized") + } + if s.queries == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices has been closed") + } + //fmt.Println("Close: sending close request") + var result error + ce := make(chan error) + s.closeError = ce //Race condition if multiple callers to close. May need to lock here + close(s.queries) //Tell background to shut things down + s.lQueryorClose.Unlock() + err, ok := <-ce + if ok { + result = err + } + //fmt.Println("Close: finished") + return result +} + +func (s *SWbemServices) process(initError chan error) { + //fmt.Println("process: starting background thread initialization") + //All OLE/WMI calls must happen on the same initialized thead, so lock this goroutine + runtime.LockOSThread() + defer runtime.LockOSThread() + + err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err != nil { + oleCode := err.(*ole.OleError).Code() + if oleCode != ole.S_OK && oleCode != S_FALSE { + initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err) + return + } + } + defer ole.CoUninitialize() + + unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator") + if err != nil { + initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err) + return + } else if unknown == nil { + initError <- ErrNilCreateObject + return + } + defer unknown.Release() + s.sWbemLocatorIUnknown = unknown + + dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch) + if err != nil { + initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err) + return + } + defer dispatch.Release() + s.sWbemLocatorIDispatch = dispatch + + // we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs + //fmt.Println("process: initialized. 
closing initError") + close(initError) + //fmt.Println("process: waiting for queries") + for q := range s.queries { + //fmt.Printf("process: new query: len(query)=%d\n", len(q.query)) + errQuery := s.queryBackground(q) + //fmt.Println("process: s.queryBackground finished") + if errQuery != nil { + q.finished <- errQuery + } + close(q.finished) + } + //fmt.Println("process: queries channel closed") + s.queries = nil //set channel to nil so we know it is closed + //TODO: I think the Release/Clear calls can panic if things are in a bad state. + //TODO: May need to recover from panics and send error to method caller instead. + close(s.closeError) +} + +// Query runs the WQL query using a SWbemServices instance and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. +// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. +func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + s.lQueryorClose.Lock() + if s == nil || s.sWbemLocatorIDispatch == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices is not Initialized") + } + if s.queries == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices has been closed") + } + + //fmt.Println("Query: Sending query request") + qr := queryRequest{ + query: query, + dst: dst, + args: connectServerArgs, + finished: make(chan error), + } + s.queries <- &qr + s.lQueryorClose.Unlock() + err, ok := <-qr.finished + if ok { + //fmt.Println("Query: Finished with error") + return err //Send error to caller + } + //fmt.Println("Query: Finished") + return nil +} + +func (s *SWbemServices) queryBackground(q *queryRequest) error { + if s == nil || s.sWbemLocatorIDispatch == nil { + return fmt.Errorf("SWbemServices is not Initialized") + } + wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart + //fmt.Println("queryBackground: Starting") + + dv := reflect.ValueOf(q.dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType := checkMultiArg(dv) + if mat == multiArgTypeInvalid { + return ErrInvalidEntityType + } + + // service is a SWbemServices + serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...) 
+ if err != nil { + return err + } + service := serviceRaw.ToIDispatch() + defer serviceRaw.Clear() + + // result is a SWBemObjectSet + resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query) + if err != nil { + return err + } + result := resultRaw.ToIDispatch() + defer resultRaw.Clear() + + count, err := oleInt64(result, "Count") + if err != nil { + return err + } + + enumProperty, err := result.GetProperty("_NewEnum") + if err != nil { + return err + } + defer enumProperty.Clear() + + enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + if enum == nil { + return fmt.Errorf("can't get IEnumVARIANT, enum is nil") + } + defer enum.Release() + + // Initialize a slice with Count capacity + dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) + + var errFieldMismatch error + for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { + if err != nil { + return err + } + + err := func() error { + // item is a SWbemObject, but really a Win32_Process + item := itemRaw.ToIDispatch() + defer item.Release() + + ev := reflect.New(elemType) + if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + return nil + }() + if err != nil { + return err + } + } + //fmt.Println("queryBackground: Finished") + return errFieldMismatch +} diff --git a/vendor/github.com/StackExchange/wmi/wmi.go b/vendor/github.com/StackExchange/wmi/wmi.go new file mode 100644 index 00000000..a951b125 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/wmi.go @@ -0,0 +1,486 @@ +// +build windows + +/* +Package wmi provides a WQL interface for WMI on Windows. + +Example code to print names of running processes: + + type Win32_Process struct { + Name string + } + + func main() { + var dst []Win32_Process + q := wmi.CreateQuery(&dst, "") + err := wmi.Query(q, &dst) + if err != nil { + log.Fatal(err) + } + for i, v := range dst { + println(i, v.Name) + } + } + +*/ +package wmi + +import ( + "bytes" + "errors" + "fmt" + "log" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-ole/go-ole" + "github.com/go-ole/go-ole/oleutil" +) + +var l = log.New(os.Stdout, "", log.LstdFlags) + +var ( + ErrInvalidEntityType = errors.New("wmi: invalid entity type") + // ErrNilCreateObject is the error returned if CreateObject returns nil even + // if the error was nil. + ErrNilCreateObject = errors.New("wmi: create object returned nil") + lock sync.Mutex +) + +// S_FALSE is returned by CoInitializeEx if it was already called on this thread. +const S_FALSE = 0x00000001 + +// QueryNamespace invokes Query with the given namespace on the local machine. +func QueryNamespace(query string, dst interface{}, namespace string) error { + return Query(query, dst, nil, namespace) +} + +// Query runs the WQL query and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. 
+// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. +// +// Query is a wrapper around DefaultClient.Query. +func Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + if DefaultClient.SWbemServicesClient == nil { + return DefaultClient.Query(query, dst, connectServerArgs...) + } + return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...) +} + +// A Client is an WMI query client. +// +// Its zero value (DefaultClient) is a usable client. +type Client struct { + // NonePtrZero specifies if nil values for fields which aren't pointers + // should be returned as the field types zero value. + // + // Setting this to true allows stucts without pointer fields to be used + // without the risk failure should a nil value returned from WMI. + NonePtrZero bool + + // PtrNil specifies if nil values for pointer fields should be returned + // as nil. + // + // Setting this to true will set pointer fields to nil where WMI + // returned nil, otherwise the types zero value will be returned. + PtrNil bool + + // AllowMissingFields specifies that struct fields not present in the + // query result should not result in an error. + // + // Setting this to true allows custom queries to be used with full + // struct definitions instead of having to define multiple structs. + AllowMissingFields bool + + // SWbemServiceClient is an optional SWbemServices object that can be + // initialized and then reused across multiple queries. If it is null + // then the method will initialize a new temporary client each time. + SWbemServicesClient *SWbemServices +} + +// DefaultClient is the default Client and is used by Query, QueryNamespace +var DefaultClient = &Client{} + +// Query runs the WQL query and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. +// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. +func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + dv := reflect.ValueOf(dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType := checkMultiArg(dv) + if mat == multiArgTypeInvalid { + return ErrInvalidEntityType + } + + lock.Lock() + defer lock.Unlock() + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err != nil { + oleCode := err.(*ole.OleError).Code() + if oleCode != ole.S_OK && oleCode != S_FALSE { + return err + } + } + defer ole.CoUninitialize() + + unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator") + if err != nil { + return err + } else if unknown == nil { + return ErrNilCreateObject + } + defer unknown.Release() + + wmi, err := unknown.QueryInterface(ole.IID_IDispatch) + if err != nil { + return err + } + defer wmi.Release() + + // service is a SWbemServices + serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...) 
+ if err != nil { + return err + } + service := serviceRaw.ToIDispatch() + defer serviceRaw.Clear() + + // result is a SWBemObjectSet + resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query) + if err != nil { + return err + } + result := resultRaw.ToIDispatch() + defer resultRaw.Clear() + + count, err := oleInt64(result, "Count") + if err != nil { + return err + } + + enumProperty, err := result.GetProperty("_NewEnum") + if err != nil { + return err + } + defer enumProperty.Clear() + + enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + if enum == nil { + return fmt.Errorf("can't get IEnumVARIANT, enum is nil") + } + defer enum.Release() + + // Initialize a slice with Count capacity + dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) + + var errFieldMismatch error + for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { + if err != nil { + return err + } + + err := func() error { + // item is a SWbemObject, but really a Win32_Process + item := itemRaw.ToIDispatch() + defer item.Release() + + ev := reflect.New(elemType) + if err = c.loadEntity(ev.Interface(), item); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + return nil + }() + if err != nil { + return err + } + } + return errFieldMismatch +} + +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. +// StructType is the type of the struct pointed to by the destination argument. +type ErrFieldMismatch struct { + StructType reflect.Type + FieldName string + Reason string +} + +func (e *ErrFieldMismatch) Error() string { + return fmt.Sprintf("wmi: cannot load field %q into a %q: %s", + e.FieldName, e.StructType, e.Reason) +} + +var timeType = reflect.TypeOf(time.Time{}) + +// loadEntity loads a SWbemObject into a struct pointer. 
+func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) { + v := reflect.ValueOf(dst).Elem() + for i := 0; i < v.NumField(); i++ { + f := v.Field(i) + of := f + isPtr := f.Kind() == reflect.Ptr + if isPtr { + ptr := reflect.New(f.Type().Elem()) + f.Set(ptr) + f = f.Elem() + } + n := v.Type().Field(i).Name + if !f.CanSet() { + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "CanSet() is false", + } + } + prop, err := oleutil.GetProperty(src, n) + if err != nil { + if !c.AllowMissingFields { + errFieldMismatch = &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "no such struct field", + } + } + continue + } + defer prop.Clear() + + switch val := prop.Value().(type) { + case int8, int16, int32, int64, int: + v := reflect.ValueOf(val).Int() + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + f.SetInt(v) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + f.SetUint(uint64(v)) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not an integer class", + } + } + case uint8, uint16, uint32, uint64: + v := reflect.ValueOf(val).Uint() + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + f.SetInt(int64(v)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + f.SetUint(v) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not an integer class", + } + } + case string: + switch f.Kind() { + case reflect.String: + f.SetString(val) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + iv, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return err + } + f.SetInt(iv) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + uv, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return err + } + f.SetUint(uv) + case reflect.Struct: + switch f.Type() { + case timeType: + if len(val) == 25 { + mins, err := strconv.Atoi(val[22:]) + if err != nil { + return err + } + val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60) + } + t, err := time.Parse("20060102150405.000000-0700", val) + if err != nil { + return err + } + f.Set(reflect.ValueOf(t)) + } + } + case bool: + switch f.Kind() { + case reflect.Bool: + f.SetBool(val) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a bool", + } + } + case float32: + switch f.Kind() { + case reflect.Float32: + f.SetFloat(float64(val)) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a Float32", + } + } + default: + if f.Kind() == reflect.Slice { + switch f.Type().Elem().Kind() { + case reflect.String: + safeArray := prop.ToArray() + if safeArray != nil { + arr := safeArray.ToValueArray() + fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) + for i, v := range arr { + s := fArr.Index(i) + s.SetString(v.(string)) + } + f.Set(fArr) + } + case reflect.Uint8: + safeArray := prop.ToArray() + if safeArray != nil { + arr := safeArray.ToValueArray() + fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) + for i, v := range arr { + s := fArr.Index(i) + s.SetUint(reflect.ValueOf(v).Uint()) + } + f.Set(fArr) + } + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: fmt.Sprintf("unsupported slice type (%T)", val), + } + } + } else { + typeof := 
reflect.TypeOf(val) + if typeof == nil && (isPtr || c.NonePtrZero) { + if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) { + of.Set(reflect.Zero(of.Type())) + } + break + } + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: fmt.Sprintf("unsupported type (%T)", val), + } + } + } + } + return errFieldMismatch +} + +type multiArgType int + +const ( + multiArgTypeInvalid multiArgType = iota + multiArgTypeStruct + multiArgTypeStructPtr +) + +// checkMultiArg checks that v has type []S, []*S for some struct type S. +// +// It returns what category the slice's elements are, and the reflect.Type +// that represents S. +func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { + if v.Kind() != reflect.Slice { + return multiArgTypeInvalid, nil + } + elemType = v.Type().Elem() + switch elemType.Kind() { + case reflect.Struct: + return multiArgTypeStruct, elemType + case reflect.Ptr: + elemType = elemType.Elem() + if elemType.Kind() == reflect.Struct { + return multiArgTypeStructPtr, elemType + } + } + return multiArgTypeInvalid, nil +} + +func oleInt64(item *ole.IDispatch, prop string) (int64, error) { + v, err := oleutil.GetProperty(item, prop) + if err != nil { + return 0, err + } + defer v.Clear() + + i := int64(v.Val) + return i, nil +} + +// CreateQuery returns a WQL query string that queries all columns of src. where +// is an optional string that is appended to the query, to be used with WHERE +// clauses. In such a case, the "WHERE" string should appear at the beginning. +func CreateQuery(src interface{}, where string) string { + var b bytes.Buffer + b.WriteString("SELECT ") + s := reflect.Indirect(reflect.ValueOf(src)) + t := s.Type() + if s.Kind() == reflect.Slice { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return "" + } + var fields []string + for i := 0; i < t.NumField(); i++ { + fields = append(fields, t.Field(i).Name) + } + b.WriteString(strings.Join(fields, ", ")) + b.WriteString(" FROM ") + b.WriteString(t.Name()) + b.WriteString(" " + where) + return b.String() +} diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore new file mode 100644 index 00000000..8c03ec11 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +/metrics.out diff --git a/vendor/github.com/armon/go-metrics/.travis.yml b/vendor/github.com/armon/go-metrics/.travis.yml new file mode 100644 index 00000000..87d230c8 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - "1.x" + +env: + - GO111MODULE=on + +install: + - go get ./... + +script: + - go test ./... 
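Editor's note on the vendored wmi helpers above: `CreateQuery` derives a WQL `SELECT` from a struct type, and the entity loader decodes the results back into a slice of that struct. Below is a minimal usage sketch for review context only; the `Win32_LogicalDisk` struct and the package-level `wmi.Query` helper are illustrative assumptions, not part of this diff.

```go
// Illustrative sketch only (not part of this diff). The Win32_LogicalDisk
// fields and the package-level wmi.Query helper are assumptions.
package main

import (
	"fmt"
	"log"

	"github.com/StackExchange/wmi"
)

// Win32_LogicalDisk mirrors a subset of the WMI class of the same name;
// CreateQuery builds both the SELECT list and the FROM clause from it.
type Win32_LogicalDisk struct {
	DeviceID  string
	FreeSpace uint64
	Size      uint64
}

func main() {
	var disks []Win32_LogicalDisk
	// Yields: SELECT DeviceID, FreeSpace, Size FROM Win32_LogicalDisk WHERE DriveType = 3
	q := wmi.CreateQuery(&disks, "WHERE DriveType = 3")
	if err := wmi.Query(q, &disks); err != nil {
		log.Fatal(err)
	}
	for _, d := range disks {
		fmt.Printf("%s: %d of %d bytes free\n", d.DeviceID, d.FreeSpace, d.Size)
	}
}
```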
diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE new file mode 100644 index 00000000..106569e5 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md new file mode 100644 index 00000000..aa73348c --- /dev/null +++ b/vendor/github.com/armon/go-metrics/README.md @@ -0,0 +1,91 @@ +go-metrics +========== + +This library provides a `metrics` package which can be used to instrument code, +expose application metrics, and profile runtime performance in a flexible manner. + +Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) + +Sinks +----- + +The `metrics` package makes use of a `MetricSink` interface to support delivery +to any type of backend. Currently the following sinks are provided: + +* StatsiteSink: Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) +* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) +* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) +* InmemSink: Provides in-memory aggregation, can be used to export stats +* FanoutSink: Sinks to multiple sinks, enabling writing to multiple statsite instances, for example +* BlackholeSink: Sinks to nowhere + +In addition to the sinks, the `InmemSignal` can be used to catch a signal, +and dump a formatted output of recent metrics. For example, when a process gets +a SIGUSR1, it can dump to stderr recent performance metrics for debugging. + +Labels +------ + +Most metrics have an equivalent ending in `WithLabels`; such methods +allow pushing metrics with labels and using some features of the underlying sinks +(e.g. translation into Prometheus labels). + +Since some of these labels may greatly increase the cardinality of metrics, the +library allows filtering labels using a blacklist/whitelist filtering system +which is global to all metrics. + +* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to the underlying sink; otherwise, all labels are sent by default. +* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to the underlying sinks.
+ +By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that +no labels are filtered at all, but a user can globally block some labels with high +cardinality at the application level. + +Examples +-------- + +Here is an example of using the package: + +```go +func SlowMethod() { + // Profiling the runtime of a method + defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) +} + +// Configure a statsite sink as the global metrics sink +sink, _ := metrics.NewStatsiteSink("statsite:8125") +metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) + +// Emit a Key/Value pair +metrics.EmitKey([]string{"questions", "meaning of life"}, 42) +``` + +Here is an example of setting up a signal handler: + +```go +// Setup the inmem sink and signal handler +inm := metrics.NewInmemSink(10*time.Second, time.Minute) +sig := metrics.DefaultInmemSignal(inm) +metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) + +// Run some code +inm.SetGauge([]string{"foo"}, 42) +inm.EmitKey([]string{"bar"}, 30) + +inm.IncrCounter([]string{"baz"}, 42) +inm.IncrCounter([]string{"baz"}, 1) +inm.IncrCounter([]string{"baz"}, 80) + +inm.AddSample([]string{"method", "wow"}, 42) +inm.AddSample([]string{"method", "wow"}, 100) +inm.AddSample([]string{"method", "wow"}, 22) + +.... +``` + +When a signal comes in, output like the following will be dumped to stderr: + + [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 + [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 + [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 + [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 \ No newline at end of file diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 00000000..31098dd5 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 00000000..38136af3 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/vendor/github.com/armon/go-metrics/go.mod b/vendor/github.com/armon/go-metrics/go.mod new file mode 100644 index 00000000..5df13430 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/go.mod @@ -0,0 +1,19 @@ +module github.com/armon/go-metrics + +go 1.12 + +require ( + github.com/DataDog/datadog-go v3.2.0+incompatible + github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible + github.com/circonus-labs/circonusllhist v0.1.3 // indirect + github.com/golang/protobuf v1.2.0 + github.com/hashicorp/go-immutable-radix v1.0.0 + github.com/hashicorp/go-retryablehttp v0.5.3 // indirect + github.com/pascaldekloe/goe v0.1.0 + github.com/pkg/errors v0.8.1 // indirect + github.com/prometheus/client_golang v0.9.2 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 + github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 + github.com/stretchr/testify v1.3.0 // indirect +
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect +) diff --git a/vendor/github.com/armon/go-metrics/go.sum b/vendor/github.com/armon/go-metrics/go.sum new file mode 100644 index 00000000..0c4c45c9 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/go.sum @@ -0,0 +1,47 @@ +github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go new file mode 100644 index 00000000..93b0e0ad --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,348 @@ +package metrics + +import ( + "bytes" + "fmt" + "math" + "net/url" + "strings" + "sync" + "time" +) + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information. +type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how many metrics interval we keep + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval. 
+ maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex + + rateDenom float64 +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]GaugeValue + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]SampledValue + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]SampledValue +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]GaugeValue), + Points: make(map[string][]float32), + Counters: make(map[string]SampledValue), + Samples: make(map[string]SampledValue), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Rate float64 // The values rate per time unit (usually 1 second) + Sum float64 // The sum of values + SumSq float64 `json:"-"` // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value + LastUpdated time.Time `json:"-"` // When value was last updated +} + +// Computes a Stddev of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Computes a mean of the values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64, rateDenom float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } + a.Rate = float64(a.Sum) / rateDenom + a.LastUpdated = time.Now() +} + +func (a *AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) + } +} + +// NewInmemSinkFromURL creates an InmemSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) { + params := u.Query() + + interval, err := time.ParseDuration(params.Get("interval")) + if err != nil { + return nil, fmt.Errorf("Bad 'interval' param: %s", err) + } + + retain, err := time.ParseDuration(params.Get("retain")) + if err != nil { + return nil, fmt.Errorf("Bad 'retain' param: %s", err) + } + + return NewInmemSink(interval, retain), nil +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. 
+func NewInmemSink(interval, retain time.Duration) *InmemSink { + rateTimeUnit := time.Second + i := &InmemSink{ + interval: interval, + retain: retain, + maxIntervals: int(retain / interval), + rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()), + } + i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) + return i +} + +func (i *InmemSink) SetGauge(key []string, val float32) { + i.SetGaugeWithLabels(key, val, nil) +} + +func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels} +} + +func (i *InmemSink) EmitKey(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + vals := intv.Points[k] + intv.Points[k] = append(vals, val) +} + +func (i *InmemSink) IncrCounter(key []string, val float32) { + i.IncrCounterWithLabels(key, val, nil) +} + +func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Counters[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Counters[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +func (i *InmemSink) AddSample(key []string, val float32) { + i.AddSampleWithLabels(key, val, nil) +} + +func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Samples[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Samples[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +// Data is used to retrieve all the aggregated metrics +// Intervals may be in use, and a read lock should be acquired +func (i *InmemSink) Data() []*IntervalMetrics { + // Get the current interval, forces creation + i.getInterval() + + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + intervals := make([]*IntervalMetrics, n) + + copy(intervals[:n-1], i.intervals[:n-1]) + current := i.intervals[n-1] + + // make a separate copy of the current interval + intervals[n-1] = &IntervalMetrics{} + copyCurrent := intervals[n-1] + current.RLock() + *copyCurrent = *current + + copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges)) + for k, v := range current.Gauges { + copyCurrent.Gauges[k] = v + } + // saved values will not change, so just copy the reference + copyCurrent.Points = make(map[string][]float32, len(current.Points)) + for k, v := range current.Points { + copyCurrent.Points[k] = v + } + copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) + for k, v := range current.Counters { + copyCurrent.Counters[k] = v.deepCopy() + } + copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) + for k, v := range current.Samples { + copyCurrent.Samples[k] = v.deepCopy() + } + current.RUnlock() + + return intervals +} + +func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + return nil +} + +func (i *InmemSink)
createInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Check for an existing interval + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + // Add the current interval + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + n++ + + // Truncate the intervals if they are too long + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// getInterval returns the current interval to write to +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + if m := i.getExistingInterval(intv); m != nil { + return m + } + return i.createInterval(intv) +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + return buf.String() +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + key := buf.String() + + for _, label := range labels { + replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) + } + + return buf.String(), key +} diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go new file mode 100644 index 00000000..5fac958d --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go @@ -0,0 +1,131 @@ +package metrics + +import ( + "fmt" + "net/http" + "sort" + "time" +) + +// MetricsSummary holds a roll-up of metrics info for a given interval +type MetricsSummary struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Hash string `json:"-"` + Value float32 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Hash string `json:"-"` + *AggregateSample + Mean float64 + Stddev float64 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +// deepCopy allocates a new instance of AggregateSample +func (source *SampledValue) deepCopy() SampledValue { + dest := *source + if source.AggregateSample != nil { + dest.AggregateSample = &AggregateSample{} + *dest.AggregateSample = *source.AggregateSample + } + return dest +} + +// DisplayMetrics returns a summary of the metrics from the most recent finished interval. 
+func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + data := i.Data() + + var interval *IntervalMetrics + n := len(data) + switch { + case n == 0: + return nil, fmt.Errorf("no metric intervals have been initialized yet") + case n == 1: + // Show the current interval if it's all we have + interval = data[0] + default: + // Show the most recent finished interval if we have one + interval = data[n-2] + } + + interval.RLock() + defer interval.RUnlock() + + summary := MetricsSummary{ + Timestamp: interval.Interval.Round(time.Second).UTC().String(), + Gauges: make([]GaugeValue, 0, len(interval.Gauges)), + Points: make([]PointValue, 0, len(interval.Points)), + } + + // Format and sort the output of each metric type, so it gets displayed in a + // deterministic order. + for name, points := range interval.Points { + summary.Points = append(summary.Points, PointValue{name, points}) + } + sort.Slice(summary.Points, func(i, j int) bool { + return summary.Points[i].Name < summary.Points[j].Name + }) + + for hash, value := range interval.Gauges { + value.Hash = hash + value.DisplayLabels = make(map[string]string) + for _, label := range value.Labels { + value.DisplayLabels[label.Name] = label.Value + } + value.Labels = nil + + summary.Gauges = append(summary.Gauges, value) + } + sort.Slice(summary.Gauges, func(i, j int) bool { + return summary.Gauges[i].Hash < summary.Gauges[j].Hash + }) + + summary.Counters = formatSamples(interval.Counters) + summary.Samples = formatSamples(interval.Samples) + + return summary, nil +} + +func formatSamples(source map[string]SampledValue) []SampledValue { + output := make([]SampledValue, 0, len(source)) + for hash, sample := range source { + displayLabels := make(map[string]string) + for _, label := range sample.Labels { + displayLabels[label.Name] = label.Value + } + + output = append(output, SampledValue{ + Name: sample.Name, + Hash: hash, + AggregateSample: sample.AggregateSample, + Mean: sample.AggregateSample.Mean(), + Stddev: sample.AggregateSample.Stddev(), + DisplayLabels: displayLabels, + }) + } + sort.Slice(output, func(i, j int) bool { + return output[i].Hash < output[j].Hash + }) + + return output +} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go new file mode 100644 index 00000000..0937f4ae --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,117 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "strings" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan os.Signal + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr. 
Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for j := 0; j < len(data)-1; j++ { + intv := data[j] + intv.RLock() + for _, val := range intv.Gauges { + name := i.flattenLabels(val.Name, val.Labels) + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for _, agg := range intv.Counters { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + for _, agg := range intv.Samples { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSignal) flattenLabels(name string, labels []Label) string { + buf := bytes.NewBufferString(name) + replacer := strings.NewReplacer(" ", "_", ":", "_") + + for _, label := range labels { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, label.Value) + } + + return buf.String() +} diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go new file mode 100644 index 00000000..4920d683 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,278 @@ +package metrics + +import ( + "runtime" + "strings" + "time" + + "github.com/hashicorp/go-immutable-radix" +) + +type Label struct { + Name string + Value string +} + +func (m *Metrics) SetGauge(key []string, val float32) { + m.SetGaugeWithLabels(key, val, nil) +} + +func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" { + if m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } else if m.EnableHostname { + key = insert(0, m.HostName, key) + } + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.SetGaugeWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + allowed, _ := m.allowMetric(key, nil) + if !allowed { + return + } + m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + m.IncrCounterWithLabels(key, val, nil) +} + +func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels 
[]Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.IncrCounterWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) AddSample(key []string, val float32) { + m.AddSampleWithLabels(key, val, nil) +} + +func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.AddSampleWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + m.MeasureSinceWithLabels(key, start, nil) +} + +func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + now := time.Now() + elapsed := now.Sub(start) + msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) + m.sink.AddSampleWithLabels(key, msec, labelsFiltered) +} + +// UpdateFilter overwrites the existing filter with the given rules. +func (m *Metrics) UpdateFilter(allow, block []string) { + m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels) +} + +// UpdateFilterAndLabels overwrites the existing filter with the given rules. 
+func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + m.filterLock.Lock() + defer m.filterLock.Unlock() + + m.AllowedPrefixes = allow + m.BlockedPrefixes = block + + if allowedLabels == nil { + // Having a whitelist means we take only elements from it + m.allowedLabels = nil + } else { + m.allowedLabels = make(map[string]bool) + for _, v := range allowedLabels { + m.allowedLabels[v] = true + } + } + m.blockedLabels = make(map[string]bool) + for _, v := range blockedLabels { + m.blockedLabels[v] = true + } + m.AllowedLabels = allowedLabels + m.BlockedLabels = blockedLabels + + m.filter = iradix.New() + for _, prefix := range m.AllowedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), true) + } + for _, prefix := range m.BlockedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), false) + } +} + +// labelIsAllowed returns true if a label should be included in the metric. +// The caller should hold m.filterLock while calling this method. +func (m *Metrics) labelIsAllowed(label *Label) bool { + labelName := (*label).Name + if m.blockedLabels != nil { + _, ok := m.blockedLabels[labelName] + if ok { + // If present, let's remove this label + return false + } + } + if m.allowedLabels != nil { + _, ok := m.allowedLabels[labelName] + return ok + } + // Allow by default + return true +} + +// filterLabels returns only the allowed labels. +// The caller should hold m.filterLock while calling this method. +func (m *Metrics) filterLabels(labels []Label) []Label { + if labels == nil { + return nil + } + toReturn := []Label{} + for _, label := range labels { + if m.labelIsAllowed(&label) { + toReturn = append(toReturn, label) + } + } + return toReturn +} + +// Returns whether the metric should be allowed based on configured prefix filters. +// Also returns the applicable labels. +func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) { + m.filterLock.RLock() + defer m.filterLock.RUnlock() + + if m.filter == nil || m.filter.Len() == 0 { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + _, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, "."))) + if !ok { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + return allowed.(bool), m.filterLabels(labels) +} + +// Periodically collects runtime stats to publish +func (m *Metrics) collectStats() { + for { + time.Sleep(m.ProfileInterval) + m.emitRuntimeStats() + } +} + +// Emits various runtime statistics +func (m *Metrics) emitRuntimeStats() { + // Export number of Goroutines + numRoutines := runtime.NumGoroutine() + m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) + + // Export memory stats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) + m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) + m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) + m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) + m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num;
i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Inserts a string value at an index into the slice +func insert(i int, v string, s []string) []string { + s = append(s, "") + copy(s[i+1:], s[i:]) + s[i] = v + return s +} diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go new file mode 100644 index 00000000..0b7d6e4b --- /dev/null +++ b/vendor/github.com/armon/go-metrics/sink.go @@ -0,0 +1,115 @@ +package metrics + +import ( + "fmt" + "net/url" +) + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + SetGaugeWithLabels(key []string, val float32, labels []Label) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + IncrCounterWithLabels(key []string, val float32, labels []Label) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) + AddSampleWithLabels(key []string, val float32, labels []Label) +} + +// BlackholeSink is used to just blackhole messages +type BlackholeSink struct{} + +func (*BlackholeSink) SetGauge(key []string, val float32) {} +func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) EmitKey(key []string, val float32) {} +func (*BlackholeSink) IncrCounter(key []string, val float32) {} +func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) AddSample(key []string, val float32) {} +func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {} + +// FanoutSink is used to fan out values to multiple sinks +type FanoutSink []MetricSink + +func (fh FanoutSink) SetGauge(key []string, val float32) { + fh.SetGaugeWithLabels(key, val, nil) +} + +func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.SetGaugeWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) EmitKey(key []string, val float32) { + for _, s := range fh { + s.EmitKey(key, val) + } +} + +func (fh FanoutSink) IncrCounter(key []string, val float32) { + fh.IncrCounterWithLabels(key, val, nil) +} + +func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.IncrCounterWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) AddSample(key []string, val float32) { + fh.AddSampleWithLabels(key, val, nil) +} + +func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.AddSampleWithLabels(key, val, labels) + } +} + +// sinkURLFactoryFunc is a generic interface around the *SinkFromURL() function provided +// by each sink type +type sinkURLFactoryFunc func(*url.URL) (MetricSink, error) + +// sinkRegistry supports the generic NewMetricSink function by mapping URL +// schemes to metric sink factory functions +var sinkRegistry = map[string]sinkURLFactoryFunc{ + "statsd": NewStatsdSinkFromURL, + "statsite": NewStatsiteSinkFromURL, + "inmem": NewInmemSinkFromURL, +} + +// NewMetricSinkFromURL allows a generic URL input to configure any of the +// supported sinks.
The scheme of the URL identifies the type of the sink, and the +// query parameters are used to set options. +// +// "statsd://" - Initializes a StatsdSink. The host and port are passed through +// as the "addr" of the sink +// +// "statsite://" - Initializes a StatsiteSink. The host and port become the +// "addr" of the sink +// +// "inmem://" - Initializes an InmemSink. The host and port are ignored. The +// "interval" and "duration" query parameters must be specified with valid +// durations, see NewInmemSink for details. +func NewMetricSinkFromURL(urlStr string) (MetricSink, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + sinkURLFactoryFunc := sinkRegistry[u.Scheme] + if sinkURLFactoryFunc == nil { + return nil, fmt.Errorf( + "cannot create metric sink, unrecognized sink name: %q", u.Scheme) + } + + return sinkURLFactoryFunc(u) +} diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go new file mode 100644 index 00000000..32a28c48 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/start.go @@ -0,0 +1,141 @@ +package metrics + +import ( + "os" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-immutable-radix" +) + +// Config is used to configure metrics settings +type Config struct { + ServiceName string // Prefixed with keys to separate services + HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableHostnameLabel bool // Enable adding hostname to labels + EnableServiceLabel bool // Enable adding service to labels + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers. + ProfileInterval time.Duration // Interval to profile runtime metrics + + AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator + BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator + AllowedLabels []string // A list of metric labels to allow, with '.' as the separator + BlockedLabels []string // A list of metric labels to block, with '.'
as the separator + FilterDefault bool // Whether to allow metrics by default +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink + filter *iradix.Tree + allowedLabels map[string]bool + blockedLabels map[string]bool + filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access +} + +// Shared global metrics instance +var globalMetrics atomic.Value // *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + FilterDefault: true, // Don't filter metrics by default + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels) + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. +func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { + metrics, err := New(conf, sink) + if err == nil { + globalMetrics.Store(metrics) + } + return metrics, err +} + +// Proxy all the methods to the globalMetrics instance +func SetGauge(key []string, val float32) { + globalMetrics.Load().(*Metrics).SetGauge(key, val) +} + +func SetGaugeWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels) +} + +func EmitKey(key []string, val float32) { + globalMetrics.Load().(*Metrics).EmitKey(key, val) +} + +func IncrCounter(key []string, val float32) { + globalMetrics.Load().(*Metrics).IncrCounter(key, val) +} + +func IncrCounterWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels) +} + +func AddSample(key []string, val float32) { + globalMetrics.Load().(*Metrics).AddSample(key, val) +} + +func AddSampleWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels) +} + +func MeasureSince(key []string, start time.Time) { + globalMetrics.Load().(*Metrics).MeasureSince(key, start) +} + +func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels) +} + +func UpdateFilter(allow, block []string) { + globalMetrics.Load().(*Metrics).UpdateFilter(allow, block) +} + +// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels +// and blockedLabels - when not nil - allow filtering of labels in order to +// block/allow globally labels (especially useful when having large number of +// values for a given label). 
See README.md for more information about usage. +func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels) +} diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go new file mode 100644 index 00000000..1bfffce4 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsd.go @@ -0,0 +1,184 @@ +package metrics + +import ( + "bytes" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // statsdMaxLen is the maximum size of a packet + // to send to statsd + statsdMaxLen = 1400 +) + +// StatsdSink provides a MetricSink that can be used +// with a statsite or statsd metrics server. It uses +// only UDP packets, while StatsiteSink uses TCP. +type StatsdSink struct { + addr string + metricQueue chan string +} + +// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsdSink(u.Host) +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // 
Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go new file mode 100644 index 00000000..6c0d284d --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,172 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever. + flushInterval = 100 * time.Millisecond +) + +// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsiteSink(u.Host) +} + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + 
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-proxyproto/.gitignore b/vendor/github.com/armon/go-proxyproto/.gitignore new file mode 100644 index 00000000..dd2440d5 --- /dev/null +++ b/vendor/github.com/armon/go-proxyproto/.gitignore @@ -0,0 +1,2 @@ +*.test +*~ diff --git a/vendor/github.com/armon/go-proxyproto/LICENSE b/vendor/github.com/armon/go-proxyproto/LICENSE new file mode 100644 index 00000000..3ed5f430 --- /dev/null +++ b/vendor/github.com/armon/go-proxyproto/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file
diff --git a/vendor/github.com/armon/go-proxyproto/README.md b/vendor/github.com/armon/go-proxyproto/README.md
new file mode 100644
index 00000000..47e97188
--- /dev/null
+++ b/vendor/github.com/armon/go-proxyproto/README.md
@@ -0,0 +1,36 @@
+# proxyproto
+
+This library provides the `proxyproto` package which can be used for servers
+listening behind HAProxy or Amazon ELB load balancers. Those load balancers
+support the use of a proxy protocol (http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt),
+which provides a simple mechanism for the server to get the address of the client
+instead of the load balancer.
+
+This library provides both a net.Listener and net.Conn implementation that
+can be used to handle situations in which you may be using the proxy protocol.
+Only proxy protocol version 1, the human-readable form, is understood.
+
+The only caveat is that we check for the "PROXY " prefix to determine if the protocol
+is being used. If that string may occur as part of your input, then it is ambiguous
+whether the protocol is being used and you may have problems.
+
+# Documentation
+
+Full documentation can be found [here](http://godoc.org/github.com/armon/go-proxyproto).
+
+# Examples
+
+Using the library is very simple:
+
+```
+
+// Create a listener
+list, err := net.Listen("tcp", "...")
+
+// Wrap listener in a proxyproto listener
+proxyList := &proxyproto.Listener{Listener: list}
+conn, err := proxyList.Accept()
+
+...
+```
+
diff --git a/vendor/github.com/armon/go-proxyproto/protocol.go b/vendor/github.com/armon/go-proxyproto/protocol.go
new file mode 100644
index 00000000..9df25e9a
--- /dev/null
+++ b/vendor/github.com/armon/go-proxyproto/protocol.go
@@ -0,0 +1,253 @@
+package proxyproto
+
+import (
+    "bufio"
+    "bytes"
+    "errors"
+    "fmt"
+    "io"
+    "log"
+    "net"
+    "strconv"
+    "strings"
+    "sync"
+    "time"
+)
+
+var (
+    // prefix is the string we look for at the start of a connection
+    // to check if this connection is using the proxy protocol
+    prefix    = []byte("PROXY ")
+    prefixLen = len(prefix)
+
+    ErrInvalidUpstream = errors.New("upstream connection address not trusted for PROXY information")
+)
+
+// SourceChecker can be used to decide whether to trust the PROXY info or pass
+// the original connection address through. If set, the connecting address is
+// passed in as an argument.
+//
+// If error is not nil, the call to Accept() will fail. If the reason for
+// triggering this failure is a disallowed source, the function should return
+// ErrInvalidUpstream.
+//
+// If bool is true, the PROXY-set address is used.
+//
+// If bool is false, the connection's remote address is used, rather than the
+// address claimed in the PROXY info.
+type SourceChecker func(net.Addr) (bool, error)
+
+// Listener is used to wrap an underlying listener,
+// whose connections may be using the HAProxy Proxy Protocol (version 1).
+// If the connection is using the protocol, the RemoteAddr() will return
+// the correct client address.
+//
+// Optionally define ProxyHeaderTimeout to set a maximum time to
+// receive the Proxy Protocol Header. Zero means no timeout.
+type Listener struct {
+    Listener           net.Listener
+    ProxyHeaderTimeout time.Duration
+    SourceCheck        SourceChecker
+}
+
+// Conn is used to wrap an underlying connection which
+// may be speaking the Proxy Protocol. If it is, the RemoteAddr() will
+// return the address of the client instead of the proxy address.
+type Conn struct {
+    bufReader          *bufio.Reader
+    conn               net.Conn
+    dstAddr            *net.TCPAddr
+    srcAddr            *net.TCPAddr
+    useConnAddr        bool
+    once               sync.Once
+    proxyHeaderTimeout time.Duration
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (p *Listener) Accept() (net.Conn, error) {
+    // Get the underlying connection
+    conn, err := p.Listener.Accept()
+    if err != nil {
+        return nil, err
+    }
+    var useConnAddr bool
+    if p.SourceCheck != nil {
+        allowed, err := p.SourceCheck(conn.RemoteAddr())
+        if err != nil {
+            return nil, err
+        }
+        if !allowed {
+            useConnAddr = true
+        }
+    }
+    newConn := NewConn(conn, p.ProxyHeaderTimeout)
+    newConn.useConnAddr = useConnAddr
+    return newConn, nil
+}
+
+// Close closes the underlying listener.
+func (p *Listener) Close() error {
+    return p.Listener.Close()
+}
+
+// Addr returns the underlying listener's network address.
+func (p *Listener) Addr() net.Addr {
+    return p.Listener.Addr()
+}
+
+// NewConn is used to wrap a net.Conn that may be speaking
+// the proxy protocol into a proxyproto.Conn
+func NewConn(conn net.Conn, timeout time.Duration) *Conn {
+    pConn := &Conn{
+        bufReader:          bufio.NewReader(conn),
+        conn:               conn,
+        proxyHeaderTimeout: timeout,
+    }
+    return pConn
+}
+
+// Read checks for the proxy protocol header when doing
+// the initial scan. If there is an error parsing the header,
+// it is returned and the socket is closed.
+func (p *Conn) Read(b []byte) (int, error) {
+    var err error
+    p.once.Do(func() { err = p.checkPrefix() })
+    if err != nil {
+        return 0, err
+    }
+    return p.bufReader.Read(b)
+}
+
+func (p *Conn) Write(b []byte) (int, error) {
+    return p.conn.Write(b)
+}
+
+func (p *Conn) Close() error {
+    return p.conn.Close()
+}
+
+func (p *Conn) LocalAddr() net.Addr {
+    p.checkPrefixOnce()
+    if p.dstAddr != nil && !p.useConnAddr {
+        return p.dstAddr
+    }
+    return p.conn.LocalAddr()
+}
+
+// RemoteAddr returns the address of the client if the proxy
+// protocol is being used, otherwise just returns the address of
+// the socket peer. If there is an error parsing the header, the
+// address of the client is not returned, and the socket is closed.
+// One implication of this is that the call could block if the
+// client is slow. Using a Deadline is recommended if this is called
+// before Read().
+func (p *Conn) RemoteAddr() net.Addr {
+    p.checkPrefixOnce()
+    if p.srcAddr != nil && !p.useConnAddr {
+        return p.srcAddr
+    }
+    return p.conn.RemoteAddr()
+}
+
+func (p *Conn) SetDeadline(t time.Time) error {
+    return p.conn.SetDeadline(t)
+}
+
+func (p *Conn) SetReadDeadline(t time.Time) error {
+    return p.conn.SetReadDeadline(t)
+}
+
+func (p *Conn) SetWriteDeadline(t time.Time) error {
+    return p.conn.SetWriteDeadline(t)
+}
+
+func (p *Conn) checkPrefixOnce() {
+    p.once.Do(func() {
+        if err := p.checkPrefix(); err != nil && err != io.EOF {
+            log.Printf("[ERR] Failed to read proxy prefix: %v", err)
+            p.Close()
+            p.bufReader = bufio.NewReader(p.conn)
+        }
+    })
+}
+
+func (p *Conn) checkPrefix() error {
+    if p.proxyHeaderTimeout != 0 {
+        readDeadLine := time.Now().Add(p.proxyHeaderTimeout)
+        p.conn.SetReadDeadline(readDeadLine)
+        defer p.conn.SetReadDeadline(time.Time{})
+    }
+
+    // Incrementally check each byte of the prefix
+    for i := 1; i <= prefixLen; i++ {
+        inp, err := p.bufReader.Peek(i)
+
+        if err != nil {
+            if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
+                return nil
+            } else {
+                return err
+            }
+        }
+
+        // Check for a prefix mis-match, quit early
+        if !bytes.Equal(inp, prefix[:i]) {
+            return nil
+        }
+    }
+
+    // Read the header line
+    header, err := p.bufReader.ReadString('\n')
+    if err != nil {
+        p.conn.Close()
+        return err
+    }
+
+    // Strip the carriage return and new line
+    header = header[:len(header)-2]
+
+    // Split on spaces, should be (PROXY <type> <src addr> <dst addr> <src port> <dst port>)
+    parts := strings.Split(header, " ")
+    if len(parts) != 6 {
+        p.conn.Close()
+        return fmt.Errorf("Invalid header line: %s", header)
+    }
+
+    // Verify the type is known
+    switch parts[1] {
+    case "TCP4":
+    case "TCP6":
+    default:
+        p.conn.Close()
+        return fmt.Errorf("Unhandled address type: %s", parts[1])
+    }
+
+    // Parse out the source address
+    ip := net.ParseIP(parts[2])
+    if ip == nil {
+        p.conn.Close()
+        return fmt.Errorf("Invalid source ip: %s", parts[2])
+    }
+    port, err := strconv.Atoi(parts[4])
+    if err != nil {
+        p.conn.Close()
+        return fmt.Errorf("Invalid source port: %s", parts[4])
+    }
+    p.srcAddr = &net.TCPAddr{IP: ip, Port: port}
+
+    // Parse out the destination address
+    ip = net.ParseIP(parts[3])
+    if ip == nil {
+        p.conn.Close()
+        return fmt.Errorf("Invalid destination ip: %s", parts[3])
+    }
+    port, err = strconv.Atoi(parts[5])
+    if err != nil {
+        p.conn.Close()
+        return fmt.Errorf("Invalid destination port: %s", parts[5])
+    }
+    p.dstAddr = &net.TCPAddr{IP: ip, Port: port}
+
+    return nil
+}
diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/armon/go-radix/.gitignore
new file mode 100644
index 00000000..00268614
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml
new file mode 100644
index 00000000..1a0bbea6
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go:
+  - tip
diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE
new file mode 100644
index 00000000..a5df10e6
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/LICENSE
@@ -0,0
+1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md new file mode 100644 index 00000000..26f42a28 --- /dev/null +++ b/vendor/github.com/armon/go-radix/README.md @@ -0,0 +1,38 @@ +go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) +========= + +Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := radix.New() +r.Insert("foo", 1) +r.Insert("bar", 2) +r.Insert("foobar", 2) + +// Find the longest prefix match +m, _, _ := r.LongestPrefix("foozip") +if m != "foo" { + panic("should be foo") +} +``` + diff --git a/vendor/github.com/armon/go-radix/go.mod b/vendor/github.com/armon/go-radix/go.mod new file mode 100644 index 00000000..4336aa29 --- /dev/null +++ b/vendor/github.com/armon/go-radix/go.mod @@ -0,0 +1 @@ +module github.com/armon/go-radix diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go new file mode 100644 index 00000000..e2bb22eb --- /dev/null +++ b/vendor/github.com/armon/go-radix/radix.go @@ -0,0 +1,540 @@ +package radix + +import ( + "sort" + "strings" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(s string, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + key string + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *node +} + +type node struct { + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix string + + // Edges should be stored in-order for iteration. 
+    // We avoid a fully materialized slice to save memory,
+    // since in most cases we expect to be sparse
+    edges edges
+}
+
+func (n *node) isLeaf() bool {
+    return n.leaf != nil
+}
+
+func (n *node) addEdge(e edge) {
+    n.edges = append(n.edges, e)
+    n.edges.Sort()
+}
+
+func (n *node) updateEdge(label byte, node *node) {
+    num := len(n.edges)
+    idx := sort.Search(num, func(i int) bool {
+        return n.edges[i].label >= label
+    })
+    if idx < num && n.edges[idx].label == label {
+        n.edges[idx].node = node
+        return
+    }
+    panic("replacing missing edge")
+}
+
+func (n *node) getEdge(label byte) *node {
+    num := len(n.edges)
+    idx := sort.Search(num, func(i int) bool {
+        return n.edges[i].label >= label
+    })
+    if idx < num && n.edges[idx].label == label {
+        return n.edges[idx].node
+    }
+    return nil
+}
+
+func (n *node) delEdge(label byte) {
+    num := len(n.edges)
+    idx := sort.Search(num, func(i int) bool {
+        return n.edges[i].label >= label
+    })
+    if idx < num && n.edges[idx].label == label {
+        copy(n.edges[idx:], n.edges[idx+1:])
+        n.edges[len(n.edges)-1] = edge{}
+        n.edges = n.edges[:len(n.edges)-1]
+    }
+}
+
+type edges []edge
+
+func (e edges) Len() int {
+    return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+    return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+    e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+    sort.Sort(e)
+}
+
+// Tree implements a radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over
+// a standard hash map is prefix-based lookups and
+// ordered iteration.
+type Tree struct {
+    root *node
+    size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+    return NewFromMap(nil)
+}
+
+// NewFromMap returns a new tree containing the keys
+// from an existing map
+func NewFromMap(m map[string]interface{}) *Tree {
+    t := &Tree{root: &node{}}
+    for k, v := range m {
+        t.Insert(k, v)
+    }
+    return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+    return t.size
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+    max := len(k1)
+    if l := len(k2); l < max {
+        max = l
+    }
+    var i int
+    for i = 0; i < max; i++ {
+        if k1[i] != k2[i] {
+            break
+        }
+    }
+    return i
+}
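A minimal sketch of the Insert semantics documented just below (keys and values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/armon/go-radix"
)

func main() {
	r := radix.New()

	// First insert of a key: there is no previous value, so updated is false.
	old, updated := r.Insert("foo", 1)
	fmt.Println(old, updated) // <nil> false

	// Inserting the same key again replaces the value and
	// returns the previous one.
	old, updated = r.Insert("foo", 2)
	fmt.Println(old, updated) // 1 true
}
```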
+
+// Insert is used to add a new entry or update
+// an existing entry. Returns if updated.
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
+    var parent *node
+    n := t.root
+    search := s
+    for {
+        // Handle key exhaustion
+        if len(search) == 0 {
+            if n.isLeaf() {
+                old := n.leaf.val
+                n.leaf.val = v
+                return old, true
+            }
+
+            n.leaf = &leafNode{
+                key: s,
+                val: v,
+            }
+            t.size++
+            return nil, false
+        }
+
+        // Look for the edge
+        parent = n
+        n = n.getEdge(search[0])
+
+        // No edge, create one
+        if n == nil {
+            e := edge{
+                label: search[0],
+                node: &node{
+                    leaf: &leafNode{
+                        key: s,
+                        val: v,
+                    },
+                    prefix: search,
+                },
+            }
+            parent.addEdge(e)
+            t.size++
+            return nil, false
+        }
+
+        // Determine longest prefix of the search key on match
+        commonPrefix := longestPrefix(search, n.prefix)
+        if commonPrefix == len(n.prefix) {
+            search = search[commonPrefix:]
+            continue
+        }
+
+        // Split the node
+        t.size++
+        child := &node{
+            prefix: search[:commonPrefix],
+        }
+        parent.updateEdge(search[0], child)
+
+        // Restore the existing node
+        child.addEdge(edge{
+            label: n.prefix[commonPrefix],
+            node:  n,
+        })
+        n.prefix = n.prefix[commonPrefix:]
+
+        // Create a new leaf node
+        leaf := &leafNode{
+            key: s,
+            val: v,
+        }
+
+        // If the new key is a subset, add it to this node
+        search = search[commonPrefix:]
+        if len(search) == 0 {
+            child.leaf = leaf
+            return nil, false
+        }
+
+        // Create a new edge for the node
+        child.addEdge(edge{
+            label: search[0],
+            node: &node{
+                leaf:   leaf,
+                prefix: search,
+            },
+        })
+        return nil, false
+    }
+}
+
+// Delete is used to delete a key, returning the previous
+// value and if it was deleted
+func (t *Tree) Delete(s string) (interface{}, bool) {
+    var parent *node
+    var label byte
+    n := t.root
+    search := s
+    for {
+        // Check for key exhaustion
+        if len(search) == 0 {
+            if !n.isLeaf() {
+                break
+            }
+            goto DELETE
+        }
+
+        // Look for an edge
+        parent = n
+        label = search[0]
+        n = n.getEdge(label)
+        if n == nil {
+            break
+        }
+
+        // Consume the search prefix
+        if strings.HasPrefix(search, n.prefix) {
+            search = search[len(n.prefix):]
+        } else {
+            break
+        }
+    }
+    return nil, false
+
+DELETE:
+    // Delete the leaf
+    leaf := n.leaf
+    n.leaf = nil
+    t.size--
+
+    // Check if we should delete this node from the parent
+    if parent != nil && len(n.edges) == 0 {
+        parent.delEdge(label)
+    }
+
+    // Check if we should merge this node
+    if n != t.root && len(n.edges) == 1 {
+        n.mergeChild()
+    }
+
+    // Check if we should merge the parent's other child
+    if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
+        parent.mergeChild()
+    }
+
+    return leaf.val, true
+}
+
+// DeletePrefix is used to delete the subtree under a prefix
+// Returns how many nodes were deleted
+// Use this to delete large subtrees efficiently
+func (t *Tree) DeletePrefix(s string) int {
+    return t.deletePrefix(nil, t.root, s)
+}
+
+// deletePrefix does a recursive deletion
+func (t *Tree) deletePrefix(parent, n *node, prefix string) int {
+    // Check for key exhaustion
+    if len(prefix) == 0 {
+        // Remove the leaf node
+        subTreeSize := 0
+        // recursively walk from all edges of the node to be deleted
+        recursiveWalk(n, func(s string, v interface{}) bool {
+            subTreeSize++
+            return false
+        })
+        if n.isLeaf() {
+            n.leaf = nil
+        }
+        n.edges = nil // deletes the entire subtree
+
+        // Check if we should merge the parent's other child
+        if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
+            parent.mergeChild()
+        }
+        t.size -= subTreeSize
+        return subTreeSize
+    }
+
+    // Look for an edge
+    label := prefix[0]
+    child := n.getEdge(label)
+    if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) {
+        return 0
+    }
+
+    // Consume the search prefix
+    if len(child.prefix) > len(prefix) {
+        prefix = prefix[len(prefix):]
+    } else {
+        prefix = prefix[len(child.prefix):]
+    }
+    return t.deletePrefix(n, child, prefix)
+}
+
+func (n *node) mergeChild() {
+    e := n.edges[0]
+    child := e.node
+    n.prefix = n.prefix + child.prefix
+    n.leaf = child.leaf
+    n.edges = child.edges
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(s string) (interface{}, bool) {
+    n := t.root
+    search := s
+    for {
+        // Check for key exhaustion
+        if len(search) == 0 {
+            if n.isLeaf() {
+                return n.leaf.val, true
+            }
+            break
+        }
+
+        // Look for an edge
+        n = n.getEdge(search[0])
+        if n == nil {
+            break
+        }
+
+        // Consume the search prefix
+        if strings.HasPrefix(search, n.prefix) {
+            search = search[len(n.prefix):]
+        } else {
+            break
+        }
+    }
+    return nil, false
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
+    var last *leafNode
+    n := t.root
+    search := s
+    for {
+        // Look for a leaf node
+        if n.isLeaf() {
+            last = n.leaf
+        }
+
+        // Check for key exhaustion
+        if len(search) == 0 {
+            break
+        }
+
+        // Look for an edge
+        n = n.getEdge(search[0])
+        if n == nil {
+            break
+        }
+
+        // Consume the search prefix
+        if strings.HasPrefix(search, n.prefix) {
+            search = search[len(n.prefix):]
+        } else {
+            break
+        }
+    }
+    if last != nil {
+        return last.key, last.val, true
+    }
+    return "", nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (t *Tree) Minimum() (string, interface{}, bool) {
+    n := t.root
+    for {
+        if n.isLeaf() {
+            return n.leaf.key, n.leaf.val, true
+        }
+        if len(n.edges) > 0 {
+            n = n.edges[0].node
+        } else {
+            break
+        }
+    }
+    return "", nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (t *Tree) Maximum() (string, interface{}, bool) {
+    n := t.root
+    for {
+        if num := len(n.edges); num > 0 {
+            n = n.edges[num-1].node
+            continue
+        }
+        if n.isLeaf() {
+            return n.leaf.key, n.leaf.val, true
+        }
+        break
+    }
+    return "", nil, false
+}
+
+// Walk is used to walk the tree
+func (t *Tree) Walk(fn WalkFn) {
+    recursiveWalk(t.root, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
+    n := t.root
+    search := prefix
+    for {
+        // Check for key exhaustion
+        if len(search) == 0 {
+            recursiveWalk(n, fn)
+            return
+        }
+
+        // Look for an edge
+        n = n.getEdge(search[0])
+        if n == nil {
+            break
+        }
+
+        // Consume the search prefix
+        if strings.HasPrefix(search, n.prefix) {
+            search = search[len(n.prefix):]
+
+        } else if strings.HasPrefix(n.prefix, search) {
+            // Child may be under our search prefix
+            recursiveWalk(n, fn)
+            return
+        } else {
+            break
+        }
+    }
+
+}
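The distinction between WalkPrefix above and WalkPath below is easy to miss, so a small sketch (keys are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/armon/go-radix"
)

func main() {
	r := radix.New()
	r.Insert("api", 1)
	r.Insert("api/v1", 2)
	r.Insert("api/v1/users", 3)

	// WalkPrefix visits entries *under* the prefix:
	// api/v1 and api/v1/users.
	r.WalkPrefix("api/v1", func(k string, v interface{}) bool {
		fmt.Println("under:", k)
		return false
	})

	// WalkPath visits entries *above* (on the path to) the key:
	// api, api/v1, and api/v1/users.
	r.WalkPath("api/v1/users", func(k string, v interface{}) bool {
		fmt.Println("above:", k)
		return false
	})
}
```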
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (t *Tree) WalkPath(path string, fn WalkFn) {
+    n := t.root
+    search := path
+    for {
+        // Visit the leaf values if any
+        if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+            return
+        }
+
+        // Check for key exhaustion
+        if len(search) == 0 {
+            return
+        }
+
+        // Look for an edge
+        n = n.getEdge(search[0])
+        if n == nil {
+            return
+        }
+
+        // Consume the search prefix
+        if strings.HasPrefix(search, n.prefix) {
+            search = search[len(n.prefix):]
+        } else {
+            break
+        }
+    }
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *node, fn WalkFn) bool {
+    // Visit the leaf values if any
+    if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+        return true
+    }
+
+    // Recurse on the children
+    for _, e := range n.edges {
+        if recursiveWalk(e.node, fn) {
+            return true
+        }
+    }
+    return false
+}
+
+// ToMap is used to walk the tree and convert it into a map
+func (t *Tree) ToMap() map[string]interface{} {
+    out := make(map[string]interface{}, t.size)
+    t.Walk(func(k string, v interface{}) bool {
+        out[k] = v
+        return false
+    })
+    return out
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
index a2c5817c..9cf7eaf4 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -208,7 +208,7 @@ func (e errorList) Error() string {
     // How do we want to handle the array size being zero
     if size := len(e); size > 0 {
         for i := 0; i < size; i++ {
-            msg += fmt.Sprintf("%s", e[i].Error())
+            msg += e[i].Error()
             // We check the next index to see if it is within the slice.
             // If it is, then we append a newline. We do this, because unit tests
             // could be broken with the additional '\n'
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
index 11c52c38..a4eb6a7f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -70,7 +70,7 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer
             value = value.FieldByNameFunc(func(name string) bool {
                 if c == name {
                     return true
-                } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+                } else if !caseSensitive && strings.EqualFold(name, c) {
                     return true
                 }
                 return false
@@ -185,13 +185,12 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
 
 // SetValueAtPath sets a value at the case insensitive lexical path inside
 // of a structure.
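A rough usage sketch of the SetValueAtPath wrapper defined just below (the Outer/Inner types are hypothetical, for illustration only):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type Inner struct{ Value string }
type Outer struct{ Inner Inner }

func main() {
	var o Outer
	// The path lookup is case insensitive for SetValueAtPath.
	awsutil.SetValueAtPath(&o, "inner.value", "hello")
	fmt.Println(o.Inner.Value) // hello
}
```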
 func SetValueAtPath(i interface{}, path string, v interface{}) {
-    if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
-        for _, rval := range rvals {
-            if rval.Kind() == reflect.Ptr && rval.IsNil() {
-                continue
-            }
-            setValue(rval, v)
+    rvals := rValuesAtPath(i, path, true, false, v == nil)
+    for _, rval := range rvals {
+        if rval.Kind() == reflect.Ptr && rval.IsNil() {
+            continue
         }
+        setValue(rval, v)
     }
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
index 70960538..03334d69 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -12,6 +12,7 @@ import (
 type Config struct {
     Config        *aws.Config
     Handlers      request.Handlers
+    PartitionID   string
     Endpoint      string
     SigningRegion string
     SigningName   string
@@ -64,7 +65,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op
     default:
         maxRetries := aws.IntValue(cfg.MaxRetries)
         if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
-            maxRetries = 3
+            maxRetries = DefaultRetryerMaxNumRetries
         }
         svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
     }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
index a397b0d0..9f6af19d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -1,6 +1,7 @@
 package client
 
 import (
+    "math"
     "strconv"
     "time"
 
@@ -9,82 +10,142 @@
 )
 
 // DefaultRetryer implements basic retry logic using exponential backoff for
-// most services. If you want to implement custom retry logic, implement the
-// request.Retryer interface or create a structure type that composes this
-// struct and override the specific methods. For example, to override only
-// the MaxRetries method:
+// most services. If you want to implement custom retry logic, you can implement the
+// request.Retryer interface.
 //
-//    type retryer struct {
-//        client.DefaultRetryer
-//    }
-//
-//    // This implementation always has 100 max retries
-//    func (d retryer) MaxRetries() int { return 100 }
 type DefaultRetryer struct {
+    // NumMaxRetries is the maximum number of retries that will be performed.
+    // By default, this is zero.
+    NumMaxRetries int
+
+    // MinRetryDelay is the minimum retry delay after which retry will be performed.
+    // If not set, the value is 0ns.
+    MinRetryDelay time.Duration
+
+    // MinThrottleDelay is the minimum retry delay when throttled.
+    // If not set, the value is 0ns.
+    MinThrottleDelay time.Duration
+
+    // MaxRetryDelay is the maximum retry delay before which retry must be performed.
+    // If not set, the value is 0ns.
+    MaxRetryDelay time.Duration
+
+    // MaxThrottleDelay is the maximum retry delay when throttled.
+    // If not set, the value is 0ns.
+    MaxThrottleDelay time.Duration
+}
+
+const (
+    // DefaultRetryerMaxNumRetries sets maximum number of retries
+    DefaultRetryerMaxNumRetries = 3
+
+    // DefaultRetryerMinRetryDelay sets minimum retry delay
+    DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+    // DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+    DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+    // DefaultRetryerMaxRetryDelay sets maximum retry delay
+    DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+    // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+    DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
 // MaxRetries returns the maximum number of retries the service will use to make
 // an individual API request.
 func (d DefaultRetryer) MaxRetries() int {
     return d.NumMaxRetries
 }
 
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+    if d.MinRetryDelay == 0 {
+        d.MinRetryDelay = DefaultRetryerMinRetryDelay
+    }
+    if d.MaxRetryDelay == 0 {
+        d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+    }
+    if d.MinThrottleDelay == 0 {
+        d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+    }
+    if d.MaxThrottleDelay == 0 {
+        d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+    }
+}
+
 // RetryRules returns the delay duration before retrying this request again
 func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
-    // Set the upper limit of delay in retrying at ~five minutes
-    minTime := 30
-    throttle := d.shouldThrottle(r)
-    if throttle {
-        if delay, ok := getRetryDelay(r); ok {
-            return delay
-        }
-        minTime = 500
+    // if number of max retries is zero, no retries will be performed.
+    if d.NumMaxRetries == 0 {
+        return 0
+    }
+
+    // Sets default value for retryer members
+    d.setRetryerDefaults()
+
+    // minDelay is the minimum retryer delay
+    minDelay := d.MinRetryDelay
+
+    var initialDelay time.Duration
+
+    isThrottle := r.IsErrorThrottle()
+    if isThrottle {
+        if delay, ok := getRetryAfterDelay(r); ok {
+            initialDelay = delay
+        }
+        minDelay = d.MinThrottleDelay
     }
 
     retryCount := r.RetryCount
-    if throttle && retryCount > 8 {
-        retryCount = 8
-    } else if retryCount > 13 {
-        retryCount = 13
+
+    // maxDelay is the maximum retryer delay
+    maxDelay := d.MaxRetryDelay
+
+    if isThrottle {
+        maxDelay = d.MaxThrottleDelay
+    }
+
+    var delay time.Duration
+
+    // Logic to cap the retry count based on the minDelay provided
+    actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+    if actualRetryCount < 63-retryCount {
+        delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+        if delay > maxDelay {
+            delay = getJitterDelay(maxDelay / 2)
+        }
+    } else {
+        delay = getJitterDelay(maxDelay / 2)
     }
+    return delay + initialDelay
+}
 
-    delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
-    return time.Duration(delay) * time.Millisecond
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+    return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
 }
 
 // ShouldRetry returns true if the request should be retried.
 func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+    // ShouldRetry returns false if the number of max retries is 0.
+ if d.NumMaxRetries == 0 { + return false + } + // If one of the other handlers already set the retry state // we don't want to override it based on the service's state if r.Retryable != nil { return *r.Retryable } - - if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { - return true - } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. -func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 502: - case 503: - case 504: - default: - return r.IsErrorThrottle() - } - - return true + return r.IsErrorRetryable() || r.IsErrorThrottle() } // This will look in the Retry-After header, RFC 7231, for how long // it will wait before attempting another request -func getRetryDelay(r *request.Request) (time.Duration, bool) { +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { if !canUseRetryAfterHeader(r) { return 0, false } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index 7b5e1276..8958c32d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -67,10 +67,14 @@ func logRequest(r *request.Request) { if !bodySeekable { r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) } - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.ResetBody() + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. + if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } } r.Config.Logger.Log(fmt.Sprintf(logReqMsg, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 920e9fdd..0c48f72e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -5,6 +5,7 @@ type ClientInfo struct { ServiceName string ServiceID string APIVersion string + PartitionID string Endpoint string SigningName string SigningRegion string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go new file mode 100644 index 00000000..881d575f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go @@ -0,0 +1,28 @@ +package client + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// NoOpRetryer provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type NoOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. 
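As a sketch of how the retryers in this file can be wired into a session via request.WithRetryer (the delay values below are arbitrary examples):

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Disable retries entirely with the NoOpRetryer.
	noRetry := session.Must(session.NewSession(
		request.WithRetryer(aws.NewConfig(), client.NoOpRetryer{}),
	))
	_ = noRetry

	// Or tune the DefaultRetryer's retry count and delay bounds.
	tuned := session.Must(session.NewSession(
		request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{
			NumMaxRetries:    5,
			MinRetryDelay:    50 * time.Millisecond,
			MinThrottleDelay: 500 * time.Millisecond,
			MaxRetryDelay:    5 * time.Second,
			MaxThrottleDelay: 30 * time.Second,
		}),
	))
	_ = tuned
}
```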
+func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 10634d17..93ebbcc1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -20,7 +20,7 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, // all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), @@ -246,12 +246,18 @@ type Config struct { // Disabling this feature is useful when you want to use local endpoints // for testing that do not support the modeled host prefix pattern. DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint } // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(aws.NewConfig(). // WithMaxRetries(3), @@ -420,6 +426,20 @@ func (c *Config) MergeIn(cfgs ...*Config) { } } +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + func mergeInConfig(dst *Config, other *Config) { if other == nil { return @@ -520,6 +540,14 @@ func mergeInConfig(dst *Config, other *Config) { if other.DisableEndpointHostPrefix != nil { dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix } + + if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { + dst.STSRegionalEndpoint = other.STSRegionalEndpoint + } + + if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { + dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index ff5d58e0..4e076c18 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -179,6 +179,242 @@ func IntValueMap(src map[string]*int) map[string]int { return dst } +// Uint returns a pointer to the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. 
+func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values uinto a slice of +// uint pointers +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pointers uinto a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values uinto a string +// map of uint pointers +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pointers uinto a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int8 returns a pointer to the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Value returns the value of the int8 pointer passed in or +// 0 if the pointer is nil. +func Int8Value(v *int8) int8 { + if v != nil { + return *v + } + return 0 +} + +// Int8Slice converts a slice of int8 values into a slice of +// int8 pointers +func Int8Slice(src []int8) []*int8 { + dst := make([]*int8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int8ValueSlice converts a slice of int8 pointers into a slice of +// int8 values +func Int8ValueSlice(src []*int8) []int8 { + dst := make([]int8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int8Map converts a string map of int8 values into a string +// map of int8 pointers +func Int8Map(src map[string]int8) map[string]*int8 { + dst := make(map[string]*int8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int8ValueMap converts a string map of int8 pointers into a string +// map of int8 values +func Int8ValueMap(src map[string]*int8) map[string]int8 { + dst := make(map[string]int8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int16 returns a pointer to the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Value returns the value of the int16 pointer passed in or +// 0 if the pointer is nil. 
+func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. +func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + // Int64 returns a pointer to the int64 value passed in. func Int64(v int64) *int64 { return &v @@ -238,6 +474,301 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. 
+func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. +func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
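All of these pointer/value converters follow the same pattern; a brief sketch with a hypothetical Settings struct (modeled on how SDK API structs represent optional members):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

// Settings is a hypothetical struct with an optional numeric field.
type Settings struct {
	Workers *uint32
}

func main() {
	s := Settings{Workers: aws.Uint32(8)}
	fmt.Println(aws.Uint32Value(s.Workers)) // 8
	fmt.Println(aws.Uint32Value(nil))       // 0 (nil-safe)
}
```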
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. +func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
+func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + // Float64 returns a pointer to the float64 value passed in. func Float64(v float64) *float64 { return &v diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index f8853d78..0c60e612 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) { Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } - // Catch all other request errors. + // Catch all request errors, and let the default retrier determine + // if the error is retryable. r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable // Override the error with a context canceled error, if that was canceled. ctx := r.Context() @@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. 
-var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } - r.RetryCount++ - r.Error = nil - } -}} + r.RetryCount++ + r.Error = nil + } + }} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. Will set r.Error if the endpoint or diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 894bbc7f..4af59215 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -50,9 +50,10 @@ package credentials import ( "fmt" - "github.com/aws/aws-sdk-go/aws/awserr" "sync" "time" + + "github.com/aws/aws-sdk-go/aws/awserr" ) // AnonymousCredentials is an empty Credential object that can be used as @@ -83,6 +84,12 @@ type Value struct { ProviderName string } +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + // A Provider is the interface for any component which will provide credentials // Value. A provider is required to manage its own Expired state, and what to // be expired means. 
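A minimal sketch of the new HasKeys helper, using the SDK's existing static provider (the key values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	v, err := creds.Get()
	if err == nil && v.HasKeys() {
		fmt.Println("credentials usable, provider:", v.ProviderName)
	}
}
```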
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index c2b2c5d6..1a7af53a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -98,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin return p } -// NewCredentialsClient returns a Credentials wrapper for retrieving credentials -// from an arbitrary endpoint concurrently. The client will request the +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index b6dbfd24..2e528d13 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -200,7 +200,7 @@ type AssumeRoleProvider struct { // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must // have a value between 0 and 1. Any other value may lead to expected behavior. // With a MaxJitterFrac value of 0, default) will no jitter will be used. - // + // // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the // AssumeRole call will be made with an arbitrary Duration between 27m and // 30m. @@ -258,7 +258,6 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(* // Retrieve generates a new set of temporary credentials using STS. func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { - // Apply defaults where parameters are not set. if p.RoleSessionName == "" { // Try to work out a role name that will hopefully end up unique. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go new file mode 100644 index 00000000..b20b6339 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,100 @@ +package stscreds + +import ( + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" +) + +const ( + // ErrCodeWebIdentity will be used as an error code when constructing + // a new error to be returned during session creation or retrieval. + ErrCodeWebIdentity = "WebIdentityErr" + + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// now is used to return a time.Time object representing +// the current time. This can be used to easily test and +// compare test values. +var now = time.Now + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. 
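A sketch of constructing these credentials via the NewWebIdentityCredentials helper defined below (the role ARN and token file path are placeholders):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	creds := stscreds.NewWebIdentityCredentials(
		sess,
		"arn:aws:iam::123456789012:role/example-role", // placeholder role ARN
		"example-session",
		"/var/run/secrets/oidc/token", // placeholder OIDC token file
	)

	cfg := aws.NewConfig().WithCredentials(creds)
	_ = cfg
}
```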
+type WebIdentityRoleProvider struct { + credentials.Expiry + + client stsiface.STSAPI + ExpiryWindow time.Duration + + tokenFilePath string + roleARN string + roleSessionName string +} + +// NewWebIdentityCredentials will return a new set of credentials with a given +// configuration, role arn, and token file path. +func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { + svc := sts.New(c) + p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) + return credentials.NewCredentials(p) +} + +// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI +func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { + return &WebIdentityRoleProvider{ + client: svc, + tokenFilePath: path, + roleARN: roleARN, + roleSessionName: roleSessionName, + } +} + +// Retrieve will attempt to assume a role using the token located at the +// 'WebIdentityTokenFilePath' specified destination; if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { + b, err := ioutil.ReadFile(p.tokenFilePath) + if err != nil { + errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath) + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err) + } + + sessionName := p.roleSessionName + if len(sessionName) == 0 { + // session name is used to uniquely identify a session. This simply + // uses unix time in nanoseconds to uniquely identify sessions. + sessionName = strconv.FormatInt(now().UnixNano(), 10) + } + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + RoleArn: &p.roleARN, + RoleSessionName: &sessionName, + WebIdentityToken: aws.String(string(b)), + }) + // InvalidIdentityToken error is a temporary error that can occur + // when assuming a role with a JWT web identity token. + req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) + } + + p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) + + value := credentials.Value{ + AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), + SessionToken: aws.StringValue(resp.Credentials.SessionToken), + ProviderName: WebIdentityProviderName, + } + return value, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go index 152d785b..25a66d1d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -1,30 +1,61 @@ -// Package csm provides Client Side Monitoring (CSM) which enables sending metrics -// via UDP connection. Using the Start function will enable the reporting of -// metrics on a given port. If Start is called, with different parameters, again, -// a panic will occur. +// Package csm provides the Client Side Monitoring (CSM) client which enables +// sending metrics via UDP connection to the CSM agent. This package provides +// control options and configuration for the CSM client. The client can be +// controlled manually or automatically via the SDK's Session configuration. // -// Pause can be called to pause any metrics publishing on a given port. 
Sessions -// that have had their handlers modified via InjectHandlers may still be used. -// However, the handlers will act as a no-op meaning no metrics will be published. +// Enabling CSM client via SDK's Session configuration +// +// The CSM client can be enabled automatically via SDK's Session configuration. +// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT +// environment variable is set to a non-empty value. +// +// The configuration options for the CSM client via the SDK's session +// configuration are: +// +// * AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. +// +// * AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. +// +// Manually enabling the CSM client +// +// The CSM client can be started, paused, and resumed manually. The Start +// function will enable the CSM client to publish metrics to the CSM agent. It +// is safe to call Start concurrently, but if Start is called additional times +// with a different ClientID or address it will panic. // -// Example: // r, err := csm.Start("clientID", ":31000") // if err != nil { // panic(fmt.Errorf("failed starting CSM: %v", err)) // } // +// When controlling the CSM client manually, you must also inject its request +// handlers into the SDK's Session configuration for the SDK's API clients to +// publish metrics. +// // sess, err := session.NewSession(&aws.Config{}) // if err != nil { // panic(fmt.Errorf("failed loading session: %v", err)) // } // +// // Add CSM client's metric publishing request handlers to the SDK's +// // Session Configuration. // r.InjectHandlers(&sess.Handlers) // -// client := s3.New(sess) -// resp, err := client.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) +// Controlling CSM client +// +// Once the CSM client has been enabled, the Get function will return a Reporter +// value that you can use to pause and resume the metrics published to the CSM +// agent. If the Get function is called before the reporter is enabled with the +// Start function or via the SDK's Session configuration, nil will be returned. +// +// The Pause method can be called to stop the CSM client publishing metrics to +// the CSM agent. The Continue method will resume metric publishing. +// +// // Get the CSM client Reporter. +// r := csm.Get() // // // Will pause monitoring // r.Pause() @@ -35,12 +66,4 @@ // // // Resume monitoring // r.Continue() -// -// Start returns a Reporter that is used to enable or disable monitoring. If -// access to the Reporter is required later, calling Get will return the Reporter -// singleton. -// -// Example: -// r := csm.Get() -// r.Continue() package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go index 2f0c6eac..4b19e280 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -2,6 +2,7 @@ package csm import ( "fmt" + "strings" "sync" ) @@ -9,19 +10,40 @@ var ( lock sync.Mutex ) -// Client side metric handler names const ( - APICallMetricHandlerName = "awscsm.SendAPICallMetric" - APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" + // DefaultPort is used when no port is specified. + DefaultPort = "31000" + + // DefaultHost is the host that will be used when none is specified. 
+ DefaultHost = "127.0.0.1" ) -// Start will start the a long running go routine to capture +// AddressWithDefaults returns a CSM address built from the host and port +// values. If the host or port is not set, default values will be used +// instead. If host is "localhost" it will be replaced with "127.0.0.1". +func AddressWithDefaults(host, port string) string { + if len(host) == 0 || strings.EqualFold(host, "localhost") { + host = DefaultHost + } + + if len(port) == 0 { + port = DefaultPort + } + + // Only an IPv6 host can contain a colon + if strings.Contains(host, ":") { + return "[" + host + "]:" + port + } + + return host + ":" + port +} + +// Start will start a long running go routine to capture // client side metrics. Calling start multiple times will only // start the metric listener once and will panic if a different // client ID or port is passed in. // -// Example: -// r, err := csm.Start("clientID", "127.0.0.1:8094") +// r, err := csm.Start("clientID", "127.0.0.1:31000") // if err != nil { // panic(fmt.Errorf("expected no error, but received %v", err)) // } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go index 514fc373..82a3e345 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -16,25 +16,26 @@ var ( type metricChan struct { ch chan metric - paused int64 + paused *int64 } func newMetricChan(size int) metricChan { return metricChan{ - ch: make(chan metric, size), + ch: make(chan metric, size), + paused: new(int64), } } func (ch *metricChan) Pause() { - atomic.StoreInt64(&ch.paused, pausedEnum) + atomic.StoreInt64(ch.paused, pausedEnum) } func (ch *metricChan) Continue() { - atomic.StoreInt64(&ch.paused, runningEnum) + atomic.StoreInt64(ch.paused, runningEnum) } func (ch *metricChan) IsPaused() bool { - v := atomic.LoadInt64(&ch.paused) + v := atomic.LoadInt64(ch.paused) return v == pausedEnum } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go index d9aa5b06..9186587f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -10,11 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) -const ( - // DefaultPort is used when no port is specified - DefaultPort = "31000" -) - // Reporter will gather metrics of API requests made and // send those metrics to the CSM endpoint. type Reporter struct { @@ -71,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { XAmzRequestID: aws.String(r.RequestID), - AttemptCount: aws.Int(r.RetryCount + 1), AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), AccessKey: aws.String(creds.AccessKeyID), } @@ -123,7 +117,7 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) { Type: aws.String("ApiCall"), AttemptCount: aws.Int(r.RetryCount + 1), Region: r.Config.Region, - Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), XAmzRequestID: aws.String(r.RequestID), MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), } @@ -190,8 +184,9 @@ func (rep *Reporter) start() { } } -// Pause will pause the metric channel preventing any new metrics from -// being added. +// Pause will pause the metric channel preventing any new metrics from being +// added. 
It is safe to call concurrently with other calls to Pause, but if +// called concurrently with Continue it can lead to unexpected state. func (rep *Reporter) Pause() { lock.Lock() defer lock.Unlock() @@ -203,8 +198,9 @@ func (rep *Reporter) Pause() { rep.close() } -// Continue will reopen the metric channel and allow for monitoring -// to be resumed. +// Continue will reopen the metric channel and allow for monitoring to be +// resumed. It is safe to call concurrently with other calls to Continue, but +// if called concurrently with Pause it can lead to unexpected state. func (rep *Reporter) Continue() { lock.Lock() defer lock.Unlock() @@ -219,10 +215,18 @@ func (rep *Reporter) Continue() { rep.metricsCh.Continue() } +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + // InjectHandlers will enable client side metrics and inject the proper // handlers to handle how metrics are sent. // -// Example: +// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers +// multiple times may lead to unexpected behavior (e.g. duplicate metrics). +// // // Start must be called in order to inject the correct handlers // r, err := csm.Start("clientID", "127.0.0.1:8094") // if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index 2c8d5f56..d126764c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -152,18 +152,19 @@ type EC2IAMInfo struct { // An EC2InstanceIdentityDocument provides the shape for unmarshaling // an instance identity document type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index f0c1d31e..4c5636e3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -123,7 +123,7 @@ func unmarshalHandler(r *request.Request) { defer r.HTTPResponse.Body.Close() b := &bytes.Buffer{} if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = 
awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata respose", err) + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err) return } @@ -136,7 +136,7 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() b := &bytes.Buffer{} if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error respose", err) + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 87b9ff3f..343a2106 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -83,6 +83,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol p := &ps[i] custAddEC2Metadata(p) custAddS3DualStack(p) + custRegionalS3(p) custRmIotDataService(p) custFixAppAutoscalingChina(p) custFixAppAutoscalingUsGov(p) @@ -100,6 +101,33 @@ func custAddS3DualStack(p *partition) { custAddDualstack(p, "s3-control") } +func custRegionalS3(p *partition) { + if p.ID != "aws" { + return + } + + service, ok := p.Services["s3"] + if !ok { + return + } + + // If global endpoint already exists no customization needed. + if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + func custAddDualstack(p *partition, svcName string) { s, ok := p.Services[svcName] if !ok { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 14e32bc4..de07715d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -11,6 +11,8 @@ const ( AwsPartitionID = "aws" // AWS Standard partition. AwsCnPartitionID = "aws-cn" // AWS China partition. AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. ) // AWS Standard partition's regions. @@ -27,6 +29,7 @@ const ( EuWest1RegionID = "eu-west-1" // EU (Ireland). EuWest2RegionID = "eu-west-2" // EU (London). EuWest3RegionID = "eu-west-3" // EU (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -46,8 +49,18 @@ const ( UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). ) +// AWS ISO (US) partition's regions. +const ( + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). +) + // DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). // // Use DefaultPartitions() to get the list of the default partitions. 
func DefaultResolver() Resolver { @@ -55,7 +68,7 @@ func DefaultResolver() Resolver { } // DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). // // partitions := endpoints.DefaultPartitions // for _, p := range partitions { @@ -69,6 +82,8 @@ var defaultPartitions = partitions{ awsPartition, awscnPartition, awsusgovPartition, + awsisoPartition, + awsisobPartition, } // AwsPartition returns the Resolver for AWS Standard. @@ -82,7 +97,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") return reg }(), }, @@ -128,6 +143,9 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "EU (Paris)", }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, "sa-east-1": region{ Description: "South America (Sao Paulo)", }, @@ -166,6 +184,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -178,6 +197,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -189,6 +209,8 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -270,6 +292,12 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, "sa-east-1": endpoint{ Hostname: "api.ecr.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -308,6 +336,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, @@ -327,6 +356,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -334,8 +364,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", @@ -381,6 +415,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -409,6 +444,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -428,6 +464,7 @@ var awsPartition = 
partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -448,8 +485,14 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "appsync": service{ @@ -471,6 +514,7 @@ var awsPartition = partition{ "athena": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -478,10 +522,14 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -502,6 +550,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -533,9 +582,33 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "backup": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -547,6 +620,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -638,6 +712,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -693,6 +768,8 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -729,6 +806,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -739,6 +817,7 @@ var awsPartition = partition{ "codebuild": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -750,6 +829,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, 
"us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -791,6 +871,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -800,11 +881,12 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "codedeploy": service{ @@ -822,6 +904,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -863,6 +946,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -959,10 +1043,13 @@ var awsPartition = partition{ "comprehendmedical": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "config": service{ @@ -980,6 +1067,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -987,6 +1075,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "connect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cur": service{ Endpoints: endpoints{ @@ -1006,6 +1104,22 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "datapipeline": service{ Endpoints: endpoints{ @@ -1023,12 +1137,40 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + 
Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dax": service{ @@ -1040,6 +1182,8 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1056,6 +1200,7 @@ var awsPartition = partition{ "directconnect": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1067,6 +1212,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1095,6 +1241,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1117,6 +1264,24 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, "eu-central-1": endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1129,6 +1294,18 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1152,6 +1329,7 @@ var awsPartition = partition{ "ds": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1159,6 +1337,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, @@ -1180,11 +1359,17 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -1192,11 +1377,36 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "me-south-1": 
endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "ec2": service{ @@ -1216,6 +1426,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1249,6 +1460,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1277,11 +1489,12 @@ var awsPartition = partition{ Region: "us-west-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticbeanstalk": service{ @@ -1299,6 +1512,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1311,12 +1525,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1340,6 +1556,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1367,6 +1584,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", @@ -1431,11 +1649,12 @@ var awsPartition = partition{ Region: "us-west-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "events": service{ @@ -1453,6 +1672,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1463,6 +1683,7 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, 
"ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1474,6 +1695,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1488,25 +1710,57 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "forecast": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecastquery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "fsx": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1546,6 +1800,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1556,6 +1811,7 @@ var awsPartition = partition{ "glue": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1563,9 +1819,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1579,19 +1838,32 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, + "groundstation": service{ + + Endpoints: endpoints{ + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "guardduty": service{ IsRegionalized: boxedTrue, Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1603,11 +1875,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: 
"guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "health": service{ @@ -1652,7 +1949,9 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1666,16 +1965,23 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1690,15 +1996,120 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "iotevents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "kafka": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1717,6 +2128,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1729,11 +2141,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1753,12 +2168,6 @@ var awsPartition = partition{ "kms": service{ Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1771,6 +2180,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1778,6 +2188,19 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -1793,6 +2216,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1803,6 +2227,7 @@ var awsPartition = partition{ "license-manager": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1814,6 +2239,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1854,6 +2280,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1883,6 +2310,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, 
"ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1922,9 +2350,13 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1938,6 +2370,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, @@ -1977,6 +2410,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2025,6 +2459,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2037,15 +2472,42 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "mturk-requester": service{ @@ -2091,12 +2553,24 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, "eu-central-1": endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, "eu-west-1": endpoint{ Hostname: "rds.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2109,6 +2583,12 @@ var awsPartition = partition{ Region: "eu-west-2", }, }, + "me-south-1": endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2129,6 +2609,65 @@ var awsPartition = partition{ }, }, }, + "oidc": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "opsworks": service{ Endpoints: endpoints{ @@ -2212,19 +2751,88 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "projects.iot1click": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ram": service{ + "portal.sso": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -2234,6 +2842,7 @@ 
var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2258,6 +2867,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{dnsSuffix}", @@ -2282,6 +2892,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2295,10 +2906,14 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2317,19 +2932,47 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "robomaker": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2388,6 +3031,7 @@ var awsPartition = partition{ "runtime.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2395,16 +3039,44 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + 
}, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "s3": service{ - PartitionEndpoint: "us-east-1", + PartitionEndpoint: "aws-global", IsRegionalized: boxedTrue, Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -2429,6 +3101,12 @@ var awsPartition = partition{ Hostname: "s3.ap-southeast-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, @@ -2436,8 +3114,9 @@ var awsPartition = partition{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -2449,10 +3128,7 @@ var awsPartition = partition{ Hostname: "s3.sa-east-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, - "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, + "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{ Hostname: "s3.us-west-1.amazonaws.com", @@ -2615,6 +3291,19 @@ var awsPartition = partition{ }, }, }, + "savingsplans": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "sdb": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -2636,6 +3325,7 @@ var awsPartition = partition{ "secretsmanager": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2647,6 +3337,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -2681,6 +3372,7 @@ var awsPartition = partition{ "securityhub": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2688,9 +3380,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2703,6 +3397,9 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{ + Protocols: []string{"https"}, + }, "ap-northeast-1": endpoint{ Protocols: []string{"https"}, }, @@ -2736,6 +3433,9 @@ var awsPartition = partition{ "eu-west-3": endpoint{ Protocols: []string{"https"}, }, + "me-south-1": endpoint{ + Protocols: []string{"https"}, + }, "sa-east-1": endpoint{ Protocols: []string{"https"}, }, @@ -2801,6 +3501,7 @@ var awsPartition = partition{ "servicediscovery": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2808,9 
+3509,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2818,6 +3521,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "session.qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "shield": service{ IsRegionalized: boxedFalse, Defaults: endpoint{ @@ -2831,6 +3544,7 @@ var awsPartition = partition{ "sms": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2842,6 +3556,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2853,6 +3568,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2885,6 +3601,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2934,7 +3651,8 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", }, @@ -2958,6 +3676,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2980,6 +3699,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2990,6 +3710,7 @@ var awsPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3001,6 +3722,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3022,11 +3744,17 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -3034,47 +3762,63 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + 
"us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "sts": service{ PartitionEndpoint: "aws-global", - Defaults: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, + Endpoints: endpoints{ - "ap-east-1": endpoint{ - Hostname: "sts.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{ - Hostname: "sts.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "aws-global": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "sts-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -3105,9 +3849,15 @@ var awsPartition = partition{ }, }, "support": service{ + PartitionEndpoint: "aws-global", Endpoints: endpoints{ - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, "swf": service{ @@ -3125,6 +3875,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3147,6 +3898,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3154,6 +3906,40 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + 
}, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "transfer": service{ Endpoints: endpoints{ @@ -3164,9 +3950,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3226,12 +4014,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3278,6 +4070,7 @@ var awsPartition = partition{ "xray": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3289,6 +4082,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3374,6 +4168,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "batch": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "cloudformation": service{ Endpoints: endpoints{ @@ -3429,6 +4230,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "directconnect": service{ Endpoints: endpoints{ @@ -3554,6 +4361,21 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "glue": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -3574,7 +4396,8 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "kinesis": service{ @@ -3584,6 +4407,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -3591,6 +4421,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "logs": service{ Endpoints: endpoints{ @@ -3741,6 +4578,18 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, "swf": service{ Endpoints: endpoints{ @@ -3755,6 
+4604,31 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, }, } @@ -3845,6 +4719,23 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "athena": service{ Endpoints: endpoints{ @@ -3898,9 +4789,17 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "codecommit": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -3938,6 +4837,18 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "directconnect": service{ Endpoints: endpoints{ @@ -3963,6 +4874,12 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "dynamodb.us-gov-west-1.amazonaws.com", @@ -4064,6 +4981,7 @@ var awsusgovPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4078,6 +4996,16 @@ var awsusgovPartition = partition{ }, "glue": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, @@ -4091,6 +5019,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "health": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -4175,6 +5109,7 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4185,6 +5120,23 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-west-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -4204,6 +5156,13 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ @@ -4224,6 +5183,45 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "resource-groups": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "runtime.sagemaker": service{ Endpoints: endpoints{ @@ -4287,25 +5285,69 @@ var awsusgovPartition = partition{ }, }, }, - "sms": service{ + "secretsmanager": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "snowball": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, }, - }, - "sns": service{ - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ + "us-gov-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, }, }, @@ -4348,6 +5390,12 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "dynamodb.us-gov-west-1.amazonaws.com", @@ -4378,6 +5426,14 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + 
Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "translate": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -4406,3 +5462,630 @@ var awsusgovPartition = partition{ }, }, } + +// AwsIsoPartition returns the Resolver for AWS ISO (US). +func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + 
"health": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). 
+func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: 
endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index f82babf6..1f53d9cb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -3,6 +3,7 @@ package endpoints import ( "fmt" "regexp" + "strings" "github.com/aws/aws-sdk-go/aws/awserr" ) @@ -46,6 +47,108 @@ type Options struct { // // This option is ignored if StrictMatching is enabled. ResolveUnknownService bool + + // STS Regional Endpoint flag helps with resolving the STS endpoint + STSRegionalEndpoint STSRegionalEndpoint + + // S3 Regional Endpoint flag helps with resolving the S3 endpoint + S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint +} + +// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint +// options. +type STSRegionalEndpoint int + +func (e STSRegionalEndpoint) String() string { + switch e { + case LegacySTSEndpoint: + return "legacy" + case RegionalSTSEndpoint: + return "regional" + case UnsetSTSEndpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. + UnsetSTSEndpoint STSRegionalEndpoint = iota + + // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified + // to use legacy endpoints. + LegacySTSEndpoint + + // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified + // to use regional endpoints. + RegionalSTSEndpoint +) + +// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the STS regional Endpoint flag. 
+func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacySTSEndpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalSTSEndpoint, nil + default: + return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) + } +} + +// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 +// Regional Endpoint options. +type S3UsEast1RegionalEndpoint int + +func (e S3UsEast1RegionalEndpoint) String() string { + switch e { + case LegacyS3UsEast1Endpoint: + return "legacy" + case RegionalS3UsEast1Endpoint: + return "regional" + case UnsetS3UsEast1Endpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not + // specified. + UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota + + // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use legacy endpoints. + LegacyS3UsEast1Endpoint + + // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use regional endpoints. + RegionalS3UsEast1Endpoint +) + +// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the S3 regional Endpoint flag. +func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacyS3UsEast1Endpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalS3UsEast1Endpoint, nil + default: + return UnsetS3UsEast1Endpoint, + fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) + } } // Set combines all of the option functions together. @@ -79,6 +182,12 @@ func ResolveUnknownServiceOption(o *Options) { o.ResolveUnknownService = true } +// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve +// STS endpoint to their regional endpoint, instead of the global endpoint. +func STSRegionalEndpointOption(o *Options) { + o.STSRegionalEndpoint = RegionalSTSEndpoint +} + // A Resolver provides the interface for functionality to resolve endpoints. // The build in Partition and DefaultResolver return value satisfy this interface. type Resolver interface { @@ -170,10 +279,13 @@ func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { // A Partition provides the ability to enumerate the partition's regions // and services. type Partition struct { - id string - p *partition + id, dnsSuffix string + p *partition } +// DNSSuffix returns the base domain name of the partition. +func (p Partition) DNSSuffix() string { return p.dnsSuffix } + // ID returns the identifier of the partition. func (p Partition) ID() string { return p.id } @@ -191,7 +303,7 @@ func (p Partition) ID() string { return p.id } // require the provided service and region to be known by the partition. // If the endpoint cannot be strictly resolved an error will be returned. This // mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned my look valid but may not work. +// StrictMatching enabled the endpoint returned may look valid but may not work. // StrictMatching requires the SDK to be updated if you want to take advantage // of new regions and services expansions. 
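To make the effect of the two new flags concrete, a small hedged sketch (illustration only, not part of the patch): it parses a flag value as it would appear in env or shared config, then resolves STS with and without STSRegionalEndpointOption. The us-west-2 remapping relies on the sts legacy-region list added later in this patch.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // Only "legacy" and "regional" (case-insensitive) parse successfully.
        flag, err := endpoints.GetSTSRegionalEndpoint("REGIONAL")
        if err != nil {
            panic(err)
        }
        fmt.Println(flag) // regional

        p := endpoints.AwsPartition()

        // Default (legacy) behavior: us-west-2 is on the sts legacy list,
        // so resolution is redirected to the aws-global endpoint.
        legacy, _ := p.EndpointFor("sts", "us-west-2")
        fmt.Println(legacy.URL) // https://sts.amazonaws.com

        // With the option set, the regional endpoint is preserved.
        regional, _ := p.EndpointFor("sts", "us-west-2",
            endpoints.STSRegionalEndpointOption)
        fmt.Println(regional.URL) // https://sts.us-west-2.amazonaws.com
    }

GetS3UsEast1RegionalEndpoint works the same way for the s3/us-east-1 pair, which is why s3's PartitionEndpoint moves from "us-east-1" to "aws-global" earlier in this diff.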
// @@ -347,6 +459,9 @@ type ResolvedEndpoint struct { // The endpoint URL URL string + // The endpoint partition + PartitionID string + // The region that should be used for signing requests. SigningRegion string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go new file mode 100644 index 00000000..df75e899 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go @@ -0,0 +1,24 @@ +package endpoints + +var legacyGlobalRegions = map[string]map[string]struct{}{ + "sts": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, + }, + "s3": { + "us-east-1": {}, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index ff6f76db..eb2ac83c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -54,8 +54,9 @@ type partition struct { func (p partition) Partition() Partition { return Partition{ - id: p.ID, - p: &p, + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, } } @@ -74,24 +75,56 @@ func (p partition) canResolveEndpoint(service, region string, strictMatch bool) return p.RegionRegex.MatchString(region) } +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { var opt Options opt.Set(opts...) s, hasService := p.Services[service] - if !(hasService || opt.ResolveUnknownService) { + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { // Only return error if the resolver will not fallback to creating // endpoint based on service endpoint ID passed in. 
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) } + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || + (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { + if _, ok := legacyGlobalRegions[service][region]; ok { + region = "aws-global" + } + } + e, hasEndpoint := s.endpointForRegion(region) - if !hasEndpoint && opt.StrictMatching { + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) } defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, region, p.DNSSuffix, defs, opt), nil + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil } func serviceList(ss services) []string { @@ -200,7 +233,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -208,11 +241,23 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op merged.mergeIn(e) e = merged - hostname := e.Hostname + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname // Offset the hostname for dualstack if enabled if opts.UseDualStack && e.HasDualStack == boxedTrue { hostname = e.DualStackHostname + region = signingRegion } u := strings.Replace(hostname, "{service}", service, 1) @@ -222,20 +267,9 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) u = fmt.Sprintf("%s://%s", scheme, u) - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - return ResolvedEndpoint{ URL: u, + PartitionID: partitionID, SigningRegion: signingRegion, SigningName: signingName, SigningNameDerived: signingNameDerived, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go index 271da432..d9b37f4d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -1,18 +1,17 @@ -// +build !appengine,!plan9 - package request import ( - "net" - "os" - "syscall" + "strings" ) func isErrConnectionReset(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - return sysErr.Err == syscall.ECONNRESET - } + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true } return false diff --git 
a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go deleted file mode 100644 index daf9eca4..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine plan9 - -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - return strings.Contains(err.Error(), "connection reset") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 8ef8548a..185b0731 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -23,7 +23,7 @@ type Handlers struct { Complete HandlerList } -// Copy returns of this handler's lists. +// Copy returns a copy of this handler's lists. func (h *Handlers) Copy() Handlers { return Handlers{ Validate: h.Validate.copy(), @@ -42,7 +42,7 @@ func (h *Handlers) Copy() Handlers { } } -// Clear removes callback functions for all handlers +// Clear removes callback functions for all handlers. func (h *Handlers) Clear() { h.Validate.Clear() h.Build.Clear() @@ -59,6 +59,51 @@ func (h *Handlers) Clear() { h.Complete.Clear() } +// IsEmpty returns if there are no handlers in any of the handlerlists. +func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + // A HandlerListRunItem represents an entry in the HandlerList which // is being run. type HandlerListRunItem struct { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index b0c2ef4f..9370fa50 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -15,12 +15,15 @@ type offsetReader struct { closed bool } -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { reader := &offsetReader{} - buf.Seek(offset, sdkio.SeekStart) + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } reader.buf = buf - return reader + return reader, nil } // Close will close the instance of the offset reader's access to @@ -54,7 +57,9 @@ func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { // CloseAndCopy will return a new offsetReader with a copy of the old buffer // and close the old buffer. 
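As a quick illustration of the new Handlers.IsEmpty helper above (a sketch, not part of the change): a zero-value Handlers reports empty until any one of its lists gains a callback.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        var h request.Handlers
        fmt.Println(h.IsEmpty()) // true

        // Pushing a callback onto any list flips the result.
        h.Send.PushBack(func(r *request.Request) {
            // e.g. instrumentation around the HTTP send step
        })
        fmt.Println(h.IsEmpty()) // false
    }
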
-func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } return newOffsetReader(o.buf, offset) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 19da3fcd..52178141 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -64,6 +64,15 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + // A value greater than 0 instructs the request to be signed as Presigned URL // You should not set this field directly. Instead use Request's // Presign or PresignRequest methods. @@ -90,8 +99,12 @@ type Operation struct { BeforePresignFn func(r *Request) error } -// New returns a new Request pointer for the service API -// operation and parameters. +// New returns a new Request pointer for the service API operation and +// parameters. +// +// A Retryer should be provided to direct how the request is retried. If +// Retryer is nil, a default no retry value will be used. You can use +// NoOpRetryer in the Client package to disable retry behavior directly. // // Params is any value of input parameters to be the request payload. // Data is pointer value to an object which the request's response @@ -99,6 +112,10 @@ type Operation struct { func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + if retryer == nil { + retryer = noOpRetryer{} + } + method := operation.HTTPMethod if method == "" { method = "POST" @@ -231,6 +248,10 @@ func (r *Request) WillRetry() bool { return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + // ParamsFilled returns if the request's parameters have been populated // and the parameters are valid. False is returned if no parameters are // provided or invalid. @@ -259,7 +280,18 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader - r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. 
+ r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } r.ResetBody() } @@ -330,16 +362,15 @@ func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, err return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } -func debugLogReqError(r *Request, stage string, retrying bool, err error) { +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { return } - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) } @@ -358,12 +389,12 @@ func (r *Request) Build() error { if !r.built { r.Handlers.Validate.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) + debugLogReqError(r, "Validate Request", notRetrying, r.Error) return r.Error } r.Handlers.Build.Run(r) if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } r.built = true @@ -379,7 +410,7 @@ func (r *Request) Build() error { func (r *Request) Sign() error { r.Build() if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } @@ -387,12 +418,16 @@ func (r *Request) Sign() error { return r.Error } -func (r *Request) getNextRequestBody() (io.ReadCloser, error) { +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { if r.safeBody != nil { r.safeBody.Close() } - r.safeBody = newOffsetReader(r.Body, r.BodyStart) + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } // Go 1.8 tightened and clarified the rules code needs to use when building // requests with the http package. 
Go 1.8 removed the automatic detection @@ -409,10 +444,10 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // Related golang/go#18257 l, err := aws.SeekerLen(r.Body) if err != nil { - return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) } - var body io.ReadCloser if l == 0 { body = NoBody } else if l > 0 { @@ -473,29 +508,28 @@ func (r *Request) Send() error { r.AttemptTime = time.Now() if err := r.Sign(); err != nil { - debugLogReqError(r, "Sign Request", false, err) + debugLogReqError(r, "Sign Request", notRetrying, err) return err } if err := r.sendRequest(); err == nil { return nil - } else if !shouldRetryCancel(r.Error) { - return err - } else { - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } - r.prepareRetry() - continue + if err := r.prepareRetry(); err != nil { + r.Error = err + return err } } } -func (r *Request) prepareRetry() { +func (r *Request) prepareRetry() error { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) @@ -506,12 +540,19 @@ func (r *Request) prepareRetry() { // the request's body even though the Client's Do returned. r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } // Closing response body to ensure that no response body is leaked // between retry attempts. if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { r.HTTPResponse.Body.Close() } + + return nil } func (r *Request) sendRequest() (sendErr error) { @@ -520,7 +561,9 @@ func (r *Request) sendRequest() (sendErr error) { r.Retryable = nil r.Handlers.Send.Run(r) if r.Error != nil { - debugLogReqError(r, "Send Request", r.WillRetry(), r.Error) + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } @@ -528,13 +571,17 @@ func (r *Request) sendRequest() (sendErr error) { r.Handlers.ValidateResponse.Run(r) if r.Error != nil { r.Handlers.UnmarshalError.Run(r) - debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } r.Handlers.Unmarshal.Run(r) if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error) + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } @@ -561,48 +608,6 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } -type temporary interface { - Temporary() bool -} - -func shouldRetryCancel(err error) bool { - switch err := err.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - return shouldRetryCancel(err.OrigErr()) - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. 
- return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryCancel(err.Err) - case temporary: - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retryable. See - // TestRequest4xxUnretryable for an example. - return true - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - // SanitizeHostForHeader removes default port from host and updates request.Host func SanitizeHostForHeader(r *http.Request) { host := getHost(r) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index 7c6a8000..de1292f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -4,6 +4,8 @@ package request import ( "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" ) // NoBody is a http.NoBody reader instructing Go HTTP client to not include @@ -24,7 +26,8 @@ var NoBody = http.NoBody func (r *Request) ResetBody() { body, err := r.getNextRequestBody() if err != nil { - r.Error = err + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index a633ed5a..64784e16 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -17,11 +17,13 @@ import ( // does the pagination between API operations, and Paginator defines the // configuration that will be used per page request. // -// cont := true -// for p.Next() && cont { +// for p.Next() { // data := p.Page().(*s3.ListObjectsOutput) // // process the page's data +// // ... +// // break out of loop to stop fetching additional pages // } +// // return p.Err() // // See service client API operation Pages methods for examples how the SDK will @@ -146,7 +148,7 @@ func (r *Request) nextPageTokens() []interface{} { return nil } case bool: - if v == false { + if !v { return nil } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index d0aa54c6..8015acc6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -1,26 +1,75 @@ package request import ( + "net" + "net/url" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" ) -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the client.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. +// Retryer provides the interface to drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determining if a request API error should be retried.
+// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. type Retryer interface { + // RetryRules returns the retry delay that should be used by the SDK before + // making another request attempt for the failed request. RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts a request is made. ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. MaxRetries() int } -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. The value must not be nil. func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + if retryer == nil { + if cfg.Logger != nil { + cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.") + } + retryer = noOpRetryer{} + } cfg.Retryer = retryer return cfg + +} + +// noOpRetryer is an internal no-op retryer used when a request is created +// without a retryer. +// +// Provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type noOpRetryer struct{} + +// MaxRetries returns the maximum number of retries the service will make for +// an individual API request; for noOpRetryer, MaxRetries will always be zero. +func (d noOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for noOpRetryer, as it should never retry. +func (d noOpRetryer) ShouldRetry(_ *Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since noOpRetryer does not retry, RetryRules always returns 0. +func (d noOpRetryer) RetryRules(_ *Request) time.Duration { + return 0 } // retryableCodes is a collection of service response codes which are retry-able @@ -76,10 +125,6 @@ var validParentCodes = map[string]struct{}{ ErrCodeRead: {}, } -type temporaryError interface { - Temporary() bool -} - func isNestedErrorRetryable(parentErr awserr.Error) bool { if parentErr == nil { return false @@ -98,7 +143,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { return isCodeRetryable(aerr.Code()) } - if t, ok := err.(temporaryError); ok { + if t, ok := err.(temporary); ok { return t.Temporary() || isErrConnectionReset(err) } @@ -108,32 +153,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil.
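Because the Retryer contract is now the documented extension point, and nil retryers are silently replaced with the no-op implementation above, a minimal hedged sketch of wiring in a custom Retryer through WithRetryer may help; fixedDelayRetryer, its 200ms delay, and the retry budget of 3 are illustration values, not anything defined by the SDK.

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
    )

    // fixedDelayRetryer retries retryable or throttled errors up to three
    // times, always waiting 200ms between attempts.
    type fixedDelayRetryer struct{}

    func (fixedDelayRetryer) RetryRules(*request.Request) time.Duration {
        return 200 * time.Millisecond
    }

    func (fixedDelayRetryer) ShouldRetry(r *request.Request) bool {
        // Request.IsErrorRetryable and Request.IsErrorThrottle are the
        // same hooks the default retryer consults.
        return r.IsErrorRetryable() || r.IsErrorThrottle()
    }

    func (fixedDelayRetryer) MaxRetries() int { return 3 }

    func main() {
        cfg := aws.NewConfig()
        // Passing nil here would now log an error and install the no-op retryer.
        request.WithRetryer(cfg, fixedDelayRetryer{})
    }
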
@@ -108,32 +153,90 @@
 // IsErrorRetryable returns whether the error is retryable, based on its Code.
 // Returns false if error is nil.
 func IsErrorRetryable(err error) bool {
-	if err != nil {
-		if aerr, ok := err.(awserr.Error); ok {
-			return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
+	if err == nil {
+		return false
+	}
+	return shouldRetryError(err)
+}
+
+type temporary interface {
+	Temporary() bool
+}
+
+func shouldRetryError(origErr error) bool {
+	switch err := origErr.(type) {
+	case awserr.Error:
+		if err.Code() == CanceledErrorCode {
+			return false
 		}
+		if isNestedErrorRetryable(err) {
+			return true
+		}
+
+		origErr := err.OrigErr()
+		var shouldRetry bool
+		if origErr != nil {
+			shouldRetry = shouldRetryError(origErr)
+			if err.Code() == "RequestError" && !shouldRetry {
+				return false
+			}
+		}
+		if isCodeRetryable(err.Code()) {
+			return true
+		}
+		return shouldRetry
+
+	case *url.Error:
+		if strings.Contains(err.Error(), "connection refused") {
+			// Refused connections should be retried as the service may not yet
+			// be running on the port. Go TCP dial considers refused
+			// connections as not temporary.
+			return true
+		}
+		// *url.Error only implements Temporary after golang 1.6 but since
+		// url.Error only wraps the error:
+		return shouldRetryError(err.Err)
+
+	case temporary:
+		if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
+			return true
+		}
+		// If the error is temporary, we want to allow continuation of the
+		// retry process
+		return err.Temporary() || isErrConnectionReset(origErr)
+
+	case nil:
+		// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
+		// because we don't know the cause, it is marked as retryable. See
+		// TestRequest4xxUnretryable for an example.
+		return true
+
+	default:
+		switch err.Error() {
+		case "net/http: request canceled",
+			"net/http: request canceled while waiting for connection":
+			// known 1.5 error case when an http request is cancelled
+			return false
+		}
+		// here we don't know the error; so we allow a retry.
+		return true
 	}
-	return false
 }
 
 // IsErrorThrottle returns whether the error is to be throttled based on its code.
 // Returns false if error is nil.
 func IsErrorThrottle(err error) bool {
-	if err != nil {
-		if aerr, ok := err.(awserr.Error); ok {
-			return isCodeThrottle(aerr.Code())
-		}
+	if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+		return isCodeThrottle(aerr.Code())
 	}
 	return false
 }
 
-// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
-// Returns false if error is nil.
+// IsErrorExpiredCreds returns whether the error code is a credential expiry
+// error. Returns false if error is nil.
 func IsErrorExpiredCreds(err error) bool {
-	if err != nil {
-		if aerr, ok := err.(awserr.Error); ok {
-			return isCodeExpiredCreds(aerr.Code())
-		}
+	if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+		return isCodeExpiredCreds(aerr.Code())
 	}
 	return false
 }
 
@@ -143,17 +246,58 @@ func IsErrorExpiredCreds(err error) bool {
 //
 // Alias for the utility function IsErrorRetryable
 func (r *Request) IsErrorRetryable() bool {
+	if isErrCode(r.Error, r.RetryErrorCodes) {
+		return true
+	}
+
+	// HTTP response status code 500 represents an internal server error and
+	// should be retried without any throttle. A 501 (Not Implemented)
+	// response is deliberately not retried, since the request method is not
+	// supported by the server and cannot be handled.
+	if r.HTTPResponse != nil {
+		if r.HTTPResponse.StatusCode == 500 {
+			return true
+		}
+	}
 	return IsErrorRetryable(r.Error)
 }
 
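shouldRetryError above never retries canceled requests, retries codes in the retryableCodes table, and falls back to the error's Temporary hint. A small illustration of the exported helper under those rules; the error codes used are existing SDK constants and table entries:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/awserr"
		"github.com/aws/aws-sdk-go/aws/request"
	)

	func main() {
		canceled := awserr.New(request.CanceledErrorCode, "request canceled", nil)
		fmt.Println(request.IsErrorRetryable(canceled)) // false: cancellation is final

		timeout := awserr.New("RequestTimeout", "request timed out", nil)
		fmt.Println(request.IsErrorRetryable(timeout)) // true: listed in retryableCodes

		fmt.Println(request.IsErrorRetryable(nil)) // false: nil error
	}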
-// IsErrorThrottle returns whether the error is to be throttled based on its code.
-// Returns false if the request has no Error set
+// IsErrorThrottle returns whether the error is to be throttled based on its
+// code. Returns false if the request has no Error set.
 //
 // Alias for the utility function IsErrorThrottle
 func (r *Request) IsErrorThrottle() bool {
+	if isErrCode(r.Error, r.ThrottleErrorCodes) {
+		return true
+	}
+
+	if r.HTTPResponse != nil {
+		switch r.HTTPResponse.StatusCode {
+		case
+			429, // error caused due to too many requests
+			502, // Bad Gateway error should be throttled
+			503, // caused when service is unavailable
+			504: // error occurred due to gateway timeout
+			return true
+		}
+	}
+
 	return IsErrorThrottle(r.Error)
 }
 
+func isErrCode(err error, codes []string) bool {
+	if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+		for _, code := range codes {
+			if code == aerr.Code() {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
 // IsErrorExpired returns whether the error code is a credential expiry error.
 // Returns false if the request has no Error set.
 //
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
new file mode 100644
index 00000000..cc64e24f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
@@ -0,0 +1,259 @@
+package session
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+func resolveCredentials(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (*credentials.Credentials, error) {
+
+	switch {
+	case len(sessOpts.Profile) != 0:
+		// User explicitly provided a Profile in the session's configuration
+		// so load that profile from shared config first.
+		// Github(aws/aws-sdk-go#2727)
+		return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+
+	case envCfg.Creds.HasKeys():
+		// Environment credentials
+		return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil
+
+	case len(envCfg.WebIdentityTokenFilePath) != 0:
+		// Web identity token from environment, RoleARN required to also be
+		// set.
+		return assumeWebIdentity(cfg, handlers,
+			envCfg.WebIdentityTokenFilePath,
+			envCfg.RoleARN,
+			envCfg.RoleSessionName,
+		)
+
+	default:
+		// Fallback to the "default" credential resolution chain.
+		return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+	}
+}
+
+// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but
+// 'AWS_ROLE_ARN' was not set.
+var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil)
+
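resolveCredentials above prefers static environment credentials, then the web-identity branch when both the token file and role ARN variables are present. A sketch of driving that branch purely from the environment; the ARN and token path are placeholders:

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/aws/aws-sdk-go/aws/session"
	)

	func main() {
		// Placeholder values; a readable token file and a real role are
		// required before the credentials are first used.
		os.Setenv("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/example")
		os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/token")
		os.Setenv("AWS_ROLE_SESSION_NAME", "example-session")

		sess, err := session.NewSession()
		if err != nil {
			log.Fatal(err)
		}
		// The STS AssumeRoleWithWebIdentity call happens lazily, on the first
		// request made by a service client using these credentials.
		fmt.Println(sess.Config.Credentials != nil) // true
	}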
+// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but
+// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set.
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
+
+func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
+	filepath string,
+	roleARN, sessionName string,
+) (*credentials.Credentials, error) {
+
+	if len(filepath) == 0 {
+		return nil, WebIdentityEmptyTokenFilePathErr
+	}
+
+	if len(roleARN) == 0 {
+		return nil, WebIdentityEmptyRoleARNErr
+	}
+
+	creds := stscreds.NewWebIdentityCredentials(
+		&Session{
+			Config:   cfg,
+			Handlers: handlers.Copy(),
+		},
+		roleARN,
+		sessionName,
+		filepath,
+	)
+
+	return creds, nil
+}
+
+func resolveCredsFromProfile(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+	switch {
+	case sharedCfg.SourceProfile != nil:
+		// Assume IAM role with credentials source from a different profile.
+		creds, err = resolveCredsFromProfile(cfg, envCfg,
+			*sharedCfg.SourceProfile, handlers, sessOpts,
+		)
+
+	case sharedCfg.Creds.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		creds = credentials.NewStaticCredentialsFromCreds(
+			sharedCfg.Creds,
+		)
+
+	case len(sharedCfg.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+
+	case len(sharedCfg.CredentialSource) != 0:
+		creds, err = resolveCredsFromSource(cfg, envCfg,
+			sharedCfg, handlers, sessOpts,
+		)
+
+	case len(sharedCfg.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
+		return assumeWebIdentity(cfg, handlers,
+			sharedCfg.WebIdentityTokenFile,
+			sharedCfg.RoleARN,
+			sharedCfg.RoleSessionName,
+		)
+
+	default:
+		// Fallback to default credentials provider, include mock errors for
+		// the credential chain so the user can identify why credentials
+		// failed to be retrieved.
+ creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + if len(sharedCfg.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = creds + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + + return creds, nil +} + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch sharedCfg.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + + if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.RoleSessionName + opt.Duration = sessOpts.AssumeRoleDuration + + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set when shared config is configured +// load assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. 
+func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 38a7b05a..7ec66e7e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -1,97 +1,93 @@ /* -Package session provides configuration for the SDK's service clients. - -Sessions can be shared across all service clients that share the same base -configuration. The Session is built from the SDK's default configuration and -request handlers. - -Sessions should be cached when possible, because creating a new Session will -load all configuration values from the environment, and config files each time -the Session is created. Sharing the Session value across all of your service -clients will ensure the configuration is loaded the fewest number of times possible. - -Concurrency +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. Sessions are safe to use concurrently as long as the Session is not being -modified. The SDK will not modify the Session once the Session has been created. -Creating service clients concurrently from a shared Session is safe. - -Sessions from Shared Config - -Sessions can be created using the method above that will only load the -additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. -Alternatively you can explicitly create a Session with shared config enabled. -To do this you can use NewSessionWithOptions to configure how the Session will -be created. Using the NewSessionWithOptions with SharedConfigState set to -SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG -environment variable was set. +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. -Creating Sessions - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. +Sessions options from Shared Config By default NewSession will only load credentials from the shared credentials file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value the Session will be created from the configuration values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. See the section Sessions from Shared Config for -more information. +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. -Create a Session with the default config and request handlers. With credentials -region, and profile loaded from the environment and shared config automatically. 
-Requires the AWS_PROFILE to be set, or "default" is used.
+Credential and config loading order
 
-	// Create Session
-	sess := session.Must(session.NewSession())
+The Session will attempt to load configuration and credentials from the
+environment, configuration files, and other credential sources. The order
+configuration is loaded in is:
 
-	// Create a Session with a custom region
-	sess := session.Must(session.NewSession(&aws.Config{
-		Region: aws.String("us-east-1"),
-	}))
+	* Environment Variables
+	* Shared Credentials file
+	* Shared Configuration file (if SharedConfig is enabled)
+	* EC2 Instance Metadata (credentials only)
 
-	// Create a S3 client instance from a session
-	sess := session.Must(session.NewSession())
+The Environment variables for credentials will have precedence over shared
+config even if SharedConfig is enabled. To override this behavior and use
+shared config credentials instead, specify session.Options.Profile (e.g.
+when using credential_source=Environment to assume a role).
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Profile: "myProfile",
+	})
 
-	svc := s3.New(sess)
+Creating Sessions
 
-Create Session With Option Overrides
+Creating a Session without additional options will load the credentials,
+region, and profile from the environment and shared config automatically. See
+the "Environment Variables" section for information on environment variables
+used by Session.
 
-In addition to NewSession, Sessions can be created using NewSessionWithOptions.
-This func allows you to control and override how the Session will be created
-through code instead of being driven by environment variables only.
+	// Create Session
+	sess, err := session.NewSession()
 
-Use NewSessionWithOptions when you want to provide the config profile, or
-override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded, config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+	// Create a Session with a custom region
+	sess, err := session.NewSession(&aws.Config{
+		Region: aws.String("us-west-2"),
+	})
+
+Use NewSessionWithOptions to provide additional configuration driving how the
+Session's configuration will be loaded, such as specifying the shared config
+profile, or overriding the shared config state (AWS_SDK_LOAD_CONFIG).
 
 	// Equivalent to session.NewSession()
-	sess := session.Must(session.NewSessionWithOptions(session.Options{
+	sess, err := session.NewSessionWithOptions(session.Options{
 		// Options
-	}))
+	})
 
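A sketch of the precedence described above, assuming no other AWS variables or profiles interfere: static environment credentials win over the shared files even when shared config is enabled, and the provider name recorded on the credentials reflects that (the key values are dummies):

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/aws/aws-sdk-go/aws/session"
	)

	func main() {
		os.Setenv("AWS_ACCESS_KEY_ID", "AKID")       // dummy value
		os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET") // dummy value

		sess := session.Must(session.NewSessionWithOptions(session.Options{
			SharedConfigState: session.SharedConfigEnable,
		}))

		creds, err := sess.Config.Credentials.Get()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(creds.ProviderName) // expected: EnvConfigCredentials
	}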
-	// Specify profile to load for the session's config
-	sess := session.Must(session.NewSessionWithOptions(session.Options{
-		Profile: "profile_name",
-	}))
+	sess, err := session.NewSessionWithOptions(session.Options{
+		// Specify profile to load for the session's config
+		Profile: "profile_name",
 
-	// Specify profile for config and region for requests
-	sess := session.Must(session.NewSessionWithOptions(session.Options{
-		Config: aws.Config{Region: aws.String("us-east-1")},
-		Profile: "profile_name",
-	}))
+
+		// Provide SDK Config options, such as Region.
+		Config: aws.Config{
+			Region: aws.String("us-west-2"),
+		},
 
-	// Force enable Shared Config support
-	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		// Force enable Shared Config support
 		SharedConfigState: session.SharedConfigEnable,
-	}))
+	})
 
 Adding Handlers
 
-You can add handlers to a session for processing HTTP requests. All service
-clients that use the session inherit the handlers. For example, the following
-handler logs every request and its payload made by a service client:
+You can add handlers to a session to decorate API operations (e.g. adding HTTP
+headers). All clients that use the Session receive a copy of the Session's
+handlers. For example, the following request handler added to the Session logs
+every request made.
 
 	// Create a session, and add additional handlers for all service
 	// clients created with the Session to inherit. Adds logging handler.
@@ -99,22 +95,15 @@ handler logs every request and its payload made by a service client:
 
 	sess.Handlers.Send.PushFront(func(r *request.Request) {
 		// Log every request made and its payload
-		logger.Printf("Request: %s/%s, Payload: %s",
+		logger.Printf("Request: %s/%s, Params: %s",
 			r.ClientInfo.ServiceName, r.Operation, r.Params)
 	})
 
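A self-contained variant of the handler snippet from the Adding Handlers section above; log.New stands in for the logger the doc example assumes:

	package main

	import (
		"log"
		"os"

		"github.com/aws/aws-sdk-go/aws/request"
		"github.com/aws/aws-sdk-go/aws/session"
	)

	func main() {
		logger := log.New(os.Stderr, "", log.LstdFlags)

		sess := session.Must(session.NewSession())
		sess.Handlers.Send.PushFront(func(r *request.Request) {
			// Log every request made and its parameters.
			logger.Printf("Request: %s/%s, Params: %s",
				r.ClientInfo.ServiceName, r.Operation, r.Params)
		})
		// Service clients created from sess inherit a copy of these handlers.
	}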
-Deprecated "New" function
-
-The New session function has been deprecated because it does not provide good
-way to return errors that occur when loading the configuration files and values.
-Because of this, NewSession was created so errors can be retrieved when
-creating a session fails.
-
 Shared Config Fields
 
-By default the SDK will only load the shared credentials file's (~/.aws/credentials)
-credentials values, and all other config is provided by the environment variables,
-SDK defaults, and user provided aws.Config values.
+By default the SDK will only load the shared credentials file's
+(~/.aws/credentials) credentials values, and all other config is provided by
+the environment variables, SDK defaults, and user provided aws.Config values.
 
 If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
 option is used to create the Session the full shared config values will be
@@ -125,24 +114,31 @@ files have the same format.
 
 If both config files are present the configuration from both files will be
 read. The Session will be created from configuration values from the shared
-credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
+credentials file (~/.aws/credentials) over those in the shared config file
+(~/.aws/config).
 
-Credentials are the values the SDK should use for authenticating requests with
-AWS Services. They are from a configuration file will need to include both
-aws_access_key_id and aws_secret_access_key must be provided together in the
-same file to be considered valid. The values will be ignored if not a complete
-group. aws_session_token is an optional field that can be provided if both of
-the other two fields are also provided.
+Credentials are the values the SDK uses to authenticate requests with AWS
+Services. When specified in a file, both aws_access_key_id and
+aws_secret_access_key must be provided together in the same file to be
+considered valid. They will be ignored if both are not present.
+aws_session_token is an optional field that can be provided in addition to the
+other two fields.
 
 	aws_access_key_id = AKID
 	aws_secret_access_key = SECRET
 	aws_session_token = TOKEN
 
-Assume Role values allow you to configure the SDK to assume an IAM role using
-a set of credentials provided in a config file via the source_profile field.
-Both "role_arn" and "source_profile" are required. The SDK supports assuming
-a role with MFA token if the session option AssumeRoleTokenProvider
-is set.
+	; region only supported if SharedConfigEnabled.
+	region = us-east-1
+
+Assume Role configuration
+
+The role_arn field allows you to configure the SDK to assume an IAM role using
+a set of credentials from another source, such as when paired with static
+credentials via the "source_profile", "credential_process", or
+"credential_source" fields. If "role_arn" is provided, a source of credentials
+must also be specified, such as "source_profile", "credential_source", or
+"credential_process".
 
 	role_arn = arn:aws:iam:::role/
 	source_profile = profile_with_creds
@@ -150,40 +146,16 @@ is set.
 	mfa_serial = 
 	role_session_name = session_name
 
-Region is the region the SDK should use for looking up AWS service endpoints
-and signing requests.
-
-	region = us-east-1
-
-Assume Role with MFA token
-To create a session with support for assuming an IAM role with MFA set the
-session option AssumeRoleTokenProvider to a function that will prompt for the
-MFA token code when the SDK assumes the role and refreshes the role's credentials.
-This allows you to configure the SDK via the shared config to assumea role
-with MFA tokens.
-
-In order for the SDK to assume a role with MFA the SharedConfigState
-session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG
-environment variable set.
-
-The shared configuration instructs the SDK to assume an IAM role with MFA
-when the mfa_serial configuration field is set in the shared config
-(~/.aws/config) or shared credentials (~/.aws/credentials) file.
-
-If mfa_serial is set in the configuration, the SDK will assume the role, and
-the AssumeRoleTokenProvider session option is not set an an error will
-be returned when creating the session.
+The SDK supports assuming a role with an MFA token. If "mfa_serial" is set, you
+must also set the Session Option.AssumeRoleTokenProvider. The Session will fail
+to load if the AssumeRoleTokenProvider is not specified.
 
 	sess := session.Must(session.NewSessionWithOptions(session.Options{
 		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
 	}))
 
-	// Create service client value configured for credentials
-	// from assumed role.
-	svc := s3.New(sess)
-
-To setup assume role outside of a session see the stscreds.AssumeRoleProvider
+To set up Assume Role outside of a session see the stscreds.AssumeRoleProvider
 documentation.
 
 Environment Variables
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index e3959b95..4092ab8f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -1,12 +1,14 @@
 package session
 
 import (
+	"fmt"
 	"os"
 	"strconv"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
 )
 
 // EnvProviderName provides a name of the provider when config is loaded from environment.
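The mfa_serial requirement described in the doc.go text above pairs with the AssumeRoleTokenProvider session option. A sketch using the SDK's stdin prompt; "mfa_profile" is a placeholder for a profile whose shared config sets role_arn, source_profile, and mfa_serial:

	package main

	import (
		"log"

		"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
		"github.com/aws/aws-sdk-go/aws/session"
	)

	func main() {
		sess, err := session.NewSessionWithOptions(session.Options{
			Profile:                 "mfa_profile", // placeholder profile name
			SharedConfigState:       session.SharedConfigEnable,
			AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
		})
		if err != nil {
			log.Fatal(err)
		}
		_ = sess // pass sess to service client constructors
	}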
@@ -99,21 +101,55 @@ type envConfig struct { CustomCABundle string csmEnabled string - CSMEnabled bool + CSMEnabled *bool CSMPort string + CSMHost string CSMClientID string - enableEndpointDiscovery string // Enables endpoint discovery via environment variables. // // AWS_ENABLE_ENDPOINT_DISCOVERY=true EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. + // + // AWS_STS_REGIONAL_ENDPOINTS=regional + // This can take value as `regional` or `legacy` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // This can take value as `regional` or `legacy` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint } var ( csmEnabledEnvKey = []string{ "AWS_CSM_ENABLED", } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } csmPortEnvKey = []string{ "AWS_CSM_PORT", } @@ -150,6 +186,21 @@ var ( sharedConfigFileEnvKey = []string{ "AWS_CONFIG_FILE", } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -158,7 +209,7 @@ var ( // If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value // the shared SDK config will be loaded in addition to the SDK's specific // configuration values. -func loadEnvConfig() envConfig { +func loadEnvConfig() (envConfig, error) { enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) return envConfigLoad(enableSharedConfig) } @@ -169,30 +220,42 @@ func loadEnvConfig() envConfig { // Loads the shared configuration in addition to the SDK's specific configuration. // This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` // environment variable is set. 
-func loadSharedEnvConfig() envConfig { +func loadSharedEnvConfig() (envConfig, error) { return envConfigLoad(true) } -func envConfigLoad(enableSharedConfig bool) envConfig { +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { cfg := envConfig{} cfg.EnableSharedConfig = enableSharedConfig - setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) // CSM environment variables setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - cfg.CSMEnabled = len(cfg.csmEnabled) > 0 - // Require logical grouping of credentials - if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { - cfg.Creds = credentials.Value{} - } else { - cfg.Creds.ProviderName = EnvProviderName + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v } regionKeys := regionEnvKeys @@ -223,12 +286,33 @@ func envConfigLoad(enableSharedConfig bool) envConfig { cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") - return cfg + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + return cfg, nil } func setFromEnvVal(dst *string, keys []string) { for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { + if v := os.Getenv(k); len(v) != 0 { *dst = v break } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index be4b5f07..ab6daac7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -8,19 +8,17 @@ import ( "io/ioutil" "net/http" "os" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/processcreds" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" ) const ( @@ -75,7 +73,7 @@ type Session 
struct {
 // func is called instead of waiting to receive an error until a request is made.
 func New(cfgs ...*aws.Config) *Session {
 	// load initial config from environment
-	envCfg := loadEnvConfig()
+	envCfg, envErr := loadEnvConfig()
 
 	if envCfg.EnableSharedConfig {
 		var cfg aws.Config
@@ -95,19 +93,28 @@ func New(cfgs ...*aws.Config) *Session {
 			// Session creation failed, need to report the error and prevent
 			// any requests from succeeding.
 			s = &Session{Config: defaults.Config()}
-			s.Config.MergeIn(cfgs...)
-			s.Config.Logger.Log("ERROR:", msg, "Error:", err)
-			s.Handlers.Validate.PushBack(func(r *request.Request) {
-				r.Error = err
-			})
+			s.logDeprecatedNewSessionError(msg, err, cfgs)
 		}
 
 		return s
 	}
 
 	s := deprecatedNewSession(cfgs...)
-	if envCfg.CSMEnabled {
-		enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+	if envErr != nil {
+		msg := "failed to load env config"
+		s.logDeprecatedNewSessionError(msg, envErr, cfgs)
+	}
+
+	if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
+		if l := s.Config.Logger; l != nil {
+			l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+		}
+	} else if csmCfg.Enabled {
+		err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
+		if err != nil {
+			msg := "failed to enable CSM"
+			s.logDeprecatedNewSessionError(msg, err, cfgs)
+		}
 	}
 
 	return s
@@ -126,7 +133,7 @@ func New(cfgs ...*aws.Config) *Session {
 // to be built with retrieving credentials with AssumeRole set in the config.
 //
 // See the NewSessionWithOptions func for information on how to override or
-// control through code how the Session will be created. Such as specifying the
+// control through code how the Session will be created, such as specifying the
 // config profile, and controlling if shared config is enabled or not.
 func NewSession(cfgs ...*aws.Config) (*Session, error) {
 	opts := Options{}
@@ -210,6 +217,12 @@ type Options struct {
 	// the config enables assume role with MFA via the mfa_serial field.
 	AssumeRoleTokenProvider func() (string, error)
 
+	// When the SDK's shared config is configured to assume a role this option
+	// may be provided to set the expiry duration of the STS credentials.
+	// Defaults to 15 minutes if not set as documented in the
+	// stscreds.AssumeRoleProvider.
+	AssumeRoleDuration time.Duration
+
 	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
 	// the SDK will use instead of the default system's root CA bundle. Use this
 	// only if you want to replace the CA bundle the SDK uses for TLS requests.
@@ -224,6 +237,12 @@ type Options struct {
 	// to also enable this feature. CustomCABundle session option field has priority
 	// over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
 	CustomCABundle io.Reader
+
+	// The handlers that the session and all API clients will be created with.
+	// This must be a complete set of handlers. Use the defaults.Handlers()
+	// function to initialize this value before changing the handlers to be
+	// used by the SDK.
+	Handlers request.Handlers
 }
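A hedged sketch exercising the two Options fields added above, a custom assume-role credential lifetime and an explicitly seeded handler set; the duration value and the use of the defaults package are illustrative choices, not requirements:

	package main

	import (
		"log"
		"time"

		"github.com/aws/aws-sdk-go/aws/defaults"
		"github.com/aws/aws-sdk-go/aws/session"
	)

	func main() {
		// Start from the complete default handler set, as the field requires.
		handlers := defaults.Handlers()

		sess, err := session.NewSessionWithOptions(session.Options{
			SharedConfigState:  session.SharedConfigEnable,
			AssumeRoleDuration: 30 * time.Minute,
			Handlers:           handlers,
		})
		if err != nil {
			log.Fatal(err)
		}
		_ = sess
	}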
 
 // NewSessionWithOptions returns a new Session created from SDK defaults, config files,
@@ -257,13 +276,20 @@ type Options struct {
 //     }))
 func NewSessionWithOptions(opts Options) (*Session, error) {
 	var envCfg envConfig
+	var err error
 	if opts.SharedConfigState == SharedConfigEnable {
-		envCfg = loadSharedEnvConfig()
+		envCfg, err = loadSharedEnvConfig()
+		if err != nil {
+			return nil, fmt.Errorf("failed to load shared config, %v", err)
+		}
 	} else {
-		envCfg = loadEnvConfig()
+		envCfg, err = loadEnvConfig()
+		if err != nil {
+			return nil, fmt.Errorf("failed to load environment config, %v", err)
+		}
 	}
 
-	if len(opts.Profile) > 0 {
+	if len(opts.Profile) != 0 {
 		envCfg.Profile = opts.Profile
 	}
 
@@ -329,27 +355,33 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
 	return s
 }
 
-func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
-	logger.Log("Enabling CSM")
-	if len(port) == 0 {
-		port = csm.DefaultPort
+func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
+	if logger != nil {
+		logger.Log("Enabling CSM")
 	}
 
-	r, err := csm.Start(clientID, "127.0.0.1:"+port)
+	r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
 	if err != nil {
-		return
+		return err
 	}
 	r.InjectHandlers(handlers)
+
+	return nil
 }
 
 func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
 	cfg := defaults.Config()
-	handlers := defaults.Handlers()
+
+	handlers := opts.Handlers
+	if handlers.IsEmpty() {
+		handlers = defaults.Handlers()
+	}
 
 	// Get a merged version of the user provided config to determine if
-	// credentials were.
+	// credentials were provided.
 	userCfg := &aws.Config{}
 	userCfg.MergeIn(cfgs...)
+	cfg.MergeIn(userCfg)
 
 	// Ordered config files will be loaded in with later files overwriting
 	// previous config file values.
@@ -366,9 +398,17 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
 	}
 
 	// Load additional config from file(s)
-	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
+	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
 	if err != nil {
-		return nil, err
+		if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
+			// Special case where the user has not explicitly specified an AWS_PROFILE,
+			// or session.Options.profile, shared config is not enabled, and the
+			// environment has credentials, allow the shared config file to fail to
+			// load since the user has already provided credentials, and nothing else
+			// is required to be read from the file. 
Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } } if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { @@ -381,8 +421,16 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) - if envCfg.CSMEnabled { - enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + return nil, err + } } // Setup HTTP client with custom cert bundle if enabled @@ -395,6 +443,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, return s, nil } +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + func loadCustomCABundle(s *Session, bundle io.Reader) error { var t *http.Transport switch v := s.Config.HTTPClient.Transport.(type) { @@ -443,9 +531,11 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) { return p, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { - // Merge in user provided configuration - cfg.MergeIn(userCfg) +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { // Region if not already set by user if len(aws.StringValue(cfg.Region)) == 0 { @@ -464,162 +554,51 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share } } - // Configure credentials if not already set + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ + userCfg.STSRegionalEndpoint, + envCfg.STSRegionalEndpoint, + sharedCfg.STSRegionalEndpoint, + endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + // Configure credentials if not already set by the user when creating the + // Session. if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - - // inspect the profile to see if a credential source has been specified. 
- if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { - - // if both credential_source and source_profile have been set, return an error - // as this is undefined behavior. - if len(sharedCfg.AssumeRole.SourceProfile) > 0 { - return ErrSharedConfigSourceCollision - } - - // valid credential source values - const ( - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" - ) - - switch sharedCfg.AssumeRole.CredentialSource { - case credSourceEc2Metadata: - cfgCp := *cfg - p := defaults.RemoteCredProvider(cfgCp, handlers) - cfgCp.Credentials = credentials.NewCredentials(p) - - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return AssumeRoleTokenProviderNotSetError{} - } - - cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) - case credSourceEnvironment: - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - case credSourceECSContainer: - if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - return ErrSharedConfigECSContainerEnvVarEmpty - } - - cfgCp := *cfg - p := defaults.RemoteCredProvider(cfgCp, handlers) - creds := credentials.NewCredentials(p) - - cfg.Credentials = creds - default: - return ErrSharedConfigInvalidCredSource - } - - return nil - } - - if len(envCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { - cfgCp := *cfg - cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.AssumeRoleSource.Creds, - ) - - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return AssumeRoleTokenProviderNotSetError{} - } - - cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) - } else if len(sharedCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - } else if len(sharedCfg.CredentialProcess) > 0 { - cfg.Credentials = processcreds.NewCredentials( - sharedCfg.CredentialProcess, - ) - } else { - // Fallback to default credentials provider, include mock errors - // for the credential chain so user can identify why credentials - // failed to be retrieved. 
- cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, - &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err } + cfg.Credentials = creds } return nil } -func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials { - return stscreds.NewCredentials( - &Session{ - Config: &cfg, - Handlers: handlers.Copy(), - }, - sharedCfg.AssumeRole.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName - - // Assume role with external ID - if len(sharedCfg.AssumeRole.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.AssumeRole.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ) -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the -// MFAToken option is not set when shared config is configured load assume a -// role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } } -var emptyCreds = credentials.Value{} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } } func initHandlers(s *Session) { @@ -630,7 +609,7 @@ func initHandlers(s *Session) { } } -// Copy creates and returns a copy of the current Session, coping the config +// Copy creates and returns a copy of the current Session, copying the config // and handlers. If any additional configs are provided they will be merged // on top of the Session's copied config. 
// @@ -650,37 +629,15 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { // ClientConfig satisfies the client.ConfigProvider interface and is used to // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { - // Backwards compatibility, the error will be eaten if user calls ClientConfig - // directly. All SDK services will use ClientconfigWithError. - cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) - - return cfg -} - -func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) - var resolved endpoints.ResolvedEndpoint - var err error - region := aws.StringValue(s.Config.Region) - - if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { - resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region - } else { - resolved, err = s.Config.EndpointResolver.EndpointFor( - serviceName, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) - opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - }, - ) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil && s.Config.Logger != nil { + s.Config.Logger.Log(fmt.Sprintf( + "ERROR: unable to resolve endpoint for service %q, region %q, err: %v", + service, region, err)) } return client.Config{ @@ -690,7 +647,42 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, - }, err + } +} + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil } // ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception @@ -700,12 +692,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf s = s.Copy(cfgs...) 
var resolved endpoints.ResolvedEndpoint - - region := aws.StringValue(s.Config.Region) - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region + resolved.SigningRegion = aws.StringValue(s.Config.Region) } return client.Config{ @@ -717,3 +706,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf SigningName: resolved.SigningName, } } + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 7cb44021..1d7b049c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/internal/ini" ) @@ -23,13 +23,29 @@ const ( mfaSerialKey = `mfa_serial` // optional roleSessionNameKey = `role_session_name` // optional + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + // Additional Config fields regionKey = `region` // endpoint discovery group enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + // External Credential Process - credentialProcessKey = `credential_process` + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name @@ -37,36 +53,33 @@ const ( DefaultSharedConfigProfile = `default` ) -type assumeRoleConfig struct { - RoleARN string - SourceProfile string - CredentialSource string - ExternalID string - MFASerial string - RoleSessionName string -} - // sharedConfig represents the configuration fields of the SDK config files. type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. 
// // aws_access_key_id // aws_secret_access_key // aws_session_token Creds credentials.Value - AssumeRole assumeRoleConfig - AssumeRoleSource *sharedConfig + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string - // An external process to request credentials - CredentialProcess string + SourceProfileName string + SourceProfile *sharedConfig - // Region is the region the SDK should use for looking up AWS service endpoints - // and signing requests. + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. // // region Region string @@ -76,6 +89,23 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint } type sharedConfigFile struct { @@ -83,17 +113,18 @@ type sharedConfigFile struct { IniData ini.Sections } -// loadSharedConfig retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine // precedence. Values in subsequent files will overwrite values defined in // earlier files. // // For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. +// of the files are A then B, B's credential values will be used instead of +// A's. // // See sharedConfig.setFromFile for information how the config files // will be loaded. -func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { +func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { if len(profile) == 0 { profile = DefaultSharedConfigProfile } @@ -104,16 +135,11 @@ func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) } cfg := sharedConfig{} - if err = cfg.setFromIniFiles(profile, files); err != nil { + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { return sharedConfig{}, err } - if len(cfg.AssumeRole.SourceProfile) > 0 { - if err := cfg.setAssumeRoleSource(profile, files); err != nil { - return sharedConfig{}, err - } - } - return cfg, nil } @@ -137,60 +163,88 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { return files, nil } -func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { - var assumeRoleSrc sharedConfig - - if len(cfg.AssumeRole.CredentialSource) > 0 { - // setAssumeRoleSource is only called when source_profile is found. 
- // If both source_profile and credential_source are set, then - // ErrSharedConfigSourceCollision will be returned - return ErrSharedConfigSourceCollision +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + // Trim files from the list that don't exist. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. + profileNotFoundErr = err + skippedFiles++ + continue + } + return err + } + } + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr } - // Multiple level assume role chains are not support - if cfg.AssumeRole.SourceProfile == origProfile { - assumeRoleSrc = *cfg - assumeRoleSrc.AssumeRole = assumeRoleConfig{} + if _, ok := profiles[profile]; ok { + // if this is the second instance of the profile the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. The self linked instance of the + // profile only have credential provider options. + cfg.clearAssumeRoleOptions() } else { - err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) - if err != nil { + // First time a profile has been seen, It must either be a assume role + // or credentials. Assert if the credential type requires a role ARN, + // the ARN is also set. + if err := cfg.validateCredentialsRequireARN(profile); err != nil { return err } } + profiles[profile] = struct{}{} - if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { - return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + if err := cfg.validateCredentialType(); err != nil { + return err } - cfg.AssumeRoleSource = &assumeRoleSrc + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. + cfg.clearCredentialOptions() - return nil -} - -func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { - // Trim files from the list that don't exist. - for _, f := range files { - if err := cfg.setFromIniFile(profile, f); err != nil { + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { + // SourceProfile that doesn't exist is an error in configuration. if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore proviles missings - continue + err = SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } } return err } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + + cfg.SourceProfile = srcCfg } return nil } -// setFromFile loads the configuration from the file using -// the profile provided. A sharedConfig pointer type value is used so that -// multiple config file loadings can be chained. +// setFromFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. 
// // Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For example -// if a config file only includes aws_access_key_id but no aws_secret_access_key -// the aws_access_key_id will be ignored. -func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. +func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { section, ok := file.IniData.GetSection(profile) if !ok { // Fallback to to alternate profile name: profile @@ -200,53 +254,160 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e } } - // Shared Credentials - akid := section.String(accessKeyIDKey) - secret := section.String(secretAccessKey) - if len(akid) > 0 && len(secret) > 0 { - cfg.Creds = credentials.Value{ - AccessKeyID: akid, - SecretAccessKey: secret, - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + updateString(&cfg.Region, section, regionKey) + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointSharedKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre } - } - // Assume Role - roleArn := section.String(roleArnKey) - srcProfile := section.String(sourceProfileKey) - credentialSource := section.String(credentialSourceKey) - hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 - if len(roleArn) > 0 && hasSource { - cfg.AssumeRole = assumeRoleConfig{ - RoleARN: roleArn, - SourceProfile: srcProfile, - CredentialSource: credentialSource, - ExternalID: section.String(externalIDKey), - MFASerial: section.String(mfaSerialKey), - RoleSessionName: section.String(roleSessionNameKey), + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre } } - // `credential_process` - if credProc := section.String(credentialProcessKey); len(credProc) > 0 { - cfg.CredentialProcess = credProc - } + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) - // Region - if v := section.String(regionKey); len(v) > 0 { - cfg.Region = v + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds } // Endpoint 
discovery - if section.Has(enableEndpointDiscoveryKey) { - v := section.Bool(enableEndpointDiscoveryKey) - cfg.EnableEndpointDiscovery = &v + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } } return nil } +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. + if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + // SharedConfigLoadError is an error for the shared config file failed to load. type SharedConfigLoadError struct { Filename string @@ -304,7 +465,8 @@ func (e SharedConfigProfileNotExistsError) Error() string { // profile contains assume role information, but that information is invalid // or not complete. type SharedConfigAssumeRoleError struct { - RoleARN string + RoleARN string + SourceProfile string } // Code is the short id of the error. 
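Note on the shared_config.go rewrite above: the old one-hop `setAssumeRoleSource` is replaced by a recursive `setFromIniFiles` that threads a visited-profile set through the recursion, so `source_profile` chains of any depth resolve exactly once and a self-referencing profile cannot loop; `oneOrNone` then enforces that at most one credential source is configured per profile. A minimal, self-contained sketch of that visited-set pattern follows — `profile` and `resolve` are illustrative stand-ins for the SDK's `sharedConfig` and INI loading, not SDK identifiers:

```go
package main

import (
	"errors"
	"fmt"
)

// profile is a stand-in for the SDK's sharedConfig: it either holds static
// credentials or names another profile to source them from.
type profile struct {
	name          string
	sourceProfile string // like source_profile in the shared config files
	hasCreds      bool
}

// resolve walks the source_profile chain. The visited set plays the role of
// setFromIniFiles' profiles map: the second visit of a profile terminates the
// recursion instead of looping forever on self- or mutually-referencing
// profiles.
func resolve(name string, all map[string]profile, visited map[string]struct{}) (*profile, error) {
	p, ok := all[name]
	if !ok {
		return nil, fmt.Errorf("profile %q not found", name)
	}
	if _, seen := visited[name]; seen {
		// Mirrors clearAssumeRoleOptions: a repeated profile may only
		// contribute static credentials, not another hop in the chain.
		if !p.hasCreds {
			return nil, errors.New("profile cycle without credentials: " + name)
		}
		return &p, nil
	}
	visited[name] = struct{}{}

	if p.sourceProfile != "" {
		src, err := resolve(p.sourceProfile, all, visited)
		if err != nil {
			return nil, err
		}
		if !src.hasCreds {
			return nil, fmt.Errorf("source profile %q has no credentials", src.name)
		}
	}
	return &p, nil
}

func main() {
	all := map[string]profile{
		"dev":  {name: "dev", sourceProfile: "base"},
		"base": {name: "base", hasCreds: true},
		"loop": {name: "loop", sourceProfile: "loop"}, // cycle, no creds
	}
	for _, n := range []string{"dev", "loop"} {
		_, err := resolve(n, all, map[string]struct{}{})
		fmt.Println(n, "->", err)
	}
}
```

As in the SDK code above, a repeated profile is tolerated only when it can terminate the chain with credentials of its own; otherwise the chain is rejected rather than followed again.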
@@ -314,8 +476,10 @@ func (e SharedConfigAssumeRoleError) Code() string { // Message is the description of the error func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", - e.RoleARN) + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) } // OrigErr is the underlying error that caused the failure. @@ -327,3 +491,36 @@ func (e SharedConfigAssumeRoleError) OrigErr() error { func (e SharedConfigAssumeRoleError) Error() string { return awserr.SprintError(e.Code(), e.Message(), "", nil) } + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. +func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e CredentialRequiresARNError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 523db79f..8104793a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -687,7 +687,11 @@ func (ctx *signingCtx) buildBodyDigest() error { if !aws.IsReaderSeekable(ctx.Body) { return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) } - hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) } if includeSHA256Header { @@ -734,10 +738,16 @@ func makeSha256(data []byte) []byte { return hash.Sum(nil) } -func makeSha256Reader(reader io.ReadSeeker) []byte { +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { hash := sha256.New() - start, _ := reader.Seek(0, sdkio.SeekCurrent) - defer reader.Seek(start, sdkio.SeekStart) + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. + _, err = reader.Seek(start, sdkio.SeekStart) + }() // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. 
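The v4 signer hunk above changes `makeSha256Reader` to propagate `Seek` failures instead of discarding them, using a named return value so that even the deferred seek-back to the payload start can surface an error. A standalone sketch of the same pattern; `hashSeekable` is a hypothetical helper, not the SDK function, and unlike the SDK's defer it keeps the first error rather than overwriting it:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

// hashSeekable hashes everything from the reader's current offset to EOF and
// then seeks back, so the caller can still send the body. The named err lets
// the deferred restore report a failure even after a successful hash.
func hashSeekable(r io.ReadSeeker) (sum []byte, err error) {
	start, err := r.Seek(0, io.SeekCurrent)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Surface a failed seek-back; otherwise the caller would sign a
		// digest for a body it can no longer replay.
		if _, serr := r.Seek(start, io.SeekStart); serr != nil && err == nil {
			err = serr
		}
	}()

	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}

func main() {
	body := bytes.NewReader([]byte("payload"))
	sum, err := hashSeekable(body)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(sum))
	// The offset was restored, so the body can still be read in full.
	rest, _ := io.ReadAll(body)
	fmt.Println(string(rest))
}
```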
@@ -748,7 +758,7 @@ func makeSha256Reader(reader io.ReadSeeker) []byte { io.CopyN(hash, reader, size) } - return hash.Sum(nil) + return hash.Sum(nil), nil } const doubleSpace = " " diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 8b6f2342..45509154 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -7,13 +7,18 @@ import ( "github.com/aws/aws-sdk-go/internal/sdkio" ) -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should -// only be used with an io.Reader that is also an io.Seeker. Doing so may -// cause request signature errors, or request body's not sent for GET, HEAD -// and DELETE HTTP methods. +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. // -// Deprecated: Should only be used with io.ReadSeeker. If using for -// S3 PutObject to stream content use s3manager.Uploader instead. +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. +// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { return ReaderSeekerCloser{r} } @@ -43,7 +48,8 @@ func IsReaderSeekable(r io.Reader) bool { // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned. // -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. // // Performs the same functionality as io.Reader Read func (r ReaderSeekerCloser) Read(p []byte) (int, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index b82d20b7..14898362 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.19.39" +const SDKVersion = "1.25.37" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go index f9970337..cf9fad81 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -162,7 +162,7 @@ loop: if len(tokens) == 0 { break loop } - + // if should skip is true, we skip the tokens until should skip is set to false. 
step = SkipTokenState } @@ -218,7 +218,7 @@ loop: // S -> equal_expr' expr_stmt' switch k.Kind { case ASTKindEqualExpr: - // assiging a value to some key + // assigning a value to some key k.AppendChild(newExpression(tok)) stack.Push(newExprStatement(k)) case ASTKindExpr: @@ -250,6 +250,13 @@ loop: if !runeCompare(tok.Raw(), openBrace) { return nil, NewParseError("expected '['") } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } stmt := newStatement() stack.Push(stmt) @@ -304,7 +311,9 @@ loop: stmt := newCommentStatement(tok) stack.Push(stmt) default: - return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok)) + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) } if len(tokens) > 0 { @@ -314,7 +323,7 @@ loop: // this occurs when a statement has not been completed if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container)) + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) } // returns a sublist which excludes the start symbol diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go index 6bb69644..da7a4049 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -22,24 +22,24 @@ func newSkipper() skipper { } func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). if s.shouldSkip && s.prevTok.Type() == TokenNL && tok.Type() != TokenWS { - s.Continue() return false } s.prevTok = tok - return s.shouldSkip } func (s *skipper) Skip() { s.shouldSkip = true - s.prevTok = emptyToken } func (s *skipper) Continue() { s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false s.prevTok = emptyToken } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go new file mode 100644 index 00000000..6c443988 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go new file mode 100644 index 00000000..44898eed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package sdkmath + +import "math" + +// Round returns the nearest integer, rounding half away from zero. 
+// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + return math.Round(x) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go new file mode 100644 index 00000000..810ec7f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go @@ -0,0 +1,56 @@ +// +build !go1.10 + +package sdkmath + +import "math" + +// Copied from the Go standard library's (Go 1.12) math/floor.go for use in +// Go version prior to Go 1.10. +const ( + uvone = 0x3FF0000000000000 + mask = 0x7FF + shift = 64 - 11 - 1 + bias = 1023 + signMask = 1 << 63 + fracMask = 1<<shift - 1 +) + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + // Round is a faster implementation of: + // + // func Round(x float64) float64 { + // t := Trunc(x) + // if Abs(x-t) >= 0.5 { + // return t + Copysign(1, x) + // } + // return t + // } + bits := math.Float64bits(x) + e := uint(bits>>shift) & mask + if e < bias { + // Round abs(x) < 1 including denormals. + bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go new file mode 100644 index 00000000..f4651da2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,11 @@ +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read provides the stub for math.Rand.Read method support for go version's +// 1.6 and greater. +func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go new file mode 100644 index 00000000..b1d93a33 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go @@ -0,0 +1,24 @@ +// +build !go1.6 + +package sdkrand + +import "math/rand" + +// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 +func Read(r *rand.Rand, p []byte) (n int, err error) { + // Copy of Go standard libraries math package's read function not added to + // standard library until Go 1.6.
+ var pos int8 + var val int64 + for n = 0; n < len(p); n++ { + if pos == 0 { + val = r.Int63() + pos = 7 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + + return n, err +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index de021367..74e361e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -146,6 +146,9 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) { } func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + if len(headers) == 0 { + return nil + } switch r.Interface().(type) { case map[string]*string: // we only support string map value types out := map[string]*string{} @@ -155,19 +158,28 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err out[k[len(prefix):]] = &v[0] } } - r.Set(reflect.ValueOf(out)) + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + } return nil } func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - isJSONValue := tag.Get("type") == "jsonvalue" - if isJSONValue { + switch tag.Get("type") { + case "jsonvalue": if len(header) == 0 { return nil } - } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } } switch v.Interface().(type) { @@ -178,7 +190,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro if err != nil { return err } - v.Set(reflect.ValueOf(&b)) + v.Set(reflect.ValueOf(b)) case *bool: b, err := strconv.ParseBool(header) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index b7ed6c6f..05d4ff51 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -1,8 +1,11 @@ package protocol import ( + "math" "strconv" "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" ) // Names of time formats supported by the SDK @@ -13,12 +16,19 @@ const ( ) // Time formats supported by the SDK +// Output time is intended to not contain decimals const ( // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + // This format is used for output time without seconds precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + // RFC3339 a subset of the ISO8601 timestamp format. 
e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05Z" + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output time without seconds precision + ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" ) // IsKnownTimestampFormat returns if the timestamp format name @@ -42,9 +52,9 @@ func FormatTime(name string, t time.Time) string { switch name { case RFC822TimeFormatName: - return t.Format(RFC822TimeFormat) + return t.Format(RFC822OutputTimeFormat) case ISO8601TimeFormatName: - return t.Format(ISO8601TimeFormat) + return t.Format(ISO8601OutputTimeFormat) case UnixTimeFormatName: return strconv.FormatInt(t.Unix(), 10) default: @@ -62,10 +72,12 @@ func ParseTime(formatName, value string) (time.Time, error) { return time.Parse(ISO8601TimeFormat, value) case UnixTimeFormatName: v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 if err != nil { return time.Time{}, err } - return time.Unix(int64(v), 0), nil + return time.Unix(int64(v), int64(dec*(1e9))), nil default: panic("unknown timestamp format name, " + formatName) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 00000000..c1a51185 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 515ce152..42f71648 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -119,7 +119,18 @@ func (n *XMLNode) findElem(name string) (string, bool) { // StructToXML writes an XMLNode to a xml.Encoder as tokens. 
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) if node.Text != "" { e.EncodeToken(xml.CharData([]byte(node.Text))) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 9e610591..9c5ed454 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -676,9 +676,9 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // // Returned Error Codes: // * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. +// This error is returned if the message passed to DecodeAuthorizationMessage +// was invalid. This can happen if the token contains invalid characters, such +// as linebreaks. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { @@ -702,6 +702,102 @@ func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *Deco return out, req.Send() } +const opGetAccessKeyInfo = "GetAccessKeyInfo" + +// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyInfo operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccessKeyInfoRequest method. +// req, resp := client.GetAccessKeyInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { + op := &request.Operation{ + Name: opGetAccessKeyInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyInfoInput{} + } + + output = &GetAccessKeyInfoOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccessKeyInfo API operation for AWS Security Token Service. +// +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) +// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). 
+// For more information about access keys, see Managing Access Keys for IAM +// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// AWS account to which the keys belong. Access key IDs beginning with AKIA +// are long-term credentials for an IAM user or the AWS account root user. Access +// key IDs beginning with ASIA are temporary credentials that are created using +// STS operations. If the account in the response belongs to you, you can sign +// in as the root user and review your root user access keys. Then, you can +// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html). +// +// This operation does not indicate the state of the access key. The key might +// be active, inactive, or deleted. Active keys might not have permissions to +// perform an operation. Providing a deleted access key might return an error +// that the key doesn't exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetAccessKeyInfo for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + return out, req.Send() +} + +// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccessKeyInfo for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the @@ -746,8 +842,15 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // GetCallerIdentity API operation for AWS Security Token Service. // -// Returns details about the IAM identity whose credentials are used to call -// the API. +// Returns details about the IAM user or role whose credentials are used to +// call the operation. +// +// No permissions are required to perform this operation. If an administrator +// adds a policy to your IAM user or role that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when an IAM user +// or role is denied access. 
To view an example response, see I Am Not Authorized +// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1129,7 +1232,7 @@ type AssumeRoleInput struct { // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session // policies shouldn't exceed 2048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // The characters in this parameter count towards the 2048 character session @@ -1407,7 +1510,7 @@ type AssumeRoleWithSAMLInput struct { // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session // policies shouldn't exceed 2048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // The characters in this parameter count towards the 2048 character session @@ -1702,7 +1805,7 @@ type AssumeRoleWithWebIdentityInput struct { // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session // policies shouldn't exceed 2048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // The characters in this parameter count towards the 2048 character session @@ -2156,6 +2259,73 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercased letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the AWS account. + Account *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -2284,7 +2454,7 @@ type GetFederationTokenInput struct { // use as managed session policies. The plain text that you use for both inline // and managed session policies shouldn't exceed 2048 characters. You can provide // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // This parameter is optional. However, if you do not pass any session policies, @@ -2545,7 +2715,7 @@ type PolicyDescriptorType struct { // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session // policy for the role. For more information about ARNs, see Amazon Resource - // Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. Arn *string `locationName:"arn" min:"20" type:"string"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 00000000..d5307fca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index 41ea09c3..a3e378ed 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -34,9 +34,9 @@ const ( // ErrCodeInvalidAuthorizationMessageException for service response error code // "InvalidAuthorizationMessageException". // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. 
This can happen if the token contains invalid characters, such as - // linebreaks. + // This error is returned if the message passed to DecodeAuthorizationMessage + // was invalid. This can happen if the token contains invalid characters, such + // as linebreaks. ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" // ErrCodeInvalidIdentityTokenException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 185c914d..2c3c3d2c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -46,11 +46,11 @@ const ( // svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { svc := &STS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2011-06-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go new file mode 100644 index 00000000..e2e1d6ef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go @@ -0,0 +1,96 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package stsiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sts" +) + +// STSAPI provides an interface to enable mocking the +// sts.STS service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS Security Token Service. +// func myFunc(svc stsiface.STSAPI) bool { +// // Make svc.AssumeRole request +// } +// +// func main() { +// sess := session.New() +// svc := sts.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. 
+// type mockSTSClient struct { +// stsiface.STSAPI +// } +// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSTSClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. +type STSAPI interface { + AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) + AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) + + AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) + + AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) + + DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) + + GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) + + GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) + GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) + GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) +} + +var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 00000000..339177be --- /dev/null +++ 
b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 00000000..1602287d --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 
+4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 
+3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 00000000..d7d14f8e --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. 
+type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. 
+func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? 
+ } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore new file mode 100644 index 00000000..9e131146 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/.gitignore @@ -0,0 +1,2 @@ +example/example +example/example.exe diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE b/vendor/github.com/bgentry/speakeasy/LICENSE new file mode 100644 index 00000000..37d60fc3 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/LICENSE @@ -0,0 +1,24 @@ +MIT License + +Copyright (c) 2017 Blake Gentry + +This license applies to the non-Windows portions of this library. The Windows +portion maintains its own Apache 2.0 license. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS new file mode 100644 index 00000000..ff177f61 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [2013] [the CloudFoundry Authors] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
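(Aside, not part of the vendored sources: the beorn7/perks quantile package added above is easiest to follow from a caller's perspective. The sketch below is a minimal, hypothetical use of the API defined in stream.go; NewTargeted, Insert, Query, and Count are the exported names from that file, while the chosen target quantiles, error bounds, and sample count are illustrative only.)

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// NewTargeted takes the quantiles of interest mapped to their allowed
	// absolute errors, per the doc comment in stream.go above.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.0001,
	})
	for i := 0; i < 100000; i++ {
		// Insert buffers samples and flushes to the summary at capacity.
		q.Insert(rand.Float64())
	}
	fmt.Printf("observed=%d p50=%.3f p99=%.3f\n",
		q.Count(), q.Query(0.50), q.Query(0.99))
}
```

Note that Merge carries an explicit "ATTENTION: This method is broken" warning in the source above, so per the package's own documentation a caller should avoid combining streams that way.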
diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md new file mode 100644 index 00000000..fceda751 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/Readme.md @@ -0,0 +1,30 @@ +# Speakeasy + +This package provides cross-platform Go (#golang) helpers for taking user input +from the terminal while not echoing the input back (similar to `getpasswd`). The +package uses syscalls to avoid any dependence on cgo, and is therefore +compatible with cross-compiling. + +[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] + +## Unicode + +Multi-byte unicode characters work successfully on Mac OS X. On Windows, +however, this may be problematic (as is UTF in general on Windows). Other +platforms have not been tested. + +## License + +The code herein was not written by me, but was compiled from two separate open +source packages. Unix portions were imported from [gopass][gopass], while +Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s +[Windows terminal helpers][cf-ui-windows]. + +The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly +from the source (though I attempted to fill in the correct owner in the +boilerplate copyright notice). + +[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" +[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" +[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" +[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go new file mode 100644 index 00000000..71c1dd1b --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy.go @@ -0,0 +1,49 @@ +package speakeasy + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Ask the user to enter a password with input hidden. prompt is a string to +// display before the user's input. Returns the provided password, or an error +// if the command failed. +func Ask(prompt string) (password string, err error) { + return FAsk(os.Stdout, prompt) +} + +// FAsk is the same as Ask, except it is possible to specify the file to write +// the prompt to. If 'nil' is passed as the writer, no prompt will be written. +func FAsk(wr io.Writer, prompt string) (password string, err error) { + if wr != nil && prompt != "" { + fmt.Fprint(wr, prompt) // Display the prompt. + } + password, err = getPassword() + + // Carriage return after the user input. 
+ if wr != nil { + fmt.Fprintln(wr, "") + } + return +} + +func readline() (value string, err error) { + var valb []byte + var n int + b := make([]byte, 1) + for { + // read one byte at a time so we don't accidentally read extra bytes + n, err = os.Stdin.Read(b) + if err != nil && err != io.EOF { + return "", err + } + if n == 0 || b[0] == '\n' { + break + } + valb = append(valb, b[0]) + } + + return strings.TrimSuffix(string(valb), "\r"), nil +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go new file mode 100644 index 00000000..d99fda19 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go @@ -0,0 +1,93 @@ +// based on https://code.google.com/p/gopass +// Author: johnsiilver@gmail.com (John Doak) +// +// Original code is based on code by RogerV in the golang-nuts thread: +// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package speakeasy + +import ( + "fmt" + "os" + "os/signal" + "strings" + "syscall" +) + +const sttyArg0 = "/bin/stty" + +var ( + sttyArgvEOff = []string{"stty", "-echo"} + sttyArgvEOn = []string{"stty", "echo"} +) + +// getPassword gets input hidden from the terminal from a user. This is +// accomplished by turning off terminal echo, reading input from the user and +// finally turning on terminal echo. +func getPassword() (password string, err error) { + sig := make(chan os.Signal, 10) + brk := make(chan bool) + + // File descriptors for stdin, stdout, and stderr. + fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} + + // Setup notifications of termination signals to channel sig, create a process to + // watch for these signals so we can turn back on echo if need be. + signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, + syscall.SIGTERM) + go catchSignal(fd, sig, brk) + + // Turn off the terminal echo. + pid, err := echoOff(fd) + if err != nil { + return "", err + } + + // Turn on the terminal echo and stop listening for signals. + defer signal.Stop(sig) + defer close(brk) + defer echoOn(fd) + + syscall.Wait4(pid, nil, 0, nil) + + line, err := readline() + if err == nil { + password = strings.TrimSpace(line) + } else { + err = fmt.Errorf("failed during password entry: %s", err) + } + + return password, err +} + +// echoOff turns off the terminal echo. +func echoOff(fd []uintptr) (int, error) { + pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) + if err != nil { + return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) + } + return pid, nil +} + +// echoOn turns back on the terminal echo. +func echoOn(fd []uintptr) { + // Turn on the terminal echo. + pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) + if e == nil { + syscall.Wait4(pid, nil, 0, nil) + } +} + +// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn +// terminal echo back on before the program ends. Otherwise the user is left +// with echo off on their terminal. 
+func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { + select { + case <-sig: + echoOn(fd) + os.Exit(-1) + case <-brk: + } +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go new file mode 100644 index 00000000..c2093a80 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package speakeasy + +import ( + "syscall" +) + +// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx +const ENABLE_ECHO_INPUT = 0x0004 + +func getPassword() (password string, err error) { + var oldMode uint32 + + err = syscall.GetConsoleMode(syscall.Stdin, &oldMode) + if err != nil { + return + } + + var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) + + err = setConsoleMode(syscall.Stdin, newMode) + defer setConsoleMode(syscall.Stdin, oldMode) + if err != nil { + return + } + + return readline() +} + +func setConsoleMode(console syscall.Handle, mode uint32) (err error) { + dll := syscall.MustLoadDLL("kernel32") + proc := dll.MustFindProc("SetConsoleMode") + r, _, err := proc.Call(uintptr(console), uintptr(mode)) + + if r == 0 { + return err + } + return nil +} diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE b/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE new file mode 100644 index 00000000..5782c726 --- /dev/null +++ b/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2014, Elazar Leibovich +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/README.md b/vendor/github.com/elazarl/go-bindata-assetfs/README.md new file mode 100644 index 00000000..27ee48f0 --- /dev/null +++ b/vendor/github.com/elazarl/go-bindata-assetfs/README.md @@ -0,0 +1,46 @@ +# go-bindata-assetfs + +Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`. + +[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs) + +### Installation + +Install with + + $ go get github.com/jteeuwen/go-bindata/... + $ go get github.com/elazarl/go-bindata-assetfs/... 
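(One more aside before the go-bindata-assetfs README continues below: the bgentry/speakeasy package vendored a few files up exposes just Ask and FAsk. Here is a minimal, hypothetical caller; the signatures come from speakeasy.go above, while the prompt text and the choice of stderr are illustrative.)

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/bgentry/speakeasy"
)

func main() {
	// FAsk writes the prompt to the given writer and reads the reply with
	// terminal echo disabled, as the speakeasy sources above implement.
	secret, err := speakeasy.FAsk(os.Stderr, "Vault token: ")
	if err != nil {
		log.Fatalf("password entry failed: %v", err)
	}
	fmt.Printf("read a %d-character secret\n", len(secret))
}
```

Per FAsk's doc comment, passing nil as the writer suppresses the prompt entirely; Ask is simply FAsk bound to os.Stdout.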
+ +### Creating embedded data + +Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage, +instead of running `go-bindata` run `go-bindata-assetfs`. + +The tool will create a `bindata_assetfs.go` file, which contains the embedded data. + +A typical use case is + + $ go-bindata-assetfs data/... + +### Using assetFS in your code + +The generated file provides an `assetFS()` function that returns a `http.Filesystem` +wrapping the embedded files. What you usually want to do is: + + http.Handle("/", http.FileServer(assetFS())) + +This would run an HTTP server serving the embedded files. + +## Without running binary tool + +You can always just run the `go-bindata` tool, and then + +use + + import "github.com/elazarl/go-bindata-assetfs" + ... + http.Handle("/", + http.FileServer( + &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: "data"})) + +to serve files embedded from the `data` directory. diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go b/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go new file mode 100644 index 00000000..04f6d7a3 --- /dev/null +++ b/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go @@ -0,0 +1,167 @@ +package assetfs + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +var ( + defaultFileTimestamp = time.Now() +) + +// FakeFile implements os.FileInfo interface for a given path and size +type FakeFile struct { + // Path is the path of this file + Path string + // Dir marks of the path is a directory + Dir bool + // Len is the length of the fake file, zero if it is a directory + Len int64 + // Timestamp is the ModTime of this file + Timestamp time.Time +} + +func (f *FakeFile) Name() string { + _, name := filepath.Split(f.Path) + return name +} + +func (f *FakeFile) Mode() os.FileMode { + mode := os.FileMode(0644) + if f.Dir { + return mode | os.ModeDir + } + return mode +} + +func (f *FakeFile) ModTime() time.Time { + return f.Timestamp +} + +func (f *FakeFile) Size() int64 { + return f.Len +} + +func (f *FakeFile) IsDir() bool { + return f.Mode().IsDir() +} + +func (f *FakeFile) Sys() interface{} { + return nil +} + +// AssetFile implements http.File interface for a no-directory file with content +type AssetFile struct { + *bytes.Reader + io.Closer + FakeFile +} + +func NewAssetFile(name string, content []byte, timestamp time.Time) *AssetFile { + if timestamp.IsZero() { + timestamp = defaultFileTimestamp + } + return &AssetFile{ + bytes.NewReader(content), + ioutil.NopCloser(nil), + FakeFile{name, false, int64(len(content)), timestamp}} +} + +func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) { + return nil, errors.New("not a directory") +} + +func (f *AssetFile) Size() int64 { + return f.FakeFile.Size() +} + +func (f *AssetFile) Stat() (os.FileInfo, error) { + return f, nil +} + +// AssetDirectory implements http.File interface for a directory +type AssetDirectory struct { + AssetFile + ChildrenRead int + Children []os.FileInfo +} + +func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory { + fileinfos := make([]os.FileInfo, 0, len(children)) + for _, child := range children { + _, err := fs.AssetDir(filepath.Join(name, child)) + fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0, time.Time{}}) + } + return &AssetDirectory{ + AssetFile{ + bytes.NewReader(nil), + ioutil.NopCloser(nil), + FakeFile{name, true, 0, time.Time{}}, + }, + 0, + fileinfos} +} + 
+func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) { + if count <= 0 { + return f.Children, nil + } + if f.ChildrenRead+count > len(f.Children) { + count = len(f.Children) - f.ChildrenRead + } + rv := f.Children[f.ChildrenRead : f.ChildrenRead+count] + f.ChildrenRead += count + return rv, nil +} + +func (f *AssetDirectory) Stat() (os.FileInfo, error) { + return f, nil +} + +// AssetFS implements http.FileSystem, allowing +// embedded files to be served from net/http package. +type AssetFS struct { + // Asset should return content of file in path if exists + Asset func(path string) ([]byte, error) + // AssetDir should return list of files in the path + AssetDir func(path string) ([]string, error) + // AssetInfo should return the info of file in path if exists + AssetInfo func(path string) (os.FileInfo, error) + // Prefix would be prepended to http requests + Prefix string +} + +func (fs *AssetFS) Open(name string) (http.File, error) { + name = path.Join(fs.Prefix, name) + if len(name) > 0 && name[0] == '/' { + name = name[1:] + } + if b, err := fs.Asset(name); err == nil { + timestamp := defaultFileTimestamp + if fs.AssetInfo != nil { + if info, err := fs.AssetInfo(name); err == nil { + timestamp = info.ModTime() + } + } + return NewAssetFile(name, b, timestamp), nil + } + if children, err := fs.AssetDir(name); err == nil { + return NewAssetDirectory(name, children, fs), nil + } else { + // If the error is not found, return an error that will + // result in a 404 error. Otherwise the server returns + // a 500 error for files not found. + if strings.Contains(err.Error(), "not found") { + return nil, os.ErrNotExist + } + return nil, err + } +} diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/doc.go b/vendor/github.com/elazarl/go-bindata-assetfs/doc.go new file mode 100644 index 00000000..a664249f --- /dev/null +++ b/vendor/github.com/elazarl/go-bindata-assetfs/doc.go @@ -0,0 +1,13 @@ +// assetfs allows packages to serve static content embedded +// with the go-bindata tool with the standard net/http package. +// +// See https://github.com/jteeuwen/go-bindata for more information +// about embedding binary data with go-bindata. +// +// Usage example, after running +// $ go-bindata data/... +// use: +// http.Handle("/", +// http.FileServer( +// &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"})) +package assetfs diff --git a/vendor/github.com/fatih/color/.travis.yml b/vendor/github.com/fatih/color/.travis.yml new file mode 100644 index 00000000..95f8a1ff --- /dev/null +++ b/vendor/github.com/fatih/color/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.8.x + - tip + diff --git a/vendor/github.com/fatih/color/Gopkg.lock b/vendor/github.com/fatih/color/Gopkg.lock new file mode 100644 index 00000000..7d879e9c --- /dev/null +++ b/vendor/github.com/fatih/color/Gopkg.lock @@ -0,0 +1,27 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/mattn/go-colorable" + packages = ["."] + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" + version = "v0.0.3" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/fatih/color/Gopkg.toml b/vendor/github.com/fatih/color/Gopkg.toml new file mode 100644 index 00000000..ff1617f7 --- /dev/null +++ b/vendor/github.com/fatih/color/Gopkg.toml @@ -0,0 +1,30 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/mattn/go-colorable" + version = "0.0.9" + +[[constraint]] + name = "github.com/mattn/go-isatty" + version = "0.0.3" diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 00000000..25fdaf63 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 00000000..3fc95446 --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,179 @@ +# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color) + + + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! 
The API can be used in several ways, pick one that +suits you. + + +![Color](https://i.imgur.com/c1JI0lA.png) + + +## Install + +```bash +go get github.com/fatih/color +``` + +Note that the `vendor` folder is here for stability. Remove the folder if you +already have the dependencies in your GOPATH. + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! +red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. 
the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`) + +`Color` has support to disable/enable colors both globally and for single color +definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You +can easily disable the color output with: + +```go + +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details + diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 00000000..91c8e9f0 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,603 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. This is a global option and affects all colors. For more control + // over each color block use the methods DisableColor() individually. + NoColor = os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// Color defines a custom color object which is defined by SGR parameters. 
+type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{params: make([]Attribute, 0)} + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. 
+// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). 
+func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user setted action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. 
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+}
+
+// HiBlueString is a convenient helper function to return a string with hi-intensity blue
+// foreground.
+func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
+
+// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
+// foreground.
+func HiMagentaString(format string, a ...interface{}) string {
+    return colorString(format, FgHiMagenta, a...)
+}
+
+// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
+// foreground.
+func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
+
+// HiWhiteString is a convenient helper function to return a string with hi-intensity white
+// foreground.
+func HiWhiteString(format string, a ...interface{}) string {
+    return colorString(format, FgHiWhite, a...)
+}
diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 00000000..cf1e9650
--- /dev/null
+++ b/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,133 @@
+/*
+Package color is an ANSI color package to output colorized or SGR defined
+output to the standard output. The API can be used in several ways; pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+    color.Cyan("Prints text in cyan.")
+
+    // a newline will be appended automatically
+    color.Blue("Prints %s in blue.", "text")
+
+    // More default foreground colors..
+    color.Red("We have red")
+    color.Yellow("Yellow color too!")
+    color.Magenta("And many others ..")
+
+    // Hi-intensity colors
+    color.HiGreen("Bright green color.")
+    color.HiBlack("Bright black means gray..")
+    color.HiWhite("Shiny white color!")
+
+However, there are times when custom color mixes are required. Below are some
+examples to create custom color objects and use the print functions of each
+separate color object.
+
+    // Create a new color object
+    c := color.New(color.FgCyan).Add(color.Underline)
+    c.Println("Prints cyan text with an underline.")
+
+    // Or just add them to New()
+    d := color.New(color.FgCyan, color.Bold)
+    d.Printf("This prints bold cyan %s\n", "too!.")
+
+
+    // Mix up foreground and background colors, create new mixes!
+    red := color.New(color.FgRed)
+
+    boldRed := red.Add(color.Bold)
+    boldRed.Println("This will print text in bold red.")
+
+    whiteBackground := red.Add(color.BgWhite)
+    whiteBackground.Println("Red text with White background.")
+
+    // Use your own io.Writer output
+    color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+    blue := color.New(color.FgBlue)
+    blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+    // Create a custom print function for convenience
+    red := color.New(color.FgRed).PrintfFunc()
+    red("warning")
+    red("error: %s", err)
+
+    // Mix up multiple attributes
+    notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+    notice("don't forget this...")
+
+You can also use FprintXxx functions to pass your own io.Writer:
+
+    blue := color.New(FgBlue).FprintfFunc()
+    blue(myWriter, "important notice: %s", stars)
+
+    // Mix up with multiple attributes
+    success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+    success(myWriter, "don't forget this...")
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+    yellow := New(FgYellow).SprintFunc()
+    red := New(FgRed).SprintFunc()
+
+    fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+    info := New(FgWhite, BgGreen).SprintFunc()
+    fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions the user should use fmt.FprintXXX
+and set the output to color.Output:
+
+    fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+    info := New(FgWhite, BgGreen).SprintFunc()
+    fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using it with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of existing code
+is not required.
+
+    // Use handy standard colors.
+    color.Set(color.FgYellow)
+
+    fmt.Println("Existing text will be now in Yellow")
+    fmt.Printf("This one %s\n", "too")
+
+    color.Unset() // don't forget to unset
+
+    // You can mix up parameters
+    color.Set(color.FgMagenta, color.Bold)
+    defer color.Unset() // use it in your function
+
+    fmt.Println("All text will be now bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support
+for disabling colors both globally and for single color definitions. For
+example, suppose you have a CLI app and a `--no-color` bool flag. You can
+easily disable the color output with:
+
+    var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+    if *flagNoColor {
+        color.NoColor = true // disables colorized output
+    }
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+    c := color.New(color.FgCyan)
+    c.Println("Prints cyan text")
+
+    c.DisableColor()
+    c.Println("This is printed without any color")
+
+    c.EnableColor()
+    c.Println("This prints again cyan...")
+*/
+package color
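For orientation, the fatih/color usage documented above condenses into one short runnable sketch. This is an illustrative consumer program written for this review, not part of the vendored sources; the `--no-color` flag name is simply the example used in doc.go:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/fatih/color"
)

func main() {
	// The --no-color pattern from doc.go: flip the package-global switch.
	noColor := flag.Bool("no-color", false, "Disable color output")
	flag.Parse()
	if *noColor {
		color.NoColor = true
	}

	// Default helpers append a newline automatically.
	color.Cyan("Prints text in cyan.")

	// SprintFunc mixes colorized fragments into plain strings.
	warn := color.New(color.FgYellow, color.Bold).SprintFunc()
	fmt.Printf("this is a %s\n", warn("warning"))
}
```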
diff --git a/vendor/github.com/go-ole/go-ole/.travis.yml b/vendor/github.com/go-ole/go-ole/.travis.yml
new file mode 100644
index 00000000..0c2c02bd
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+sudo: false
+
+go:
+  - 1.1
+  - 1.2
+  - 1.3
+  - 1.4
+  - tip
diff --git a/vendor/github.com/go-ole/go-ole/ChangeLog.md b/vendor/github.com/go-ole/go-ole/ChangeLog.md
new file mode 100644
index 00000000..4ba6a8c6
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ChangeLog.md
@@ -0,0 +1,49 @@
+# Version 1.x.x
+
+* **Add more test cases and reference new test COM server project.** (Placeholder for future additions)
+
+# Version 1.2.0-alphaX
+
+**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.**
+
+ * Added CI configuration for Travis-CI and AppVeyor.
+ * Added test InterfaceID and ClassID for the COM Test Server project.
+ * Added more inline documentation (#83).
+ * Added IEnumVARIANT implementation (#88).
+ * Added IEnumVARIANT test cases (#99, #100, #101).
+ * Added support for retrieving `time.Time` from VARIANT (#92).
+ * Added test case for IUnknown (#64).
+ * Added test case for IDispatch (#64).
+ * Added test cases for scalar variants (#64, #76).
+
+# Version 1.1.1
+
+ * Fixes for Linux build.
+ * Fixes for Windows build.
+
+# Version 1.1.0
+
+The change to provide building on all platforms is a new feature. The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes.
+
+ * Move GUID out of variables.go into its own file to make new documentation available.
+ * Move OleError out of ole.go into its own file to make new documentation available.
+ * Add documentation to utility functions.
+ * Add documentation to variant receiver functions.
+ * Add documentation to ole structures.
+ * Make variant available to other systems outside of Windows.
+ * Make OLE structures available to other systems outside of Windows.
+
+## New Features
+
+ * Library should now be built on all platforms supported by Go. Library will NOOP on any platform that is not Windows.
+ * More functions are now documented and available on godoc.org.
+
+# Version 1.0.1
+
+ 1. Fix package references from repository location change.
+
+# Version 1.0.0
+
+This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using the IDispatch interface.
+
+There is no changelog for this version. Check commits for history.
diff --git a/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/go-ole/go-ole/LICENSE
new file mode 100644
index 00000000..623ec06f
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright © 2013-2017 Yasuhiro Matsumoto,
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the “Software”), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/go-ole/go-ole/README.md
new file mode 100644
index 00000000..0ea9db33
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/README.md
@@ -0,0 +1,46 @@
+# Go OLE
+
+[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28)
+[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole)
+[![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole)
+
+Go bindings for Windows COM using shared libraries instead of cgo.
+
+By Yasuhiro Matsumoto.
+
+## Install
+
+To experiment with go-ole, you can just compile and run the example program:
+
+```
+go get github.com/go-ole/go-ole
+cd /path/to/go-ole/
+go test
+
+cd /path/to/go-ole/example/excel
+go run excel.go
+```
+
+## Continuous Integration
+
+Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to add these to your own account for your fork in order for it to run.
+
+**Travis-CI**
+
+Travis-CI was added to check builds on Linux and to ensure that `go get` works when cross building. Currently, Travis-CI is not used to test cross-building, but this may change in the future. It is also not currently possible to test the library on Linux, since the COM API is specific to Windows and it is not currently possible to run a COM server on Linux or even connect to a remote COM server.
+
+**AppVeyor**
+
+AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. It will be used to register a COM server and then run the test cases based on the test COM server.
+
+The tests currently run and pass; this should be maintained with future commits.
+
+## Versioning
+
+Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language: the major version will always maintain backwards compatibility with minor versions. Minor versions will only add new additions and changes.
Fixes will always be in patch releases.
+
+This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. If there is breakage, please open a ticket so that it can be fixed.
+
+## LICENSE
+
+Under the MIT License: http://mattn.mit-license.org/2013
diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml
new file mode 100644
index 00000000..0d557ac2
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/appveyor.yml
@@ -0,0 +1,54 @@
+# Notes:
+#   - Minimal appveyor.yml file is an empty file. All sections are optional.
+#   - Indent each level of configuration with 2 spaces. Do not use tabs!
+#   - All section names are case-sensitive.
+#   - Section names should be unique on each level.
+
+version: "1.3.0.{build}-alpha-{branch}"
+
+os: Windows Server 2012 R2
+
+branches:
+  only:
+    - master
+    - v1.2
+    - v1.1
+    - v1.0
+
+skip_tags: true
+
+clone_folder: c:\gopath\src\github.com\go-ole\go-ole
+
+environment:
+  GOPATH: c:\gopath
+  matrix:
+  - GOARCH: amd64
+    GOVERSION: 1.5
+    GOROOT: c:\go
+    DOWNLOADPLATFORM: "x64"
+
+install:
+  - choco install mingw
+  - SET PATH=c:\tools\mingw64\bin;%PATH%
+  # - Download COM Server
+  - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip"
+  - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL
+  - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat
+  # - set
+  - go version
+  - go env
+  - go get -u golang.org/x/tools/cmd/cover
+  - go get -u golang.org/x/tools/cmd/godoc
+  - go get -u golang.org/x/tools/cmd/stringer
+
+build_script:
+  - cd c:\gopath\src\github.com\go-ole\go-ole
+  - go get -v -t ./...
+  - go build
+  - go test -v -cover ./...
+ +# disable automatic tests +test: off + +# disable deployment +deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go new file mode 100644 index 00000000..75ebbf13 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/com.go @@ -0,0 +1,329 @@ +// +build windows + +package ole + +import ( + "errors" + "syscall" + "time" + "unicode/utf16" + "unsafe" +) + +var ( + procCoInitialize, _ = modole32.FindProc("CoInitialize") + procCoInitializeEx, _ = modole32.FindProc("CoInitializeEx") + procCoUninitialize, _ = modole32.FindProc("CoUninitialize") + procCoCreateInstance, _ = modole32.FindProc("CoCreateInstance") + procCoTaskMemFree, _ = modole32.FindProc("CoTaskMemFree") + procCLSIDFromProgID, _ = modole32.FindProc("CLSIDFromProgID") + procCLSIDFromString, _ = modole32.FindProc("CLSIDFromString") + procStringFromCLSID, _ = modole32.FindProc("StringFromCLSID") + procStringFromIID, _ = modole32.FindProc("StringFromIID") + procIIDFromString, _ = modole32.FindProc("IIDFromString") + procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID") + procCopyMemory, _ = modkernel32.FindProc("RtlMoveMemory") + procVariantInit, _ = modoleaut32.FindProc("VariantInit") + procVariantClear, _ = modoleaut32.FindProc("VariantClear") + procVariantTimeToSystemTime, _ = modoleaut32.FindProc("VariantTimeToSystemTime") + procSysAllocString, _ = modoleaut32.FindProc("SysAllocString") + procSysAllocStringLen, _ = modoleaut32.FindProc("SysAllocStringLen") + procSysFreeString, _ = modoleaut32.FindProc("SysFreeString") + procSysStringLen, _ = modoleaut32.FindProc("SysStringLen") + procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo") + procCreateStdDispatch, _ = modoleaut32.FindProc("CreateStdDispatch") + procGetActiveObject, _ = modoleaut32.FindProc("GetActiveObject") + + procGetMessageW, _ = moduser32.FindProc("GetMessageW") + procDispatchMessageW, _ = moduser32.FindProc("DispatchMessageW") +) + +// coInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func coInitialize() (err error) { + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx + // Suggests that no value should be passed to CoInitialized. + // Could just be Call() since the parameter is optional. <-- Needs testing to be sure. + hr, _, _ := procCoInitialize.Call(uintptr(0)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// coInitializeEx initializes COM library with concurrency model. +func coInitializeEx(coinit uint32) (err error) { + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx + // Suggests that the first parameter is not only optional but should always be NULL. + hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// CoInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. 
If you are experiencing threading issues, then use
+// CoInitializeEx().
+func CoInitialize(p uintptr) (err error) {
+    // p is ignored and won't be used.
+    // Avoid any variable not used errors.
+    p = uintptr(0)
+    return coInitialize()
+}
+
+// CoInitializeEx initializes COM library with concurrency model.
+func CoInitializeEx(p uintptr, coinit uint32) (err error) {
+    // Avoid any variable not used errors.
+    p = uintptr(0)
+    return coInitializeEx(coinit)
+}
+
+// CoUninitialize uninitializes COM Library.
+func CoUninitialize() {
+    procCoUninitialize.Call()
+}
+
+// CoTaskMemFree frees memory pointer.
+func CoTaskMemFree(memptr uintptr) {
+    procCoTaskMemFree.Call(memptr)
+}
+
+// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier.
+//
+// The Programmatic Identifier must be registered, because it will be looked up
+// in the Windows Registry. The registry entry has the following keys: CLSID,
+// Insertable, Protocol and Shell
+// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
+//
+// programID identifies the class id with less precision and is not guaranteed
+// to be unique. These are usually found in the registry under
+// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
+// "Program.Component.Version" with version being optional.
+//
+// CLSIDFromProgID in Windows API.
+func CLSIDFromProgID(progId string) (clsid *GUID, err error) {
+    var guid GUID
+    lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
+    hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    clsid = &guid
+    return
+}
+
+// CLSIDFromString retrieves Class ID from string representation.
+//
+// This is technically the string version of the GUID and will convert the
+// string to an object.
+//
+// CLSIDFromString in Windows API.
+func CLSIDFromString(str string) (clsid *GUID, err error) {
+    var guid GUID
+    lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str)))
+    hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    clsid = &guid
+    return
+}
+
+// StringFromCLSID returns a GUID-formatted string from a GUID object.
+func StringFromCLSID(clsid *GUID) (str string, err error) {
+    var p *uint16
+    hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    str = LpOleStrToString(p)
+    return
+}
+
+// IIDFromString returns a GUID from its string representation.
+func IIDFromString(progId string) (clsid *GUID, err error) {
+    var guid GUID
+    lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
+    hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    clsid = &guid
+    return
+}
+
+// StringFromIID returns a GUID-formatted string from a GUID object.
+func StringFromIID(iid *GUID) (str string, err error) {
+    var p *uint16
+    hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    str = LpOleStrToString(p)
+    return
+}
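As an aside for reviewers, the identifier helpers above compose naturally. A minimal Windows-only sketch, written for this review and not part of the vendored file; "Excel.Application" is just an example ProgID and assumes Excel is registered on the machine (on other platforms these calls return E_NOTIMPL per com_func.go):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ole/go-ole" // package name is "ole"
)

func main() {
	// Look up the CLSID registered for a ProgID, then render it back
	// as a canonical {XXXXXXXX-...} string.
	clsid, err := ole.CLSIDFromProgID("Excel.Application")
	if err != nil {
		log.Fatal(err) // fails on machines without the ProgID registered
	}
	s, err := ole.StringFromCLSID(clsid)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CLSID:", s)
}
```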
+// CreateInstance of single uninitialized object with GUID.
+func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
+    if iid == nil {
+        iid = IID_IUnknown
+    }
+    hr, _, _ := procCoCreateInstance.Call(
+        uintptr(unsafe.Pointer(clsid)),
+        0,
+        CLSCTX_SERVER,
+        uintptr(unsafe.Pointer(iid)),
+        uintptr(unsafe.Pointer(&unk)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    return
+}
+
+// GetActiveObject retrieves pointer to active object.
+func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
+    if iid == nil {
+        iid = IID_IUnknown
+    }
+    hr, _, _ := procGetActiveObject.Call(
+        uintptr(unsafe.Pointer(clsid)),
+        uintptr(unsafe.Pointer(iid)),
+        uintptr(unsafe.Pointer(&unk)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    return
+}
+
+// VariantInit initializes a variant.
+func VariantInit(v *VARIANT) (err error) {
+    hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    return
+}
+
+// VariantClear clears value in Variant settings to VT_EMPTY.
+func VariantClear(v *VARIANT) (err error) {
+    hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    return
+}
+
+// SysAllocString allocates memory for string and copies string into memory.
+func SysAllocString(v string) (ss *int16) {
+    pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v))))
+    ss = (*int16)(unsafe.Pointer(pss))
+    return
+}
+
+// SysAllocStringLen copies up to the length of the given string, returning a pointer.
+func SysAllocStringLen(v string) (ss *int16) {
+    utf16 := utf16.Encode([]rune(v + "\x00"))
+    ptr := &utf16[0]
+
+    pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1))
+    ss = (*int16)(unsafe.Pointer(pss))
+    return
+}
+
+// SysFreeString frees the string's system memory. This must be called for
+// strings allocated with SysAllocString.
+func SysFreeString(v *int16) (err error) {
+    hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    return
+}
+
+// SysStringLen is the length of the system allocated string.
+func SysStringLen(v *int16) uint32 {
+    l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v)))
+    return uint32(l)
+}
+
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles the default IDispatch implementation for objects. It has a few
+// limitations, such as only supporting one language. It will also only return
+// default exception codes.
+func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) {
+    hr, _, _ := procCreateStdDispatch.Call(
+        uintptr(unsafe.Pointer(unk)),
+        v,
+        uintptr(unsafe.Pointer(ptinfo)),
+        uintptr(unsafe.Pointer(&disp)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    return
+}
+
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
+func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) {
+    hr, _, _ := procCreateDispTypeInfo.Call(
+        uintptr(unsafe.Pointer(idata)),
+        uintptr(GetUserDefaultLCID()),
+        uintptr(unsafe.Pointer(&pptinfo)))
+    if hr != 0 {
+        err = NewError(hr)
+    }
+    return
+}
+
+// copyMemory moves a block of memory from src to dest.
+func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {
+    procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length))
+}
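To illustrate the allocation pairing that the SysFreeString comment above describes, a small Windows-only sketch (illustrative only, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/go-ole/go-ole" // package name is "ole"
)

func main() {
	// Every SysAllocString must eventually be paired with SysFreeString,
	// otherwise the system-allocated BSTR memory leaks.
	bstr := ole.SysAllocString("hello")
	defer ole.SysFreeString(bstr)

	fmt.Println(ole.SysStringLen(bstr)) // prints 5
}
```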
+// GetUserDefaultLCID retrieves current user default locale.
+func GetUserDefaultLCID() (lcid uint32) {
+    ret, _, _ := procGetUserDefaultLCID.Call()
+    lcid = uint32(ret)
+    return
+}
+
+// GetMessage retrieves a message from the message queue.
+//
+// This function appears to block. PeekMessage does not block.
+func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) {
+    r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax))
+    ret = int32(r0)
+    return
+}
+
+// DispatchMessage dispatches a message to a window procedure.
+func DispatchMessage(msg *Msg) (ret int32) {
+    r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg)))
+    ret = int32(r0)
+    return
+}
+
+// GetVariantDate converts a COM Variant Time value to Go time.Time.
+func GetVariantDate(value float64) (time.Time, error) {
+    var st syscall.Systemtime
+    r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st)))
+    if r != 0 {
+        // SYSTEMTIME carries milliseconds; time.Date expects nanoseconds.
+        return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds)*1000000, time.UTC), nil
+    }
+    return time.Now(), errors.New("Could not convert to time, passing current time.")
+}
diff --git a/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/go-ole/go-ole/com_func.go
new file mode 100644
index 00000000..425aad32
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/com_func.go
@@ -0,0 +1,174 @@
+// +build !windows
+
+package ole
+
+import (
+    "time"
+    "unsafe"
+)
+
+// coInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func coInitialize() error {
+    return NewError(E_NOTIMPL)
+}
+
+// coInitializeEx initializes COM library with concurrency model.
+func coInitializeEx(coinit uint32) error {
+    return NewError(E_NOTIMPL)
+}
+
+// CoInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func CoInitialize(p uintptr) error {
+    return NewError(E_NOTIMPL)
+}
+
+// CoInitializeEx initializes COM library with concurrency model.
+func CoInitializeEx(p uintptr, coinit uint32) error {
+    return NewError(E_NOTIMPL)
+}
+
+// CoUninitialize uninitializes COM Library.
+func CoUninitialize() {}
+
+// CoTaskMemFree frees memory pointer.
+func CoTaskMemFree(memptr uintptr) {}
+
+// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier.
+//
+// The Programmatic Identifier must be registered, because it will be looked up
+// in the Windows Registry. The registry entry has the following keys: CLSID,
+// Insertable, Protocol and Shell
+// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
+//
+// programID identifies the class id with less precision and is not guaranteed
+// to be unique. These are usually found in the registry under
+// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
+// "Program.Component.Version" with version being optional.
+//
+// CLSIDFromProgID in Windows API.
+func CLSIDFromProgID(progId string) (*GUID, error) {
+    return nil, NewError(E_NOTIMPL)
+}
+
+// CLSIDFromString retrieves Class ID from string representation.
+//
+// This is technically the string version of the GUID and will convert the
+// string to an object.
+//
+// CLSIDFromString in Windows API.
+func CLSIDFromString(str string) (*GUID, error) {
+    return nil, NewError(E_NOTIMPL)
+}
+
+// StringFromCLSID returns a GUID-formatted string from a GUID object.
+func StringFromCLSID(clsid *GUID) (string, error) {
+    return "", NewError(E_NOTIMPL)
+}
+
+// IIDFromString returns a GUID from its string representation.
+func IIDFromString(progId string) (*GUID, error) {
+    return nil, NewError(E_NOTIMPL)
+}
+
+// StringFromIID returns a GUID-formatted string from a GUID object.
+func StringFromIID(iid *GUID) (string, error) {
+    return "", NewError(E_NOTIMPL)
+}
+
+// CreateInstance of single uninitialized object with GUID.
+func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) {
+    return nil, NewError(E_NOTIMPL)
+}
+
+// GetActiveObject retrieves pointer to active object.
+func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) {
+    return nil, NewError(E_NOTIMPL)
+}
+
+// VariantInit initializes a variant.
+func VariantInit(v *VARIANT) error {
+    return NewError(E_NOTIMPL)
+}
+
+// VariantClear clears value in Variant settings to VT_EMPTY.
+func VariantClear(v *VARIANT) error {
+    return NewError(E_NOTIMPL)
+}
+
+// SysAllocString allocates memory for string and copies string into memory.
+func SysAllocString(v string) *int16 {
+    u := int16(0)
+    return &u
+}
+
+// SysAllocStringLen copies up to the length of the given string, returning a pointer.
+func SysAllocStringLen(v string) *int16 {
+    u := int16(0)
+    return &u
+}
+
+// SysFreeString frees the string's system memory. This must be called for
+// strings allocated with SysAllocString.
+func SysFreeString(v *int16) error {
+    return NewError(E_NOTIMPL)
+}
+
+// SysStringLen is the length of the system allocated string.
+func SysStringLen(v *int16) uint32 {
+    return uint32(0)
+}
+
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles the default IDispatch implementation for objects. It has a few
+// limitations, such as only supporting one language. It will also only return
+// default exception codes.
+func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) {
+    return nil, NewError(E_NOTIMPL)
+}
+
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
+func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) {
+    return nil, NewError(E_NOTIMPL)
+}
+
+// copyMemory moves a block of memory from src to dest.
+func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {}
+
+// GetUserDefaultLCID retrieves current user default locale.
+func GetUserDefaultLCID() uint32 {
+    return uint32(0)
+}
+
+// GetMessage retrieves a message from the message queue.
+//
+// This function appears to block. PeekMessage does not block.
+func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) {
+    return int32(0), NewError(E_NOTIMPL)
+}
+
+// DispatchMessage dispatches a message to a window procedure.
+func DispatchMessage(msg *Msg) int32 {
+    return int32(0)
+}
+
+// GetVariantDate converts a COM Variant Time value to Go time.Time.
+func GetVariantDate(value float64) (time.Time, error) {
+    return time.Now(), NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/connect.go b/vendor/github.com/go-ole/go-ole/connect.go
new file mode 100644
index 00000000..b2ac2ec6
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/connect.go
@@ -0,0 +1,192 @@
+package ole
+
+// Connection contains IUnknown for fluent interface interaction.
+//
+// Deprecated. Use oleutil package instead.
+type Connection struct {
+    Object *IUnknown // Access COM
+}
+
+// Initialize COM.
+func (*Connection) Initialize() (err error) {
+    return coInitialize()
+}
+
+// Uninitialize COM.
+func (*Connection) Uninitialize() {
+    CoUninitialize()
+}
+
+// Create IUnknown object based first on ProgId and then from String.
+func (c *Connection) Create(progId string) (err error) {
+    var clsid *GUID
+    clsid, err = CLSIDFromProgID(progId)
+    if err != nil {
+        clsid, err = CLSIDFromString(progId)
+        if err != nil {
+            return
+        }
+    }
+
+    unknown, err := CreateInstance(clsid, IID_IUnknown)
+    if err != nil {
+        return
+    }
+    c.Object = unknown
+
+    return
+}
+
+// Release IUnknown object.
+func (c *Connection) Release() {
+    c.Object.Release()
+}
+
+// Load COM object from list of programIDs or strings.
+func (c *Connection) Load(names ...string) (errors []error) {
+    // Collect errors until one of the names loads. The slice must start
+    // with length zero; a pre-sized slice would return leading nil entries.
+    tempErrors := make([]error, 0, len(names))
+    for _, name := range names {
+        err := c.Create(name)
+        if err != nil {
+            tempErrors = append(tempErrors, err)
+            continue
+        }
+        break
+    }
+
+    errors = append(errors, tempErrors...)
+    return
+}
+
+// Dispatch returns Dispatch object.
+func (c *Connection) Dispatch() (object *Dispatch, err error) {
+    dispatch, err := c.Object.QueryInterface(IID_IDispatch)
+    if err != nil {
+        return
+    }
+    object = &Dispatch{dispatch}
+    return
+}
+
+// Dispatch stores IDispatch object.
+type Dispatch struct {
+    Object *IDispatch // Dispatch object.
+}
+
+// Call method on IDispatch with parameters.
+func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) {
+    id, err := d.GetId(method)
+    if err != nil {
+        return
+    }
+
+    result, err = d.Invoke(id, DISPATCH_METHOD, params)
+    return
+}
+
+// MustCall method on IDispatch with parameters.
+func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) {
+    id, err := d.GetId(method)
+    if err != nil {
+        panic(err)
+    }
+
+    result, err = d.Invoke(id, DISPATCH_METHOD, params)
+    if err != nil {
+        panic(err)
+    }
+
+    return
+}
+
+// Get property on IDispatch with parameters.
+func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) {
+    id, err := d.GetId(name)
+    if err != nil {
+        return
+    }
+    result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params)
+    return
+}
+
+// MustGet property on IDispatch with parameters.
+func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) {
+    id, err := d.GetId(name)
+    if err != nil {
+        panic(err)
+    }
+
+    result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params)
+    if err != nil {
+        panic(err)
+    }
+    return
+}
+
+// Set property on IDispatch with parameters.
+func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) {
+    id, err := d.GetId(name)
+    if err != nil {
+        return
+    }
+    result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params)
+    return
+}
+
+// MustSet property on IDispatch with parameters.
+func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) {
+    id, err := d.GetId(name)
+    if err != nil {
+        panic(err)
+    }
+
+    result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params)
+    if err != nil {
+        panic(err)
+    }
+    return
+}
+
+// GetId retrieves ID of name on IDispatch.
+func (d *Dispatch) GetId(name string) (id int32, err error) {
+    var dispid []int32
+    dispid, err = d.Object.GetIDsOfName([]string{name})
+    if err != nil {
+        return
+    }
+    id = dispid[0]
+    return
+}
+
+// GetIds retrieves all IDs of names on IDispatch.
+func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) {
+    dispid, err = d.Object.GetIDsOfName(names)
+    return
+}
+
+// Invoke IDispatch on DisplayID of dispatch type with parameters.
+//
+// There have been problems where, if cascading params... were passed through,
+// the call would error out because the parameters would be empty.
+func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) {
+    if len(params) < 1 {
+        result, err = d.Object.Invoke(id, dispatch)
+    } else {
+        result, err = d.Object.Invoke(id, dispatch, params...)
+    }
+    return
+}
+
+// Release IDispatch object.
+func (d *Dispatch) Release() {
+    d.Object.Release()
+}
+
+// Connect initializes COM and attempts to load IUnknown based on given names.
+func Connect(names ...string) (connection *Connection) {
+    // connection must be allocated first; calling methods on the nil named
+    // return value would panic.
+    connection = &Connection{}
+    connection.Initialize()
+    connection.Load(names...)
+    return
+}
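A sketch of how the (deprecated) fluent API in connect.go above is meant to be driven, Windows-only and written for this review. "Excel.Application" is an example ProgID, the Quit method is specific to that COM server, and the sketch assumes Connect succeeded since Connect itself discards load errors:

```go
package main

import (
	"log"

	"github.com/go-ole/go-ole" // package name is "ole"
)

func main() {
	// Connect initializes COM and loads the first ProgID that resolves.
	conn := ole.Connect("Excel.Application")
	defer conn.Uninitialize()
	defer conn.Release()

	disp, err := conn.Dispatch()
	if err != nil {
		log.Fatal(err)
	}
	defer disp.Release()

	// Invoke a method by name via IDispatch.
	if _, err := disp.Call("Quit"); err != nil {
		log.Fatal(err)
	}
}
```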
diff --git a/vendor/github.com/go-ole/go-ole/constants.go b/vendor/github.com/go-ole/go-ole/constants.go
new file mode 100644
index 00000000..fd0c6d74
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/constants.go
@@ -0,0 +1,153 @@
+package ole
+
+const (
+    CLSCTX_INPROC_SERVER   = 1
+    CLSCTX_INPROC_HANDLER  = 2
+    CLSCTX_LOCAL_SERVER    = 4
+    CLSCTX_INPROC_SERVER16 = 8
+    CLSCTX_REMOTE_SERVER   = 16
+    CLSCTX_ALL             = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER
+    CLSCTX_INPROC          = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER
+    CLSCTX_SERVER          = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER
+)
+
+const (
+    COINIT_APARTMENTTHREADED = 0x2
+    COINIT_MULTITHREADED     = 0x0
+    COINIT_DISABLE_OLE1DDE   = 0x4
+    COINIT_SPEED_OVER_MEMORY = 0x8
+)
+
+const (
+    DISPATCH_METHOD         = 1
+    DISPATCH_PROPERTYGET    = 2
+    DISPATCH_PROPERTYPUT    = 4
+    DISPATCH_PROPERTYPUTREF = 8
+)
+
+const (
+    S_OK           = 0x00000000
+    E_UNEXPECTED   = 0x8000FFFF
+    E_NOTIMPL      = 0x80004001
+    E_OUTOFMEMORY  = 0x8007000E
+    E_INVALIDARG   = 0x80070057
+    E_NOINTERFACE  = 0x80004002
+    E_POINTER      = 0x80004003
+    E_HANDLE       = 0x80070006
+    E_ABORT        = 0x80004004
+    E_FAIL         = 0x80004005
+    E_ACCESSDENIED = 0x80070005
+    E_PENDING      = 0x8000000A
+
+    CO_E_CLASSSTRING = 0x800401F3
+)
+
+const (
+    CC_FASTCALL = iota
+    CC_CDECL
+    CC_MSCPASCAL
+    CC_PASCAL = CC_MSCPASCAL
+    CC_MACPASCAL
+    CC_STDCALL
+    CC_FPFASTCALL
+    CC_SYSCALL
+    CC_MPWCDECL
+    CC_MPWPASCAL
+    CC_MAX = CC_MPWPASCAL
+)
+
+type VT uint16
+
+const (
+    VT_EMPTY       VT = 0x0
+    VT_NULL        VT = 0x1
+    VT_I2          VT = 0x2
+    VT_I4          VT = 0x3
+    VT_R4          VT = 0x4
+    VT_R8          VT = 0x5
+    VT_CY          VT = 0x6
+    VT_DATE        VT = 0x7
+    VT_BSTR        VT = 0x8
+    VT_DISPATCH    VT = 0x9
+    VT_ERROR       VT = 0xa
+    VT_BOOL        VT = 0xb
+    VT_VARIANT     VT = 0xc
+    VT_UNKNOWN     VT = 0xd
+    VT_DECIMAL     VT = 0xe
+    VT_I1          VT = 0x10
+    VT_UI1         VT = 0x11
+    VT_UI2         VT = 0x12
+    VT_UI4         VT = 0x13
+    VT_I8          VT = 0x14
+    VT_UI8         VT = 0x15
+    VT_INT         VT = 0x16
+    VT_UINT        VT = 0x17
+    VT_VOID        VT = 0x18
+    VT_HRESULT     VT = 0x19
+    VT_PTR         VT = 0x1a
+    VT_SAFEARRAY   VT = 0x1b
+    VT_CARRAY      VT = 0x1c
+    VT_USERDEFINED VT = 0x1d
+    VT_LPSTR       VT = 0x1e
+    VT_LPWSTR      VT = 0x1f
+    VT_RECORD      VT = 0x24
+
VT_INT_PTR VT = 0x25 + VT_UINT_PTR VT = 0x26 + VT_FILETIME VT = 0x40 + VT_BLOB VT = 0x41 + VT_STREAM VT = 0x42 + VT_STORAGE VT = 0x43 + VT_STREAMED_OBJECT VT = 0x44 + VT_STORED_OBJECT VT = 0x45 + VT_BLOB_OBJECT VT = 0x46 + VT_CF VT = 0x47 + VT_CLSID VT = 0x48 + VT_BSTR_BLOB VT = 0xfff + VT_VECTOR VT = 0x1000 + VT_ARRAY VT = 0x2000 + VT_BYREF VT = 0x4000 + VT_RESERVED VT = 0x8000 + VT_ILLEGAL VT = 0xffff + VT_ILLEGALMASKED VT = 0xfff + VT_TYPEMASK VT = 0xfff +) + +const ( + DISPID_UNKNOWN = -1 + DISPID_VALUE = 0 + DISPID_PROPERTYPUT = -3 + DISPID_NEWENUM = -4 + DISPID_EVALUATE = -5 + DISPID_CONSTRUCTOR = -6 + DISPID_DESTRUCTOR = -7 + DISPID_COLLECT = -8 +) + +const ( + TKIND_ENUM = 1 + TKIND_RECORD = 2 + TKIND_MODULE = 3 + TKIND_INTERFACE = 4 + TKIND_DISPATCH = 5 + TKIND_COCLASS = 6 + TKIND_ALIAS = 7 + TKIND_UNION = 8 + TKIND_MAX = 9 +) + +// Safe Array Feature Flags + +const ( + FADF_AUTO = 0x0001 + FADF_STATIC = 0x0002 + FADF_EMBEDDED = 0x0004 + FADF_FIXEDSIZE = 0x0010 + FADF_RECORD = 0x0020 + FADF_HAVEIID = 0x0040 + FADF_HAVEVARTYPE = 0x0080 + FADF_BSTR = 0x0100 + FADF_UNKNOWN = 0x0200 + FADF_DISPATCH = 0x0400 + FADF_VARIANT = 0x0800 + FADF_RESERVED = 0xF008 +) diff --git a/vendor/github.com/go-ole/go-ole/error.go b/vendor/github.com/go-ole/go-ole/error.go new file mode 100644 index 00000000..096b456d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error.go @@ -0,0 +1,51 @@ +package ole + +// OleError stores COM errors. +type OleError struct { + hr uintptr + description string + subError error +} + +// NewError creates new error with HResult. +func NewError(hr uintptr) *OleError { + return &OleError{hr: hr} +} + +// NewErrorWithDescription creates new COM error with HResult and description. +func NewErrorWithDescription(hr uintptr, description string) *OleError { + return &OleError{hr: hr, description: description} +} + +// NewErrorWithSubError creates new COM error with parent error. +func NewErrorWithSubError(hr uintptr, description string, err error) *OleError { + return &OleError{hr: hr, description: description, subError: err} +} + +// Code is the HResult. +func (v *OleError) Code() uintptr { + return uintptr(v.hr) +} + +// String description, either manually set or format message with error code. +func (v *OleError) String() string { + if v.description != "" { + return errstr(int(v.hr)) + " (" + v.description + ")" + } + return errstr(int(v.hr)) +} + +// Error implements error interface. +func (v *OleError) Error() string { + return v.String() +} + +// Description retrieves error summary, if there is one. +func (v *OleError) Description() string { + return v.description +} + +// SubError returns parent error, if there is one. +func (v *OleError) SubError() error { + return v.subError +} diff --git a/vendor/github.com/go-ole/go-ole/error_func.go b/vendor/github.com/go-ole/go-ole/error_func.go new file mode 100644 index 00000000..8a2ffaa2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error_func.go @@ -0,0 +1,8 @@ +// +build !windows + +package ole + +// errstr converts error code to string. +func errstr(errno int) string { + return "" +} diff --git a/vendor/github.com/go-ole/go-ole/error_windows.go b/vendor/github.com/go-ole/go-ole/error_windows.go new file mode 100644 index 00000000..d0e8e685 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error_windows.go @@ -0,0 +1,24 @@ +// +build windows + +package ole + +import ( + "fmt" + "syscall" + "unicode/utf16" +) + +// errstr converts error code to string. 
+func errstr(errno int) string { + // ask windows for the remaining errors + var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS + b := make([]uint16, 300) + n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil) + if err != nil { + return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} diff --git a/vendor/github.com/go-ole/go-ole/guid.go b/vendor/github.com/go-ole/go-ole/guid.go new file mode 100644 index 00000000..8d20f68f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/guid.go @@ -0,0 +1,284 @@ +package ole + +var ( + // IID_NULL is null Interface ID, used when no other Interface ID is known. + IID_NULL = NewGUID("{00000000-0000-0000-0000-000000000000}") + + // IID_IUnknown is for IUnknown interfaces. + IID_IUnknown = NewGUID("{00000000-0000-0000-C000-000000000046}") + + // IID_IDispatch is for IDispatch interfaces. + IID_IDispatch = NewGUID("{00020400-0000-0000-C000-000000000046}") + + // IID_IEnumVariant is for IEnumVariant interfaces + IID_IEnumVariant = NewGUID("{00020404-0000-0000-C000-000000000046}") + + // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces. + IID_IConnectionPointContainer = NewGUID("{B196B284-BAB4-101A-B69C-00AA00341D07}") + + // IID_IConnectionPoint is for IConnectionPoint interfaces. + IID_IConnectionPoint = NewGUID("{B196B286-BAB4-101A-B69C-00AA00341D07}") + + // IID_IInspectable is for IInspectable interfaces. + IID_IInspectable = NewGUID("{AF86E2E0-B12D-4C6A-9C5A-D7AA65101E90}") + + // IID_IProvideClassInfo is for IProvideClassInfo interfaces. + IID_IProvideClassInfo = NewGUID("{B196B283-BAB4-101A-B69C-00AA00341D07}") +) + +// These are for testing and not part of any library. +var ( + // IID_ICOMTestString is for ICOMTestString interfaces. + // + // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED} + IID_ICOMTestString = NewGUID("{E0133EB4-C36F-469A-9D3D-C66B84BE19ED}") + + // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces. + // + // {BEB06610-EB84-4155-AF58-E2BFF53680B4} + IID_ICOMTestInt8 = NewGUID("{BEB06610-EB84-4155-AF58-E2BFF53680B4}") + + // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces. + // + // {DAA3F9FA-761E-4976-A860-8364CE55F6FC} + IID_ICOMTestInt16 = NewGUID("{DAA3F9FA-761E-4976-A860-8364CE55F6FC}") + + // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces. + // + // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0} + IID_ICOMTestInt32 = NewGUID("{E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}") + + // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces. + // + // {8D437CBC-B3ED-485C-BC32-C336432A1623} + IID_ICOMTestInt64 = NewGUID("{8D437CBC-B3ED-485C-BC32-C336432A1623}") + + // IID_ICOMTestFloat is for ICOMTestFloat interfaces. + // + // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C} + IID_ICOMTestFloat = NewGUID("{BF1ED004-EA02-456A-AA55-2AC8AC6B054C}") + + // IID_ICOMTestDouble is for ICOMTestDouble interfaces. + // + // {BF908A81-8687-4E93-999F-D86FAB284BA0} + IID_ICOMTestDouble = NewGUID("{BF908A81-8687-4E93-999F-D86FAB284BA0}") + + // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces. + // + // {D530E7A6-4EE8-40D1-8931-3D63B8605010} + IID_ICOMTestBoolean = NewGUID("{D530E7A6-4EE8-40D1-8931-3D63B8605010}") + + // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces. 
+    //
+    // {6485B1EF-D780-4834-A4FE-1EBB51746CA3}
+    IID_ICOMEchoTestObject = NewGUID("{6485B1EF-D780-4834-A4FE-1EBB51746CA3}")
+
+    // IID_ICOMTestTypes is for ICOMTestTypes interfaces.
+    //
+    // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}
+    IID_ICOMTestTypes = NewGUID("{CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}")
+
+    // CLSID_COMEchoTestObject is for COMEchoTestObject class.
+    //
+    // {3C24506A-AE9E-4D50-9157-EF317281F1B0}
+    CLSID_COMEchoTestObject = NewGUID("{3C24506A-AE9E-4D50-9157-EF317281F1B0}")
+
+    // CLSID_COMTestScalarClass is for COMTestScalarClass class.
+    //
+    // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}
+    CLSID_COMTestScalarClass = NewGUID("{865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}")
+)
+
+const hextable = "0123456789ABCDEF"
+const emptyGUID = "{00000000-0000-0000-0000-000000000000}"
+
+// GUID is Windows API specific GUID type.
+//
+// This exists to match Windows GUID type for direct passing for COM.
+// Format is xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+type GUID struct {
+    Data1 uint32
+    Data2 uint16
+    Data3 uint16
+    Data4 [8]byte
+}
+
+// NewGUID converts the given string into a globally unique identifier that is
+// compliant with the Windows API.
+//
+// The supplied string may be in any of these formats:
+//
+//	XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+//	XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+//	{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
+//
+// The conversion of the supplied string is not case-sensitive.
+func NewGUID(guid string) *GUID {
+    d := []byte(guid)
+    var d1, d2, d3, d4a, d4b []byte
+
+    switch len(d) {
+    case 38:
+        if d[0] != '{' || d[37] != '}' {
+            return nil
+        }
+        d = d[1:37]
+        fallthrough
+    case 36:
+        if d[8] != '-' || d[13] != '-' || d[18] != '-' || d[23] != '-' {
+            return nil
+        }
+        d1 = d[0:8]
+        d2 = d[9:13]
+        d3 = d[14:18]
+        d4a = d[19:23]
+        d4b = d[24:36]
+    case 32:
+        d1 = d[0:8]
+        d2 = d[8:12]
+        d3 = d[12:16]
+        d4a = d[16:20]
+        d4b = d[20:32]
+    default:
+        return nil
+    }
+
+    var g GUID
+    var ok1, ok2, ok3, ok4 bool
+    g.Data1, ok1 = decodeHexUint32(d1)
+    g.Data2, ok2 = decodeHexUint16(d2)
+    g.Data3, ok3 = decodeHexUint16(d3)
+    g.Data4, ok4 = decodeHexByte64(d4a, d4b)
+    if ok1 && ok2 && ok3 && ok4 {
+        return &g
+    }
+    return nil
+}
+
+func decodeHexUint32(src []byte) (value uint32, ok bool) {
+    var b1, b2, b3, b4 byte
+    var ok1, ok2, ok3, ok4 bool
+    b1, ok1 = decodeHexByte(src[0], src[1])
+    b2, ok2 = decodeHexByte(src[2], src[3])
+    b3, ok3 = decodeHexByte(src[4], src[5])
+    b4, ok4 = decodeHexByte(src[6], src[7])
+    value = (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4)
+    ok = ok1 && ok2 && ok3 && ok4
+    return
+}
+
+func decodeHexUint16(src []byte) (value uint16, ok bool) {
+    var b1, b2 byte
+    var ok1, ok2 bool
+    b1, ok1 = decodeHexByte(src[0], src[1])
+    b2, ok2 = decodeHexByte(src[2], src[3])
+    value = (uint16(b1) << 8) | uint16(b2)
+    ok = ok1 && ok2
+    return
+}
+
+func decodeHexByte64(s1 []byte, s2 []byte) (value [8]byte, ok bool) {
+    var ok1, ok2, ok3, ok4, ok5, ok6, ok7, ok8 bool
+    value[0], ok1 = decodeHexByte(s1[0], s1[1])
+    value[1], ok2 = decodeHexByte(s1[2], s1[3])
+    value[2], ok3 = decodeHexByte(s2[0], s2[1])
+    value[3], ok4 = decodeHexByte(s2[2], s2[3])
+    value[4], ok5 = decodeHexByte(s2[4], s2[5])
+    value[5], ok6 = decodeHexByte(s2[6], s2[7])
+    value[6], ok7 = decodeHexByte(s2[8], s2[9])
+    value[7], ok8 = decodeHexByte(s2[10], s2[11])
+    ok = ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8
+    return
+}
+
+func decodeHexByte(c1, c2 byte) (value byte, ok bool) {
+    var n1, n2 byte
+    var ok1, ok2 bool
+    n1, ok1 = decodeHexChar(c1)
+    n2, ok2 = decodeHexChar(c2)
+    value = (n1 << 4) | n2
+    ok = ok1 && ok2
+    return
+}
+
+func decodeHexChar(c byte) (byte, bool) {
+    switch {
+    case '0' <= c && c <= '9':
+        return c - '0', true
+    case 'a' <= c && c <= 'f':
+        return c - 'a' + 10, true
+    case 'A' <= c && c <= 'F':
+        return c - 'A' + 10, true
+    }
+
+    return 0, false
+}
+
+// String converts the GUID to string form. It will adhere to this pattern:
+//
+//	{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
+//
+// If the GUID is nil, the string representation of an empty GUID is returned:
+//
+//	{00000000-0000-0000-0000-000000000000}
+func (guid *GUID) String() string {
+    if guid == nil {
+        return emptyGUID
+    }
+
+    var c [38]byte
+    c[0] = '{'
+    putUint32Hex(c[1:9], guid.Data1)
+    c[9] = '-'
+    putUint16Hex(c[10:14], guid.Data2)
+    c[14] = '-'
+    putUint16Hex(c[15:19], guid.Data3)
+    c[19] = '-'
+    putByteHex(c[20:24], guid.Data4[0:2])
+    c[24] = '-'
+    putByteHex(c[25:37], guid.Data4[2:8])
+    c[37] = '}'
+    return string(c[:])
+}
+
+func putUint32Hex(b []byte, v uint32) {
+    b[0] = hextable[byte(v>>24)>>4]
+    b[1] = hextable[byte(v>>24)&0x0f]
+    b[2] = hextable[byte(v>>16)>>4]
+    b[3] = hextable[byte(v>>16)&0x0f]
+    b[4] = hextable[byte(v>>8)>>4]
+    b[5] = hextable[byte(v>>8)&0x0f]
+    b[6] = hextable[byte(v)>>4]
+    b[7] = hextable[byte(v)&0x0f]
+}
+
+func putUint16Hex(b []byte, v uint16) {
+    b[0] = hextable[byte(v>>8)>>4]
+    b[1] = hextable[byte(v>>8)&0x0f]
+    b[2] = hextable[byte(v)>>4]
+    b[3] = hextable[byte(v)&0x0f]
+}
+
+func putByteHex(dst, src []byte) {
+    for i := 0; i < len(src); i++ {
+        dst[i*2] = hextable[src[i]>>4]
+        dst[i*2+1] = hextable[src[i]&0x0f]
+    }
+}
+
+// IsEqualGUID compares two GUIDs.
+//
+// Not constant time comparison.
+func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool {
+    return guid1.Data1 == guid2.Data1 &&
+        guid1.Data2 == guid2.Data2 &&
+        guid1.Data3 == guid2.Data3 &&
+        guid1.Data4[0] == guid2.Data4[0] &&
+        guid1.Data4[1] == guid2.Data4[1] &&
+        guid1.Data4[2] == guid2.Data4[2] &&
+        guid1.Data4[3] == guid2.Data4[3] &&
+        guid1.Data4[4] == guid2.Data4[4] &&
+        guid1.Data4[5] == guid2.Data4[5] &&
+        guid1.Data4[6] == guid2.Data4[6] &&
+        guid1.Data4[7] == guid2.Data4[7]
+}
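A quick round-trip through the GUID helpers above, for orientation. This snippet was written for this review and is not part of the vendored file; IID_IDispatch is defined earlier in guid.go with exactly this value:

```go
package main

import (
	"fmt"

	"github.com/go-ole/go-ole" // package name is "ole"
)

func main() {
	// NewGUID accepts the 32-, 36-, or 38-character forms; braces and
	// dashes are optional.
	g := ole.NewGUID("00020400-0000-0000-C000-000000000046")
	fmt.Println(g.String())                            // {00020400-0000-0000-C000-000000000046}
	fmt.Println(ole.IsEqualGUID(g, ole.IID_IDispatch)) // true
}
```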
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go
new file mode 100644
index 00000000..9e6c49f4
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go
@@ -0,0 +1,20 @@
+package ole
+
+import "unsafe"
+
+type IConnectionPoint struct {
+    IUnknown
+}
+
+type IConnectionPointVtbl struct {
+    IUnknownVtbl
+    GetConnectionInterface      uintptr
+    GetConnectionPointContainer uintptr
+    Advise                      uintptr
+    Unadvise                    uintptr
+    EnumConnections             uintptr
+}
+
+func (v *IConnectionPoint) VTable() *IConnectionPointVtbl {
+    return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go
new file mode 100644
index 00000000..5414dc3c
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package ole
+
+import "unsafe"
+
+func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 {
+    return int32(0)
+}
+
+func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) {
+    return uint32(0), NewError(E_NOTIMPL)
+}
+
+func (v *IConnectionPoint) Unadvise(cookie uint32) error {
+    return NewError(E_NOTIMPL)
+}
+
+func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) {
+    return NewError(E_NOTIMPL)
+}
diff --git
a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go new file mode 100644 index 00000000..32bc1832 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { + // XXX: This doesn't look like it does what it's supposed to + return release((*IUnknown)(unsafe.Pointer(v))) +} + +func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) { + hr, _, _ := syscall.Syscall( + v.VTable().Advise, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(unknown)), + uintptr(unsafe.Pointer(&cookie))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) { + hr, _, _ := syscall.Syscall( + v.VTable().Unadvise, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(cookie), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go new file mode 100644 index 00000000..165860d1 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go @@ -0,0 +1,17 @@ +package ole + +import "unsafe" + +type IConnectionPointContainer struct { + IUnknown +} + +type IConnectionPointContainerVtbl struct { + IUnknownVtbl + EnumConnectionPoints uintptr + FindConnectionPoint uintptr +} + +func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl { + return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go new file mode 100644 index 00000000..5dfa42aa --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go @@ -0,0 +1,11 @@ +// +build !windows + +package ole + +func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go new file mode 100644 index 00000000..ad30d79e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) { + hr, _, _ := syscall.Syscall( + v.VTable().FindConnectionPoint, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(point))) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch.go b/vendor/github.com/go-ole/go-ole/idispatch.go new file mode 100644 index 00000000..d4af1240 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch.go @@ -0,0 +1,94 @@ +package ole + +import "unsafe" + +type IDispatch struct { + IUnknown +} + +type IDispatchVtbl 
struct { + IUnknownVtbl + GetTypeInfoCount uintptr + GetTypeInfo uintptr + GetIDsOfNames uintptr + Invoke uintptr +} + +func (v *IDispatch) VTable() *IDispatchVtbl { + return (*IDispatchVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IDispatch) GetIDsOfName(names []string) (dispid []int32, err error) { + dispid, err = getIDsOfName(v, names) + return +} + +func (v *IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { + result, err = invoke(v, dispid, dispatch, params...) + return +} + +func (v *IDispatch) GetTypeInfoCount() (c uint32, err error) { + c, err = getTypeInfoCount(v) + return +} + +func (v *IDispatch) GetTypeInfo() (tinfo *ITypeInfo, err error) { + tinfo, err = getTypeInfo(v) + return +} + +// GetSingleIDOfName is a helper that returns single display ID for IDispatch name. +// +// This replaces the common pattern of attempting to get a single name from the list of available +// IDs. It gives the first ID, if it is available. +func (v *IDispatch) GetSingleIDOfName(name string) (displayID int32, err error) { + var displayIDs []int32 + displayIDs, err = v.GetIDsOfName([]string{name}) + if err != nil { + return + } + displayID = displayIDs[0] + return +} + +// InvokeWithOptionalArgs accepts arguments as an array, works like Invoke. +// +// Accepts name and will attempt to retrieve Display ID to pass to Invoke. +// +// Passing params as an array is a workaround that could be fixed in later versions of Go that +// prevent passing empty params. During testing it was discovered that this is an acceptable way of +// getting around not being able to pass params normally. +func (v *IDispatch) InvokeWithOptionalArgs(name string, dispatch int16, params []interface{}) (result *VARIANT, err error) { + displayID, err := v.GetSingleIDOfName(name) + if err != nil { + return + } + + if len(params) < 1 { + result, err = v.Invoke(displayID, dispatch) + } else { + result, err = v.Invoke(displayID, dispatch, params...) + } + + return +} + +// CallMethod invokes named function with arguments on object. +func (v *IDispatch) CallMethod(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_METHOD, params) +} + +// GetProperty retrieves the property with the name with the ability to pass arguments. +// +// Most of the time you will not need to pass arguments as most objects do not allow for this +// feature. Or at least, should not allow for this feature. Some servers don't follow best practices +// and this is provided for those edge cases. +func (v *IDispatch) GetProperty(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYGET, params) +} + +// PutProperty attempts to mutate a property in the object. 
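+// Together with CallMethod and GetProperty above, this completes the basic
+// automation surface of IDispatch. A minimal usage sketch (editorial, not
+// part of upstream go-ole; the "Excel.Application" program ID and the member
+// names are illustrative assumptions):
+//
+//	clsid, _ := ClassIDFrom("Excel.Application")
+//	unknown, _ := CreateInstance(clsid, IID_IUnknown)
+//	app, _ := unknown.QueryInterface(IID_IDispatch)
+//	_, _ = app.PutProperty("Visible", true) // DISPATCH_PROPERTYPUT under the hood
+//	_, _ = app.CallMethod("Quit")           // DISPATCH_METHOD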
+func (v *IDispatch) PutProperty(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYPUT, params) +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_func.go b/vendor/github.com/go-ole/go-ole/idispatch_func.go new file mode 100644 index 00000000..b8fbbe31 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func getIDsOfName(disp *IDispatch, names []string) ([]int32, error) { + return []int32{}, NewError(E_NOTIMPL) +} + +func getTypeInfoCount(disp *IDispatch) (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} + +func getTypeInfo(disp *IDispatch) (*ITypeInfo, error) { + return nil, NewError(E_NOTIMPL) +} + +func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (*VARIANT, error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go new file mode 100644 index 00000000..020e4f51 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go @@ -0,0 +1,197 @@ +// +build windows + +package ole + +import ( + "syscall" + "time" + "unsafe" +) + +func getIDsOfName(disp *IDispatch, names []string) (dispid []int32, err error) { + wnames := make([]*uint16, len(names)) + for i := 0; i < len(names); i++ { + wnames[i] = syscall.StringToUTF16Ptr(names[i]) + } + dispid = make([]int32, len(names)) + namelen := uint32(len(names)) + hr, _, _ := syscall.Syscall6( + disp.VTable().GetIDsOfNames, + 6, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(unsafe.Pointer(&wnames[0])), + uintptr(namelen), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&dispid[0]))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func getTypeInfoCount(disp *IDispatch) (c uint32, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetTypeInfoCount, + 2, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(&c)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func getTypeInfo(disp *IDispatch) (tinfo *ITypeInfo, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetTypeInfo, + 3, + uintptr(unsafe.Pointer(disp)), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&tinfo))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { + var dispparams DISPPARAMS + + if dispatch&DISPATCH_PROPERTYPUT != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 + } else if dispatch&DISPATCH_PROPERTYPUTREF != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 + } + var vargs []VARIANT + if len(params) > 0 { + vargs = make([]VARIANT, len(params)) + for i, v := range params { + //n := len(params)-i-1 + n := len(params) - i - 1 + VariantInit(&vargs[n]) + switch vv := v.(type) { + case bool: + if vv { + vargs[n] = NewVariant(VT_BOOL, 0xffff) + } else { + vargs[n] = NewVariant(VT_BOOL, 0) + } + case *bool: + vargs[n] = NewVariant(VT_BOOL|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*bool))))) + case uint8: + vargs[n] = NewVariant(VT_I1, int64(v.(uint8))) + case *uint8: + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + case int8: + vargs[n] = 
NewVariant(VT_I1, int64(v.(int8))) + case *int8: + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int8))))) + case int16: + vargs[n] = NewVariant(VT_I2, int64(v.(int16))) + case *int16: + vargs[n] = NewVariant(VT_I2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int16))))) + case uint16: + vargs[n] = NewVariant(VT_UI2, int64(v.(uint16))) + case *uint16: + vargs[n] = NewVariant(VT_UI2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint16))))) + case int32: + vargs[n] = NewVariant(VT_I4, int64(v.(int32))) + case *int32: + vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int32))))) + case uint32: + vargs[n] = NewVariant(VT_UI4, int64(v.(uint32))) + case *uint32: + vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint32))))) + case int64: + vargs[n] = NewVariant(VT_I8, int64(v.(int64))) + case *int64: + vargs[n] = NewVariant(VT_I8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int64))))) + case uint64: + vargs[n] = NewVariant(VT_UI8, int64(uintptr(v.(uint64)))) + case *uint64: + vargs[n] = NewVariant(VT_UI8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint64))))) + case int: + vargs[n] = NewVariant(VT_I4, int64(v.(int))) + case *int: + vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int))))) + case uint: + vargs[n] = NewVariant(VT_UI4, int64(v.(uint))) + case *uint: + vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint))))) + case float32: + vargs[n] = NewVariant(VT_R4, *(*int64)(unsafe.Pointer(&vv))) + case *float32: + vargs[n] = NewVariant(VT_R4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float32))))) + case float64: + vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv))) + case *float64: + vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64))))) + case string: + vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string)))))) + case *string: + vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*string))))) + case time.Time: + s := vv.Format("2006-01-02 15:04:05") + vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(s))))) + case *time.Time: + s := vv.Format("2006-01-02 15:04:05") + vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(&s)))) + case *IDispatch: + vargs[n] = NewVariant(VT_DISPATCH, int64(uintptr(unsafe.Pointer(v.(*IDispatch))))) + case **IDispatch: + vargs[n] = NewVariant(VT_DISPATCH|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(**IDispatch))))) + case nil: + vargs[n] = NewVariant(VT_NULL, 0) + case *VARIANT: + vargs[n] = NewVariant(VT_VARIANT|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*VARIANT))))) + case []byte: + safeByteArray := safeArrayFromByteSlice(v.([]byte)) + vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) + case []string: + safeByteArray := safeArrayFromStringSlice(v.([]string)) + vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) + default: + panic("unknown type") + } + } + dispparams.rgvarg = uintptr(unsafe.Pointer(&vargs[0])) + dispparams.cArgs = uint32(len(params)) + } + + result = new(VARIANT) + var excepInfo EXCEPINFO + VariantInit(result) + hr, _, _ := syscall.Syscall9( + disp.VTable().Invoke, + 9, + uintptr(unsafe.Pointer(disp)), + uintptr(dispid), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(GetUserDefaultLCID()), + uintptr(dispatch), + uintptr(unsafe.Pointer(&dispparams)), +
uintptr(unsafe.Pointer(result)), + uintptr(unsafe.Pointer(&excepInfo)), + 0) + if hr != 0 { + err = NewErrorWithSubError(hr, BstrToString(excepInfo.bstrDescription), excepInfo) + } + for i, varg := range vargs { + n := len(params) - i - 1 + if varg.VT == VT_BSTR && varg.Val != 0 { + SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val))))) + } + if varg.VT == (VT_BSTR|VT_BYREF) && varg.Val != 0 { + *(params[n].(*string)) = LpOleStrToString(*(**uint16)(unsafe.Pointer(uintptr(varg.Val)))) + } + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant.go b/vendor/github.com/go-ole/go-ole/ienumvariant.go new file mode 100644 index 00000000..24338975 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant.go @@ -0,0 +1,19 @@ +package ole + +import "unsafe" + +type IEnumVARIANT struct { + IUnknown +} + +type IEnumVARIANTVtbl struct { + IUnknownVtbl + Next uintptr + Skip uintptr + Reset uintptr + Clone uintptr +} + +func (v *IEnumVARIANT) VTable() *IEnumVARIANTVtbl { + return (*IEnumVARIANTVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go new file mode 100644 index 00000000..c1484819 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func (enum *IEnumVARIANT) Clone() (*IEnumVARIANT, error) { + return nil, NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Reset() error { + return NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Skip(celt uint) error { + return NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Next(celt uint) (VARIANT, uint, error) { + return NewVariant(VT_NULL, int64(0)), 0, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go new file mode 100644 index 00000000..4781f3b8 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go @@ -0,0 +1,63 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (enum *IEnumVARIANT) Clone() (cloned *IEnumVARIANT, err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Clone, + 2, + uintptr(unsafe.Pointer(enum)), + uintptr(unsafe.Pointer(&cloned)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Reset() (err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Reset, + 1, + uintptr(unsafe.Pointer(enum)), + 0, + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Skip(celt uint) (err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Skip, + 2, + uintptr(unsafe.Pointer(enum)), + uintptr(celt), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Next(celt uint) (array VARIANT, length uint, err error) { + hr, _, _ := syscall.Syscall6( + enum.VTable().Next, + 4, + uintptr(unsafe.Pointer(enum)), + uintptr(celt), + uintptr(unsafe.Pointer(&array)), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable.go b/vendor/github.com/go-ole/go-ole/iinspectable.go new file mode 100644 index 00000000..f4a19e25 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable.go @@ -0,0 +1,18 @@ +package ole + +import "unsafe" + +type IInspectable struct { + IUnknown +} + +type IInspectableVtbl struct { + IUnknownVtbl + GetIIds uintptr + GetRuntimeClassName uintptr + GetTrustLevel uintptr +} + 
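+// Editorial usage sketch, not upstream code: given a valid *IInspectable
+// (for example one obtained from a WinRT activation), the accessors below
+// allow runtime type inspection:
+//
+//	name, err := inspectable.GetRuntimeClassName()
+//	if err != nil {
+//		return err
+//	}
+//	iids, _ := inspectable.GetIids()        // implemented interface IDs
+//	level, _ := inspectable.GetTrustLevel() // numeric trust level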
+func (v *IInspectable) VTable() *IInspectableVtbl { + return (*IInspectableVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_func.go b/vendor/github.com/go-ole/go-ole/iinspectable_func.go new file mode 100644 index 00000000..348829bf --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable_func.go @@ -0,0 +1,15 @@ +// +build !windows + +package ole + +func (v *IInspectable) GetIids() ([]*GUID, error) { + return []*GUID{}, NewError(E_NOTIMPL) +} + +func (v *IInspectable) GetRuntimeClassName() (string, error) { + return "", NewError(E_NOTIMPL) +} + +func (v *IInspectable) GetTrustLevel() (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go new file mode 100644 index 00000000..4519a4aa --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go @@ -0,0 +1,72 @@ +// +build windows + +package ole + +import ( + "bytes" + "encoding/binary" + "reflect" + "syscall" + "unsafe" +) + +func (v *IInspectable) GetIids() (iids []*GUID, err error) { + var count uint32 + var array uintptr + hr, _, _ := syscall.Syscall( + v.VTable().GetIIds, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&count)), + uintptr(unsafe.Pointer(&array))) + if hr != 0 { + err = NewError(hr) + return + } + defer CoTaskMemFree(array) + + iids = make([]*GUID, count) + byteCount := count * uint32(unsafe.Sizeof(GUID{})) + slicehdr := reflect.SliceHeader{Data: array, Len: int(byteCount), Cap: int(byteCount)} + byteSlice := *(*[]byte)(unsafe.Pointer(&slicehdr)) + reader := bytes.NewReader(byteSlice) + for i := range iids { + guid := GUID{} + err = binary.Read(reader, binary.LittleEndian, &guid) + if err != nil { + return + } + iids[i] = &guid + } + return +} + +func (v *IInspectable) GetRuntimeClassName() (s string, err error) { + var hstring HString + hr, _, _ := syscall.Syscall( + v.VTable().GetRuntimeClassName, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&hstring)), + 0) + if hr != 0 { + err = NewError(hr) + return + } + s = hstring.String() + DeleteHString(hstring) + return +} + +func (v *IInspectable) GetTrustLevel() (level uint32, err error) { + hr, _, _ := syscall.Syscall( + v.VTable().GetTrustLevel, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&level)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go new file mode 100644 index 00000000..25f3a6f2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go @@ -0,0 +1,21 @@ +package ole + +import "unsafe" + +type IProvideClassInfo struct { + IUnknown +} + +type IProvideClassInfoVtbl struct { + IUnknownVtbl + GetClassInfo uintptr +} + +func (v *IProvideClassInfo) VTable() *IProvideClassInfoVtbl { + return (*IProvideClassInfoVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IProvideClassInfo) GetClassInfo() (cinfo *ITypeInfo, err error) { + cinfo, err = getClassInfo(v) + return +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go new file mode 100644 index 00000000..7e3cb63e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go @@ -0,0 +1,7 @@ +// +build !windows + +package ole + +func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { + return nil, NewError(E_NOTIMPL) +} diff 
--git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go new file mode 100644 index 00000000..2ad01639 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetClassInfo, + 2, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(&tinfo)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo.go b/vendor/github.com/go-ole/go-ole/itypeinfo.go new file mode 100644 index 00000000..dd3c5e21 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo.go @@ -0,0 +1,34 @@ +package ole + +import "unsafe" + +type ITypeInfo struct { + IUnknown +} + +type ITypeInfoVtbl struct { + IUnknownVtbl + GetTypeAttr uintptr + GetTypeComp uintptr + GetFuncDesc uintptr + GetVarDesc uintptr + GetNames uintptr + GetRefTypeOfImplType uintptr + GetImplTypeFlags uintptr + GetIDsOfNames uintptr + Invoke uintptr + GetDocumentation uintptr + GetDllEntry uintptr + GetRefTypeInfo uintptr + AddressOfMember uintptr + CreateInstance uintptr + GetMops uintptr + GetContainingTypeLib uintptr + ReleaseTypeAttr uintptr + ReleaseFuncDesc uintptr + ReleaseVarDesc uintptr +} + +func (v *ITypeInfo) VTable() *ITypeInfoVtbl { + return (*ITypeInfoVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go new file mode 100644 index 00000000..8364a659 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go @@ -0,0 +1,7 @@ +// +build !windows + +package ole + +func (v *ITypeInfo) GetTypeAttr() (*TYPEATTR, error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go new file mode 100644 index 00000000..54782b3d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *ITypeInfo) GetTypeAttr() (tattr *TYPEATTR, err error) { + hr, _, _ := syscall.Syscall( + uintptr(v.VTable().GetTypeAttr), + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&tattr)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown.go b/vendor/github.com/go-ole/go-ole/iunknown.go new file mode 100644 index 00000000..108f28ea --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown.go @@ -0,0 +1,57 @@ +package ole + +import "unsafe" + +type IUnknown struct { + RawVTable *interface{} +} + +type IUnknownVtbl struct { + QueryInterface uintptr + AddRef uintptr + Release uintptr +} + +type UnknownLike interface { + QueryInterface(iid *GUID) (disp *IDispatch, err error) + AddRef() int32 + Release() int32 +} + +func (v *IUnknown) VTable() *IUnknownVtbl { + return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error { + return reflectQueryInterface(v, v.VTable().QueryInterface, interfaceID, obj) +} + +func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch *IDispatch, err error) { + err = v.PutQueryInterface(interfaceID, &dispatch) + return +} + +func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) (enum *IEnumVARIANT, err error) { + err = 
v.PutQueryInterface(interfaceID, &enum) + return +} + +func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) { + return queryInterface(v, iid) +} + +func (v *IUnknown) MustQueryInterface(iid *GUID) (disp *IDispatch) { + unk, err := queryInterface(v, iid) + if err != nil { + panic(err) + } + return unk +} + +func (v *IUnknown) AddRef() int32 { + return addRef(v) +} + +func (v *IUnknown) Release() int32 { + return release(v) +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_func.go b/vendor/github.com/go-ole/go-ole/iunknown_func.go new file mode 100644 index 00000000..d0a62cfd --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + return NewError(E_NOTIMPL) +} + +func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { + return nil, NewError(E_NOTIMPL) +} + +func addRef(unk *IUnknown) int32 { + return 0 +} + +func release(unk *IUnknown) int32 { + return 0 +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_windows.go b/vendor/github.com/go-ole/go-ole/iunknown_windows.go new file mode 100644 index 00000000..ede5bb8c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown_windows.go @@ -0,0 +1,58 @@ +// +build windows + +package ole + +import ( + "reflect" + "syscall" + "unsafe" +) + +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + selfValue := reflect.ValueOf(self).Elem() + objValue := reflect.ValueOf(obj).Elem() + + hr, _, _ := syscall.Syscall( + method, + 3, + selfValue.UnsafeAddr(), + uintptr(unsafe.Pointer(interfaceID)), + objValue.Addr().Pointer()) + if hr != 0 { + err = NewError(hr) + } + return +} + +func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { + hr, _, _ := syscall.Syscall( + unk.VTable().QueryInterface, + 3, + uintptr(unsafe.Pointer(unk)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&disp))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func addRef(unk *IUnknown) int32 { + ret, _, _ := syscall.Syscall( + unk.VTable().AddRef, + 1, + uintptr(unsafe.Pointer(unk)), + 0, + 0) + return int32(ret) +} + +func release(unk *IUnknown) int32 { + ret, _, _ := syscall.Syscall( + unk.VTable().Release, + 1, + uintptr(unsafe.Pointer(unk)), + 0, + 0) + return int32(ret) +} diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go new file mode 100644 index 00000000..e2ae4f4b --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ole.go @@ -0,0 +1,157 @@ +package ole + +import ( + "fmt" + "strings" +) + +// DISPPARAMS are the arguments that passed to methods or property. +type DISPPARAMS struct { + rgvarg uintptr + rgdispidNamedArgs uintptr + cArgs uint32 + cNamedArgs uint32 +} + +// EXCEPINFO defines exception info. +type EXCEPINFO struct { + wCode uint16 + wReserved uint16 + bstrSource *uint16 + bstrDescription *uint16 + bstrHelpFile *uint16 + dwHelpContext uint32 + pvReserved uintptr + pfnDeferredFillIn uintptr + scode uint32 +} + +// WCode return wCode in EXCEPINFO. +func (e EXCEPINFO) WCode() uint16 { + return e.wCode +} + +// SCODE return scode in EXCEPINFO. +func (e EXCEPINFO) SCODE() uint32 { + return e.scode +} + +// String convert EXCEPINFO to string. 
+func (e EXCEPINFO) String() string { + var src, desc, hlp string + if e.bstrSource == nil { + src = "" + } else { + src = BstrToString(e.bstrSource) + } + + if e.bstrDescription == nil { + desc = "" + } else { + desc = BstrToString(e.bstrDescription) + } + + if e.bstrHelpFile == nil { + hlp = "" + } else { + hlp = BstrToString(e.bstrHelpFile) + } + + return fmt.Sprintf( + "wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x", + e.wCode, src, desc, hlp, e.dwHelpContext, e.scode, + ) +} + +// Error implements error interface and returns error string. +func (e EXCEPINFO) Error() string { + if e.bstrDescription != nil { + return strings.TrimSpace(BstrToString(e.bstrDescription)) + } + + src := "Unknown" + if e.bstrSource != nil { + src = BstrToString(e.bstrSource) + } + + code := e.scode + if e.wCode != 0 { + code = uint32(e.wCode) + } + + return fmt.Sprintf("%v: %#x", src, code) +} + +// PARAMDATA defines parameter data type. +type PARAMDATA struct { + Name *int16 + Vt uint16 +} + +// METHODDATA defines method info. +type METHODDATA struct { + Name *uint16 + Data *PARAMDATA + Dispid int32 + Meth uint32 + CC int32 + CArgs uint32 + Flags uint16 + VtReturn uint32 +} + +// INTERFACEDATA defines interface info. +type INTERFACEDATA struct { + MethodData *METHODDATA + CMembers uint32 +} + +// Point is 2D vector type. +type Point struct { + X int32 + Y int32 +} + +// Msg is message between processes. +type Msg struct { + Hwnd uint32 + Message uint32 + Wparam int32 + Lparam int32 + Time uint32 + Pt Point +} + +// TYPEDESC defines data type. +type TYPEDESC struct { + Hreftype uint32 + VT uint16 +} + +// IDLDESC defines IDL info. +type IDLDESC struct { + DwReserved uint32 + WIDLFlags uint16 +} + +// TYPEATTR defines type info. 
+type TYPEATTR struct { + Guid GUID + Lcid uint32 + dwReserved uint32 + MemidConstructor int32 + MemidDestructor int32 + LpstrSchema *uint16 + CbSizeInstance uint32 + Typekind int32 + CFuncs uint16 + CVars uint16 + CImplTypes uint16 + CbSizeVft uint16 + CbAlignment uint16 + WTypeFlags uint16 + WMajorVerNum uint16 + WMinorVerNum uint16 + TdescAlias TYPEDESC + IdldescType IDLDESC +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection.go b/vendor/github.com/go-ole/go-ole/oleutil/connection.go new file mode 100644 index 00000000..60df73cd --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection.go @@ -0,0 +1,100 @@ +// +build windows + +package oleutil + +import ( + "reflect" + "unsafe" + + ole "github.com/go-ole/go-ole" +) + +type stdDispatch struct { + lpVtbl *stdDispatchVtbl + ref int32 + iid *ole.GUID + iface interface{} + funcMap map[string]int32 +} + +type stdDispatchVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr + pGetTypeInfoCount uintptr + pGetTypeInfo uintptr + pGetIDsOfNames uintptr + pInvoke uintptr +} + +func dispQueryInterface(this *ole.IUnknown, iid *ole.GUID, punk **ole.IUnknown) uint32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + *punk = nil + if ole.IsEqualGUID(iid, ole.IID_IUnknown) || + ole.IsEqualGUID(iid, ole.IID_IDispatch) { + dispAddRef(this) + *punk = this + return ole.S_OK + } + if ole.IsEqualGUID(iid, pthis.iid) { + dispAddRef(this) + *punk = this + return ole.S_OK + } + return ole.E_NOINTERFACE +} + +func dispAddRef(this *ole.IUnknown) int32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + pthis.ref++ + return pthis.ref +} + +func dispRelease(this *ole.IUnknown) int32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + pthis.ref-- + return pthis.ref +} + +func dispGetIDsOfNames(this *ole.IUnknown, iid *ole.GUID, wnames []*uint16, namelen int, lcid int, pdisp []int32) uintptr { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + names := make([]string, len(wnames)) + for i := 0; i < len(names); i++ { + names[i] = ole.LpOleStrToString(wnames[i]) + } + for n := 0; n < namelen; n++ { + if id, ok := pthis.funcMap[names[n]]; ok { + pdisp[n] = id + } + } + return ole.S_OK +} + +func dispGetTypeInfoCount(pcount *int) uintptr { + if pcount != nil { + *pcount = 0 + } + return ole.S_OK +} + +func dispGetTypeInfo(ptypeif *uintptr) uintptr { + return ole.E_NOTIMPL +} + +func dispInvoke(this *ole.IDispatch, dispid int32, riid *ole.GUID, lcid int, flags int16, dispparams *ole.DISPPARAMS, result *ole.VARIANT, pexcepinfo *ole.EXCEPINFO, nerr *uint) uintptr { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + found := "" + for name, id := range pthis.funcMap { + if id == dispid { + found = name + } + } + if found != "" { + rv := reflect.ValueOf(pthis.iface).Elem() + rm := rv.MethodByName(found) + rr := rm.Call([]reflect.Value{}) + println(len(rr)) + return ole.S_OK + } + return ole.E_NOTIMPL +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go new file mode 100644 index 00000000..8818fb82 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go @@ -0,0 +1,10 @@ +// +build !windows + +package oleutil + +import ole "github.com/go-ole/go-ole" + +// ConnectObject creates a connection point between two services for communication. 
+func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (uint32, error) { + return 0, ole.NewError(ole.E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go new file mode 100644 index 00000000..ab9c0d8d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go @@ -0,0 +1,58 @@ +// +build windows + +package oleutil + +import ( + "reflect" + "syscall" + "unsafe" + + ole "github.com/go-ole/go-ole" +) + +// ConnectObject creates a connection point between two services for communication. +func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cookie uint32, err error) { + unknown, err := disp.QueryInterface(ole.IID_IConnectionPointContainer) + if err != nil { + return + } + + container := (*ole.IConnectionPointContainer)(unsafe.Pointer(unknown)) + var point *ole.IConnectionPoint + err = container.FindConnectionPoint(iid, &point) + if err != nil { + return + } + if edisp, ok := idisp.(*ole.IUnknown); ok { + cookie, err = point.Advise(edisp) + container.Release() + if err != nil { + return + } + } + rv := reflect.ValueOf(disp).Elem() + if rv.Type().Kind() == reflect.Struct { + dest := &stdDispatch{} + dest.lpVtbl = &stdDispatchVtbl{} + dest.lpVtbl.pQueryInterface = syscall.NewCallback(dispQueryInterface) + dest.lpVtbl.pAddRef = syscall.NewCallback(dispAddRef) + dest.lpVtbl.pRelease = syscall.NewCallback(dispRelease) + dest.lpVtbl.pGetTypeInfoCount = syscall.NewCallback(dispGetTypeInfoCount) + dest.lpVtbl.pGetTypeInfo = syscall.NewCallback(dispGetTypeInfo) + dest.lpVtbl.pGetIDsOfNames = syscall.NewCallback(dispGetIDsOfNames) + dest.lpVtbl.pInvoke = syscall.NewCallback(dispInvoke) + dest.iface = disp + dest.iid = iid + cookie, err = point.Advise((*ole.IUnknown)(unsafe.Pointer(dest))) + container.Release() + if err != nil { + point.Release() + return + } + return + } + + container.Release() + + return 0, ole.NewError(ole.E_INVALIDARG) +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go new file mode 100644 index 00000000..58347628 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go @@ -0,0 +1,6 @@ +// This file is here so go get succeeds as without it errors with: +// no buildable Go source files in ... +// +// +build !windows + +package oleutil diff --git a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go new file mode 100644 index 00000000..f7803c1e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go @@ -0,0 +1,127 @@ +package oleutil + +import ole "github.com/go-ole/go-ole" + +// ClassIDFrom retrieves class ID whether given is program ID or application string. +func ClassIDFrom(programID string) (classID *ole.GUID, err error) { + return ole.ClassIDFrom(programID) +} + +// CreateObject creates object from programID based on interface type. +// +// Only supports IUnknown. +// +// Program ID can be either program ID or application string. +func CreateObject(programID string) (unknown *ole.IUnknown, err error) { + classID, err := ole.ClassIDFrom(programID) + if err != nil { + return + } + + unknown, err = ole.CreateInstance(classID, ole.IID_IUnknown) + if err != nil { + return + } + + return +} + +// GetActiveObject retrieves active object for program ID and interface ID based +// on interface type. +// +// Only supports IUnknown. 
+// +// Program ID can be either program ID or application string. +func GetActiveObject(programID string) (unknown *ole.IUnknown, err error) { + classID, err := ole.ClassIDFrom(programID) + if err != nil { + return + } + + unknown, err = ole.GetActiveObject(classID, ole.IID_IUnknown) + if err != nil { + return + } + + return +} + +// CallMethod calls method on IDispatch with parameters. +func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_METHOD, params) +} + +// MustCallMethod calls method on IDispatch with parameters or panics. +func MustCallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := CallMethod(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// GetProperty retrieves property from IDispatch. +func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYGET, params) +} + +// MustGetProperty retrieves property from IDispatch or panics. +func MustGetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := GetProperty(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// PutProperty mutates property. +func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUT, params) +} + +// MustPutProperty mutates property or panics. +func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutProperty(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// PutPropertyRef mutates property reference. +func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params) +} + +// MustPutPropertyRef mutates property reference or panics. +func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutPropertyRef(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error { + newEnum, err := disp.GetProperty("_NewEnum") + if err != nil { + return err + } + defer newEnum.Clear() + + enum, err := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + defer enum.Release() + + for item, length, err := enum.Next(1); length > 0; item, length, err = enum.Next(1) { + if err != nil { + return err + } + if ferr := f(&item); ferr != nil { + return ferr + } + } + return nil +} diff --git a/vendor/github.com/go-ole/go-ole/safearray.go b/vendor/github.com/go-ole/go-ole/safearray.go new file mode 100644 index 00000000..a5201b56 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray.go @@ -0,0 +1,27 @@ +// Package is meant to retrieve and process safe array data returned from COM. + +package ole + +// SafeArrayBound defines the SafeArray boundaries. +type SafeArrayBound struct { + Elements uint32 + LowerBound int32 +} + +// SafeArray is how COM handles arrays. 
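+// (Editorial note, not upstream text: one SafeArrayBound describes each
+// dimension of the array. For example, the safeArrayCreate wrapper later in
+// this change allocates a 10-element, zero-based byte vector from
+//
+//	bound := SafeArrayBound{Elements: 10, LowerBound: 0}
+//	sa, err := safeArrayCreate(VT_UI1, 1, &bound)
+//
+// on Windows; the non-Windows stub returns E_NOTIMPL.)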
+type SafeArray struct { + Dimensions uint16 + FeaturesFlag uint16 + ElementsSize uint32 + LocksAmount uint32 + Data uint32 + Bounds [16]byte +} + +// SAFEARRAY is obsolete, exists for backwards compatibility. +// Use SafeArray +type SAFEARRAY SafeArray + +// SAFEARRAYBOUND is obsolete, exists for backwards compatibility. +// Use SafeArrayBound +type SAFEARRAYBOUND SafeArrayBound diff --git a/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/go-ole/go-ole/safearray_func.go new file mode 100644 index 00000000..8ff0baa4 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray_func.go @@ -0,0 +1,211 @@ +// +build !windows + +package ole + +import ( + "unsafe" +) + +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. +func safeArrayAccessData(safearray *SafeArray) (uintptr, error) { + return uintptr(0), NewError(E_NOTIMPL) +} + +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. +func safeArrayUnaccessData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. +func safeArrayAllocData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayAllocDescriptor allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptor in Windows API. +func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. +func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. +func safeArrayCopy(original *SafeArray) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. +func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. +func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. +func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. +func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. +func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. +func safeArrayDestroy(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. +func safeArrayDestroyData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayDestroyDescriptor destroys SafeArray object. 
+// +// AKA: SafeArrayDestroyDescriptor in Windows API. +func safeArrayDestroyDescriptor(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetDim is the amount of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetDim in Windows API. +func safeArrayGetDim(safearray *SafeArray) (*uint32, error) { + u := uint32(0) + return &u, NewError(E_NOTIMPL) +} + +// safeArrayGetElementSize is the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. +func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) { + u := uint32(0) + return &u, NewError(E_NOTIMPL) +} + +// safeArrayGetElement retrieves element at given index. +func safeArrayGetElement(safearray *SafeArray, index int64, pv unsafe.Pointer) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetElement retrieves element at given index and converts to string. +func safeArrayGetElementString(safearray *SafeArray, index int64) (string, error) { + return "", NewError(E_NOTIMPL) +} + +// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. +func safeArrayGetIID(safearray *SafeArray) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayGetLBound returns lower bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetLBound in Windows API. +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int64, error) { + return int64(0), NewError(E_NOTIMPL) +} + +// safeArrayGetUBound returns upper bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetUBound in Windows API. +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int64, error) { + return int64(0), NewError(E_NOTIMPL) +} + +// safeArrayGetVartype returns data type of SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. +func safeArrayGetVartype(safearray *SafeArray) (uint16, error) { + return uint16(0), NewError(E_NOTIMPL) +} + +// safeArrayLock locks SafeArray for reading to modify SafeArray. +// +// This must be called during some calls to ensure that another process does not +// read or write to the SafeArray during editing. +// +// AKA: SafeArrayLock in Windows API. +func safeArrayLock(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. +func safeArrayUnlock(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. +func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. 
+func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go new file mode 100644 index 00000000..b27936e2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray_windows.go @@ -0,0 +1,337 @@ +// +build windows + +package ole + +import ( + "unsafe" +) + +var ( + procSafeArrayAccessData, _ = modoleaut32.FindProc("SafeArrayAccessData") + procSafeArrayAllocData, _ = modoleaut32.FindProc("SafeArrayAllocData") + procSafeArrayAllocDescriptor, _ = modoleaut32.FindProc("SafeArrayAllocDescriptor") + procSafeArrayAllocDescriptorEx, _ = modoleaut32.FindProc("SafeArrayAllocDescriptorEx") + procSafeArrayCopy, _ = modoleaut32.FindProc("SafeArrayCopy") + procSafeArrayCopyData, _ = modoleaut32.FindProc("SafeArrayCopyData") + procSafeArrayCreate, _ = modoleaut32.FindProc("SafeArrayCreate") + procSafeArrayCreateEx, _ = modoleaut32.FindProc("SafeArrayCreateEx") + procSafeArrayCreateVector, _ = modoleaut32.FindProc("SafeArrayCreateVector") + procSafeArrayCreateVectorEx, _ = modoleaut32.FindProc("SafeArrayCreateVectorEx") + procSafeArrayDestroy, _ = modoleaut32.FindProc("SafeArrayDestroy") + procSafeArrayDestroyData, _ = modoleaut32.FindProc("SafeArrayDestroyData") + procSafeArrayDestroyDescriptor, _ = modoleaut32.FindProc("SafeArrayDestroyDescriptor") + procSafeArrayGetDim, _ = modoleaut32.FindProc("SafeArrayGetDim") + procSafeArrayGetElement, _ = modoleaut32.FindProc("SafeArrayGetElement") + procSafeArrayGetElemsize, _ = modoleaut32.FindProc("SafeArrayGetElemsize") + procSafeArrayGetIID, _ = modoleaut32.FindProc("SafeArrayGetIID") + procSafeArrayGetLBound, _ = modoleaut32.FindProc("SafeArrayGetLBound") + procSafeArrayGetUBound, _ = modoleaut32.FindProc("SafeArrayGetUBound") + procSafeArrayGetVartype, _ = modoleaut32.FindProc("SafeArrayGetVartype") + procSafeArrayLock, _ = modoleaut32.FindProc("SafeArrayLock") + procSafeArrayPtrOfIndex, _ = modoleaut32.FindProc("SafeArrayPtrOfIndex") + procSafeArrayUnaccessData, _ = modoleaut32.FindProc("SafeArrayUnaccessData") + procSafeArrayUnlock, _ = modoleaut32.FindProc("SafeArrayUnlock") + procSafeArrayPutElement, _ = modoleaut32.FindProc("SafeArrayPutElement") + //procSafeArrayRedim, _ = modoleaut32.FindProc("SafeArrayRedim") // TODO + //procSafeArraySetIID, _ = modoleaut32.FindProc("SafeArraySetIID") // TODO + procSafeArrayGetRecordInfo, _ = modoleaut32.FindProc("SafeArrayGetRecordInfo") + procSafeArraySetRecordInfo, _ = modoleaut32.FindProc("SafeArraySetRecordInfo") +) + +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. +// Todo: Test +func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) { + err = convertHresultToError( + procSafeArrayAccessData.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&element)))) + return +} + +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. +func safeArrayUnaccessData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. +func safeArrayAllocData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayAllocDescriptor allocates SafeArray. 
+// +// AKA: SafeArrayAllocDescriptor in Windows API. +func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. +func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayAllocDescriptorEx.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. +func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayCopy.Call( + uintptr(unsafe.Pointer(original)), + uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. +func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) { + err = convertHresultToError( + procSafeArrayCopyData.Call( + uintptr(unsafe.Pointer(original)), + uintptr(unsafe.Pointer(duplicate)))) + return +} + +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. +func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreate.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(bounds))) + safearray = (*SafeArray)(unsafe.Pointer(&sa)) + return +} + +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. +func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateEx.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(bounds)), + extra) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. +func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateVector.Call( + uintptr(variantType), + uintptr(lowerBound), + uintptr(length)) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. +func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateVectorEx.Call( + uintptr(variantType), + uintptr(lowerBound), + uintptr(length), + extra) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. +func safeArrayDestroy(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. +func safeArrayDestroyData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayDestroyDescriptor destroys SafeArray object. 
+// +// AKA: SafeArrayDestroyDescriptor in Windows API. +func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayGetDim is the amount of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetDim in Windows API. +func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) { + l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray))) + dimensions = (*uint32)(unsafe.Pointer(l)) + return +} + +// safeArrayGetElementSize is the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. +func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) { + l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray))) + length = (*uint32)(unsafe.Pointer(l)) + return +} + +// safeArrayGetElement retrieves element at given index. +func safeArrayGetElement(safearray *SafeArray, index int64, pv unsafe.Pointer) error { + return convertHresultToError( + procSafeArrayGetElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(pv))) +} + +// safeArrayGetElementString retrieves element at given index and converts to string. +func safeArrayGetElementString(safearray *SafeArray, index int64) (str string, err error) { + var element *int16 + err = convertHresultToError( + procSafeArrayGetElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(unsafe.Pointer(&element)))) + str = BstrToString(*(**uint16)(unsafe.Pointer(&element))) + SysFreeString(element) + return +} + +// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. +func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { + err = convertHresultToError( + procSafeArrayGetIID.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&guid)))) + return +} + +// safeArrayGetLBound returns lower bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetLBound in Windows API. +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int64, err error) { + err = convertHresultToError( + procSafeArrayGetLBound.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(dimension), + uintptr(unsafe.Pointer(&lowerBound)))) + return +} + +// safeArrayGetUBound returns upper bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetUBound in Windows API. +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int64, err error) { + err = convertHresultToError( + procSafeArrayGetUBound.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(dimension), + uintptr(unsafe.Pointer(&upperBound)))) + return +} + +// safeArrayGetVartype returns data type of SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. +func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) { + err = convertHresultToError( + procSafeArrayGetVartype.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&varType)))) + return +} + +// safeArrayLock locks SafeArray for reading to modify SafeArray. 
+// +// This must be called during some calls to ensure that another process does not +// read or write to the SafeArray during editing. +// +// AKA: SafeArrayLock in Windows API. +func safeArrayLock(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. +func safeArrayUnlock(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. +func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) { + err = convertHresultToError( + procSafeArrayPutElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(unsafe.Pointer(element)))) + return +} + +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) { + err = convertHresultToError( + procSafeArrayGetRecordInfo.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&recordInfo)))) + return +} + +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) { + err = convertHresultToError( + procSafeArraySetRecordInfo.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&recordInfo)))) + return +} diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go new file mode 100644 index 00000000..ffeb2b97 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearrayconversion.go @@ -0,0 +1,140 @@ +// Helper for converting SafeArray to array of objects. 
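+// (Editorial note, not upstream text: given a *VARIANT v whose VT carries
+// the VT_ARRAY flag, the helpers in this file are typically chained as
+//
+//	sac := v.ToArray()           // *SafeArrayConversion; nil if not an array
+//	values := sac.ToValueArray() // []interface{}, decoded per element vartype
+//	sac.Release()                // destroys the underlying SAFEARRAY
+//
+// with ToArray defined on VARIANT in variant.go.)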
+ +package ole + +import ( + "unsafe" +) + +type SafeArrayConversion struct { + Array *SafeArray +} + +func (sac *SafeArrayConversion) ToStringArray() (strings []string) { + totalElements, _ := sac.TotalElements(0) + strings = make([]string, totalElements) + + for i := int64(0); i < totalElements; i++ { + strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i) + } + + return +} + +func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) { + totalElements, _ := sac.TotalElements(0) + bytes = make([]byte, totalElements) + + for i := int64(0); i < totalElements; i++ { + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)])) + } + + return +} + +func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) { + totalElements, _ := sac.TotalElements(0) + values = make([]interface{}, totalElements) + vt, _ := safeArrayGetVartype(sac.Array) + + for i := 0; i < int(totalElements); i++ { + switch VT(vt) { + case VT_BOOL: + var v bool + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_I1: + var v int8 + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_I2: + var v int16 + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_I4: + var v int32 + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_I8: + var v int64 + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_UI1: + var v uint8 + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_UI2: + var v uint16 + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_UI4: + var v uint32 + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_UI8: + var v uint64 + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_R4: + var v float32 + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_R8: + var v float64 + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v + case VT_BSTR: + var v string + v, _ = safeArrayGetElementString(sac.Array, int64(i)) + values[i] = v + case VT_VARIANT: + var v VARIANT + safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + values[i] = v.Value() + default: + // TODO + } + } + + return +} + +func (sac *SafeArrayConversion) GetType() (varType uint16, err error) { + return safeArrayGetVartype(sac.Array) +} + +func (sac *SafeArrayConversion) GetDimensions() (dimensions *uint32, err error) { + return safeArrayGetDim(sac.Array) +} + +func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) { + return safeArrayGetElementSize(sac.Array) +} + +func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int64, err error) { + if index < 1 { + index = 1 + } + + // Get array bounds + var LowerBounds int64 + var UpperBounds int64 + + LowerBounds, err = safeArrayGetLBound(sac.Array, index) + if err != nil { + return + } + + UpperBounds, err = safeArrayGetUBound(sac.Array, index) + if err != nil { + return + } + + totalElements = UpperBounds - LowerBounds + 1 + return +} + +// Release Safe Array memory +func (sac *SafeArrayConversion) Release() { + safeArrayDestroy(sac.Array) +} diff --git a/vendor/github.com/go-ole/go-ole/safearrayslices.go b/vendor/github.com/go-ole/go-ole/safearrayslices.go new file mode 100644 index 00000000..a9fa885f --- /dev/null +++ 
b/vendor/github.com/go-ole/go-ole/safearrayslices.go @@ -0,0 +1,33 @@ +// +build windows + +package ole + +import ( + "unsafe" +) + +func safeArrayFromByteSlice(slice []byte) *SafeArray { + array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []byte to SAFEARRAY") + } + + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(&v))) + } + return array +} + +func safeArrayFromStringSlice(slice []string) *SafeArray { + array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []string to SAFEARRAY") + } + // SysAllocStringLen(s) + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v)))) + } + return array +} diff --git a/vendor/github.com/go-ole/go-ole/utility.go b/vendor/github.com/go-ole/go-ole/utility.go new file mode 100644 index 00000000..99ee82dc --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/utility.go @@ -0,0 +1,101 @@ +package ole + +import ( + "unicode/utf16" + "unsafe" +) + +// ClassIDFrom retrieves class ID whether given is program ID or application string. +// +// Helper that provides check against both Class ID from Program ID and Class ID from string. It is +// faster, if you know which you are using, to use the individual functions, but this will check +// against available functions for you. +func ClassIDFrom(programID string) (classID *GUID, err error) { + classID, err = CLSIDFromProgID(programID) + if err != nil { + classID, err = CLSIDFromString(programID) + if err != nil { + return + } + } + return +} + +// BytePtrToString converts byte pointer to a Go string. +func BytePtrToString(p *byte) string { + a := (*[10000]uint8)(unsafe.Pointer(p)) + i := 0 + for a[i] != 0 { + i++ + } + return string(a[:i]) +} + +// UTF16PtrToString is alias for LpOleStrToString. +// +// Kept for compatibility reasons. +func UTF16PtrToString(p *uint16) string { + return LpOleStrToString(p) +} + +// LpOleStrToString converts COM Unicode to Go string. +func LpOleStrToString(p *uint16) string { + if p == nil { + return "" + } + + length := lpOleStrLen(p) + a := make([]uint16, length) + + ptr := unsafe.Pointer(p) + + for i := 0; i < int(length); i++ { + a[i] = *(*uint16)(ptr) + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + + return string(utf16.Decode(a)) +} + +// BstrToString converts COM binary string to Go string. +func BstrToString(p *uint16) string { + if p == nil { + return "" + } + length := SysStringLen((*int16)(unsafe.Pointer(p))) + a := make([]uint16, length) + + ptr := unsafe.Pointer(p) + + for i := 0; i < int(length); i++ { + a[i] = *(*uint16)(ptr) + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + return string(utf16.Decode(a)) +} + +// lpOleStrLen returns the length of Unicode string. +func lpOleStrLen(p *uint16) (length int64) { + if p == nil { + return 0 + } + + ptr := unsafe.Pointer(p) + + for i := 0; ; i++ { + if 0 == *(*uint16)(ptr) { + length = int64(i) + break + } + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + return +} + +// convertHresultToError converts syscall to error, if call is unsuccessful. 
+func convertHresultToError(hr uintptr, r2 uintptr, ignore error) (err error) { + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go new file mode 100644 index 00000000..ebe00f1c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variables.go @@ -0,0 +1,16 @@ +// +build windows + +package ole + +import ( + "syscall" +) + +var ( + modcombase = syscall.NewLazyDLL("combase.dll") + modkernel32, _ = syscall.LoadDLL("kernel32.dll") + modole32, _ = syscall.LoadDLL("ole32.dll") + modoleaut32, _ = syscall.LoadDLL("oleaut32.dll") + modmsvcrt, _ = syscall.LoadDLL("msvcrt.dll") + moduser32, _ = syscall.LoadDLL("user32.dll") +) diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go new file mode 100644 index 00000000..36969725 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant.go @@ -0,0 +1,105 @@ +package ole + +import "unsafe" + +// NewVariant returns new variant based on type and value. +func NewVariant(vt VT, val int64) VARIANT { + return VARIANT{VT: vt, Val: val} +} + +// ToIUnknown converts Variant to Unknown object. +func (v *VARIANT) ToIUnknown() *IUnknown { + if v.VT != VT_UNKNOWN { + return nil + } + return (*IUnknown)(unsafe.Pointer(uintptr(v.Val))) +} + +// ToIDispatch converts variant to dispatch object. +func (v *VARIANT) ToIDispatch() *IDispatch { + if v.VT != VT_DISPATCH { + return nil + } + return (*IDispatch)(unsafe.Pointer(uintptr(v.Val))) +} + +// ToArray converts variant to SafeArray helper. +func (v *VARIANT) ToArray() *SafeArrayConversion { + if v.VT != VT_SAFEARRAY { + if v.VT&VT_ARRAY == 0 { + return nil + } + } + var safeArray *SafeArray = (*SafeArray)(unsafe.Pointer(uintptr(v.Val))) + return &SafeArrayConversion{safeArray} +} + +// ToString converts variant to Go string. +func (v *VARIANT) ToString() string { + if v.VT != VT_BSTR { + return "" + } + return BstrToString(*(**uint16)(unsafe.Pointer(&v.Val))) +} + +// Clear the memory of variant object. +func (v *VARIANT) Clear() error { + return VariantClear(v) +} + +// Value returns variant value based on its type. +// +// Currently supported types: 2- and 4-byte integers, strings, bools. +// Note that 64-bit integers, datetimes, and other types are stored as strings +// and will be returned as strings. +// +// Needs to be further converted, because this returns an interface{}. +func (v *VARIANT) Value() interface{} { + switch v.VT { + case VT_I1: + return int8(v.Val) + case VT_UI1: + return uint8(v.Val) + case VT_I2: + return int16(v.Val) + case VT_UI2: + return uint16(v.Val) + case VT_I4: + return int32(v.Val) + case VT_UI4: + return uint32(v.Val) + case VT_I8: + return int64(v.Val) + case VT_UI8: + return uint64(v.Val) + case VT_INT: + return int(v.Val) + case VT_UINT: + return uint(v.Val) + case VT_INT_PTR: + return uintptr(v.Val) // TODO + case VT_UINT_PTR: + return uintptr(v.Val) + case VT_R4: + return *(*float32)(unsafe.Pointer(&v.Val)) + case VT_R8: + return *(*float64)(unsafe.Pointer(&v.Val)) + case VT_BSTR: + return v.ToString() + case VT_DATE: + // VT_DATE type will either return float64 or time.Time. 
+ d := float64(v.Val) + date, err := GetVariantDate(d) + if err != nil { + return d + } + return date + case VT_UNKNOWN: + return v.ToIUnknown() + case VT_DISPATCH: + return v.ToIDispatch() + case VT_BOOL: + return v.Val != 0 + } + return nil +} diff --git a/vendor/github.com/go-ole/go-ole/variant_386.go b/vendor/github.com/go-ole/go-ole/variant_386.go new file mode 100644 index 00000000..e73736bf --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_386.go @@ -0,0 +1,11 @@ +// +build 386 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_amd64.go b/vendor/github.com/go-ole/go-ole/variant_amd64.go new file mode 100644 index 00000000..dccdde13 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_amd64.go @@ -0,0 +1,12 @@ +// +build amd64 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_s390x.go b/vendor/github.com/go-ole/go-ole/variant_s390x.go new file mode 100644 index 00000000..9874ca66 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/vt_string.go b/vendor/github.com/go-ole/go-ole/vt_string.go new file mode 100644 index 00000000..729b4a04 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/vt_string.go @@ -0,0 +1,58 @@ +// generated by stringer -output vt_string.go -type VT; DO NOT EDIT + +package ole + +import "fmt" + +const ( + _VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL" + _VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR" + _VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR" + _VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID" + _VT_name_4 = "VT_BSTR_BLOBVT_VECTOR" + _VT_name_5 = "VT_ARRAY" + _VT_name_6 = "VT_BYREF" + _VT_name_7 = "VT_RESERVED" + _VT_name_8 = "VT_ILLEGAL" +) + +var ( + _VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110} + _VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122} + _VT_index_2 = [...]uint8{0, 9, 19, 30} + _VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98} + _VT_index_4 = [...]uint8{0, 12, 21} + _VT_index_5 = [...]uint8{0, 8} + _VT_index_6 = [...]uint8{0, 8} + _VT_index_7 = [...]uint8{0, 11} + _VT_index_8 = [...]uint8{0, 10} +) + +func (i VT) String() string { + switch { + case 0 <= i && i <= 14: + return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]] + case 16 <= i && i <= 31: + i -= 16 + return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]] + case 36 <= i && i <= 38: + i -= 36 + return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]] + case 64 <= i && i <= 72: + i -= 64 + return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]] + case 4095 <= i && i <= 4096: + i -= 4095 + return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]] + case i == 8192: + return _VT_name_5 + case i == 16384: + return _VT_name_6 + case i == 32768: + return _VT_name_7 + case i == 65535: + return 
_VT_name_8 + default: + return fmt.Sprintf("VT(%d)", i) + } +} diff --git a/vendor/github.com/go-ole/go-ole/winrt.go b/vendor/github.com/go-ole/go-ole/winrt.go new file mode 100644 index 00000000..4e9eca73 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/winrt.go @@ -0,0 +1,99 @@ +// +build windows + +package ole + +import ( + "reflect" + "syscall" + "unicode/utf8" + "unsafe" +) + +var ( + procRoInitialize = modcombase.NewProc("RoInitialize") + procRoActivateInstance = modcombase.NewProc("RoActivateInstance") + procRoGetActivationFactory = modcombase.NewProc("RoGetActivationFactory") + procWindowsCreateString = modcombase.NewProc("WindowsCreateString") + procWindowsDeleteString = modcombase.NewProc("WindowsDeleteString") + procWindowsGetStringRawBuffer = modcombase.NewProc("WindowsGetStringRawBuffer") +) + +func RoInitialize(thread_type uint32) (err error) { + hr, _, _ := procRoInitialize.Call(uintptr(thread_type)) + if hr != 0 { + err = NewError(hr) + } + return +} + +func RoActivateInstance(clsid string) (ins *IInspectable, err error) { + hClsid, err := NewHString(clsid) + if err != nil { + return nil, err + } + defer DeleteHString(hClsid) + + hr, _, _ := procRoActivateInstance.Call( + uintptr(unsafe.Pointer(hClsid)), + uintptr(unsafe.Pointer(&ins))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { + hClsid, err := NewHString(clsid) + if err != nil { + return nil, err + } + defer DeleteHString(hClsid) + + hr, _, _ := procRoGetActivationFactory.Call( + uintptr(unsafe.Pointer(hClsid)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&ins))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// HString is handle string for pointers. +type HString uintptr + +// NewHString returns a new HString for Go string. +func NewHString(s string) (hstring HString, err error) { + u16 := syscall.StringToUTF16Ptr(s) + len := uint32(utf8.RuneCountInString(s)) + hr, _, _ := procWindowsCreateString.Call( + uintptr(unsafe.Pointer(u16)), + uintptr(len), + uintptr(unsafe.Pointer(&hstring))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// DeleteHString deletes HString. +func DeleteHString(hstring HString) (err error) { + hr, _, _ := procWindowsDeleteString.Call(uintptr(hstring)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// String returns Go string value of HString. +func (h HString) String() string { + var u16buf uintptr + var u16len uint32 + u16buf, _, _ = procWindowsGetStringRawBuffer.Call( + uintptr(h), + uintptr(unsafe.Pointer(&u16len))) + + u16hdr := reflect.SliceHeader{Data: u16buf, Len: int(u16len), Cap: int(u16len)} + u16 := *(*[]uint16)(unsafe.Pointer(&u16hdr)) + return syscall.UTF16ToString(u16) +} diff --git a/vendor/github.com/go-ole/go-ole/winrt_doc.go b/vendor/github.com/go-ole/go-ole/winrt_doc.go new file mode 100644 index 00000000..52e6d74c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/winrt_doc.go @@ -0,0 +1,36 @@ +// +build !windows + +package ole + +// RoInitialize +func RoInitialize(thread_type uint32) (err error) { + return NewError(E_NOTIMPL) +} + +// RoActivateInstance +func RoActivateInstance(clsid string) (ins *IInspectable, err error) { + return nil, NewError(E_NOTIMPL) +} + +// RoGetActivationFactory +func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { + return nil, NewError(E_NOTIMPL) +} + +// HString is handle string for pointers. +type HString uintptr + +// NewHString returns a new HString for Go string. 
+func NewHString(s string) (hstring HString, err error) { + return HString(uintptr(0)), NewError(E_NOTIMPL) +} + +// DeleteHString deletes HString. +func DeleteHString(hstring HString) (err error) { + return NewError(E_NOTIMPL) +} + +// String returns Go string value of HString. +func (h HString) String() string { + return "" +} diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore new file mode 100644 index 00000000..2de28da1 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.gitignore @@ -0,0 +1,9 @@ +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +Icon? +ehthumbs.db +Thumbs.db +.idea diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml new file mode 100644 index 00000000..cc1268c3 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.travis.yml @@ -0,0 +1,107 @@ +sudo: false +language: go +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - master + +before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + +before_script: + - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf + - sudo service mysql restart + - .travis/wait_mysql.sh + - mysql -e 'create database gotest;' + +matrix: + include: + - env: DB=MYSQL8 + sudo: required + dist: trusty + go: 1.10.x + services: + - docker + before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - docker pull mysql:8.0 + - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret + mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 + - cp .travis/docker.cnf ~/.my.cnf + - .travis/wait_mysql.sh + before_script: + - export MYSQL_TEST_USER=gotest + - export MYSQL_TEST_PASS=secret + - export MYSQL_TEST_ADDR=127.0.0.1:3307 + - export MYSQL_TEST_CONCURRENT=1 + + - env: DB=MYSQL57 + sudo: required + dist: trusty + go: 1.10.x + services: + - docker + before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - docker pull mysql:5.7 + - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret + mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 + - cp .travis/docker.cnf ~/.my.cnf + - .travis/wait_mysql.sh + before_script: + - export MYSQL_TEST_USER=gotest + - export MYSQL_TEST_PASS=secret + - export MYSQL_TEST_ADDR=127.0.0.1:3307 + - export MYSQL_TEST_CONCURRENT=1 + + - env: DB=MARIA55 + sudo: required + dist: trusty + go: 1.10.x + services: + - docker + before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - docker pull mariadb:5.5 + - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret + mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 + - cp .travis/docker.cnf ~/.my.cnf + - .travis/wait_mysql.sh + before_script: + - export MYSQL_TEST_USER=gotest + - export MYSQL_TEST_PASS=secret + - export MYSQL_TEST_ADDR=127.0.0.1:3307 + - export MYSQL_TEST_CONCURRENT=1 + + - env: DB=MARIA10_1 + sudo: required + 
dist: trusty + go: 1.10.x + services: + - docker + before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - docker pull mariadb:10.1 + - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret + mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 + - cp .travis/docker.cnf ~/.my.cnf + - .travis/wait_mysql.sh + before_script: + - export MYSQL_TEST_USER=gotest + - export MYSQL_TEST_PASS=secret + - export MYSQL_TEST_ADDR=127.0.0.1:3307 + - export MYSQL_TEST_CONCURRENT=1 + +script: + - go test -v -covermode=count -coverprofile=coverage.out + - go vet ./... + - .travis/gofmt.sh +after_script: + - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS new file mode 100644 index 00000000..73ff68fb --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -0,0 +1,89 @@ +# This is the official list of Go-MySQL-Driver authors for copyright purposes. + +# If you are submitting a patch, please add your name or the name of the +# organization which holds the copyright to this list in alphabetical order. + +# Names should be added to this file as +# Name +# The email address is not required for organizations. +# Please keep the list sorted. + + +# Individual Persons + +Aaron Hopkins +Achille Roussel +Alexey Palazhchenko +Andrew Reid +Arne Hormann +Asta Xie +Bulat Gaifullin +Carlos Nieto +Chris Moos +Craig Wilson +Daniel Montoya +Daniel Nichter +Daniël van Eeden +Dave Protasowski +DisposaBoy +Egor Smolyakov +Evan Shaw +Frederick Mayle +Gustavo Kristic +Hajime Nakagami +Hanno Braun +Henri Yandell +Hirotaka Yamamoto +ICHINOSE Shogo +INADA Naoki +Jacek Szwec +James Harr +Jeff Hodges +Jeffrey Charles +Jian Zhen +Joshua Prunier +Julien Lefevre +Julien Schmidt +Justin Li +Justin Nuß +Kamil Dziedzic +Kevin Malachowski +Kieron Woodhouse +Lennart Rudolph +Leonardo YongUk Kim +Linh Tran Tuan +Lion Yang +Luca Looz +Lucas Liu +Luke Scott +Maciej Zimnoch +Michael Woolnough +Nicola Peduzzi +Olivier Mengué +oscarzhao +Paul Bonser +Peter Schultz +Rebecca Chin +Reed Allman +Richard Wilkes +Robert Russell +Runrioter Wung +Shuode Li +Soroush Pour +Stan Putrya +Stanley Gunawan +Xiangyu Hu +Xiaobing Jiang +Xiuming Chen +Zhenye Xie + +# Organizations + +Barracuda Networks, Inc. +Counting Ltd. +Google Inc. +InfoSum Ltd. +Keybase Inc. +Percona LLC +Pivotal Inc. +Stripe Inc. 
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md new file mode 100644 index 00000000..ce1b5330 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -0,0 +1,178 @@ +## Version 1.4.1 (2018-11-14) + +Bugfixes: + + - Fix TIME format for binary columns (#818) + - Fix handling of empty auth plugin names (#835) + - Fix caching_sha2_password with empty password (#826) + - Fix canceled context broke mysqlConn (#862) + - Fix OldAuthSwitchRequest support (#870) + - Fix Auth Response packet for cleartext password (#887) + +## Version 1.4 (2018-06-03) + +Changes: + + - Documentation fixes (#530, #535, #567) + - Refactoring (#575, #579, #580, #581, #603, #615, #704) + - Cache column names (#444) + - Sort the DSN parameters in DSNs generated from a config (#637) + - Allow native password authentication by default (#644) + - Use the default port if it is missing in the DSN (#668) + - Removed the `strict` mode (#676) + - Do not query `max_allowed_packet` by default (#680) + - Dropped support for Go 1.6 and lower (#696) + - Updated `ConvertValue()` to match the database/sql/driver implementation (#760) + - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783) + - Improved the compatibility of the authentication system (#807) + +New Features: + + - Multi-Results support (#537) + - `rejectReadOnly` DSN option (#604) + - `context.Context` support (#608, #612, #627, #761) + - Transaction isolation level support (#619, #744) + - Read-Only transactions support (#618, #634) + - `NewConfig` function which initializes a config with default values (#679) + - Implemented the `ColumnType` interfaces (#667, #724) + - Support for custom string types in `ConvertValue` (#623) + - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710) + - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802) + - Implemented `driver.SessionResetter` (#779) + - `sha256_password` authentication plugin support (#808) + +Bugfixes: + + - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718) + - Fixed LOAD LOCAL DATA INFILE for empty files (#590) + - Removed columns definition cache since it sometimes cached invalid data (#592) + - Don't mutate registered TLS configs (#600) + - Make RegisterTLSConfig concurrency-safe (#613) + - Handle missing auth data in the handshake packet correctly (#646) + - Do not retry queries when data was written to avoid data corruption (#302, #736) + - Cache the connection pointer for error handling before invalidating it (#678) + - Fixed imports for appengine/cloudsql (#700) + - Fix sending STMT_LONG_DATA for 0 byte data (#734) + - Set correct capacity for []bytes read from length-encoded strings (#766) + - Make RegisterDial concurrency-safe (#773) + + +## Version 1.3 (2016-12-01) + +Changes: + + - Go 1.1 is no longer supported + - Use decimals fields in MySQL to format time types (#249) + - Buffer optimizations (#269) + - TLS ServerName defaults to the host (#283) + - Refactoring (#400, #410, #437) + - Adjusted documentation for second generation CloudSQL (#485) + - Documented DSN system var quoting rules (#502) + - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512) + +New Features: + + - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) + - Support for returning table alias on Columns() (#289, #359, #382) + - Placeholder interpolation, can be activated with the DSN parameter
`interpolateParams=true` (#309, #318, #490) + - Support for uint64 parameters with high bit set (#332, #345) + - Cleartext authentication plugin support (#327) + - Exported ParseDSN function and the Config struct (#403, #419, #429) + - Read / Write timeouts (#401) + - Support for JSON field type (#414) + - Support for multi-statements and multi-results (#411, #431) + - DSN parameter to set the driver-side max_allowed_packet value manually (#489) + - Native password authentication plugin support (#494, #524) + +Bugfixes: + + - Fixed handling of queries without columns and rows (#255) + - Fixed a panic when SetKeepAlive() failed (#298) + - Handle ERR packets while reading rows (#321) + - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349) + - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356) + - Actually zero out bytes in handshake response (#378) + - Fixed race condition in registering LOAD DATA INFILE handler (#383) + - Fixed tests with MySQL 5.7.9+ (#380) + - QueryUnescape TLS config names (#397) + - Fixed "broken pipe" error by writing to closed socket (#390) + - Fixed LOAD LOCAL DATA INFILE buffering (#424) + - Fixed parsing of floats into float64 when placeholders are used (#434) + - Fixed DSN tests with Go 1.7+ (#459) + - Handle ERR packets while waiting for EOF (#473) + - Invalidate connection on error while discarding additional results (#513) + - Allow terminating packets of length 0 (#516) + + +## Version 1.2 (2014-06-03) + +Changes: + + - We switched back to a "rolling release". `go get` installs the current master branch again + - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver + - Exported errors to allow easy checking from application code + - Enabled TCP Keepalives on TCP connections + - Optimized INFILE handling (better buffer size calculation, lazy init, ...) + - The DSN parser also checks for a missing separating slash + - Faster binary date / datetime to string formatting + - Also exported the MySQLWarning type + - mysqlConn.Close returns the first error encountered instead of ignoring all errors + - writePacket() automatically writes the packet size to the header + - readPacket() uses an iterative approach instead of the recursive approach to merge split packets + +New Features: + + - `RegisterDial` allows the usage of a custom dial function to establish the network connection + - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter + - Logging of critical errors is configurable with `SetLogger` + - Google CloudSQL support + +Bugfixes: + + - Allow more than 32 parameters in prepared statements + - Various old_password fixes + - Fixed TestConcurrent test to pass Go's race detection + - Fixed appendLengthEncodedInteger for large numbers + - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) + + +## Version 1.1 (2013-11-02) + +Changes: + + - Go-MySQL-Driver now requires Go 1.1 + - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore + - Made closing rows and connections error tolerant. This allows, for example, deferring rows.Close() without checking for errors + - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` + - DSN parameter values must now be url.QueryEscape'ed.
This allows text values to contain special characters, such as '&'. + - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries + - Optimized the buffer for reading + - stmt.Query now caches column metadata + - New Logo + - Changed the copyright header to include all contributors + - Improved the LOAD INFILE documentation + - The driver struct is now exported to make the driver directly accessible + - Refactored the driver tests + - Added more benchmarks and moved all to a separate file + - Other small refactoring + +New Features: + + - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure + - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs + - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used + +Bugfixes: + + - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification + - Convert to DB timezone when inserting `time.Time` + - Split packets (more than 16MB) are now merged correctly + - Fixed false positive `io.EOF` errors when the data was fully read + - Avoid panics on reuse of closed connections + - Fixed empty string producing false nil values + - Fixed sign byte for positive TIME fields + + +## Version 1.0 (2013-05-14) + +Initial Release diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md new file mode 100644 index 00000000..8fe16bcb --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing Guidelines + +## Reporting Issues + +Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). + +## Contributing Code + +By contributing to this project, you share your code under the Mozilla Public License 2.0, as specified in the LICENSE file. +Don't forget to add yourself to the AUTHORS file. + +### Code Review + +Everyone is invited to review and comment on pull requests. +If it looks fine to you, comment with "LGTM" (Looks good to me). + +If changes are required, notify the reviewers with "PTAL" (Please take another look) after committing the fixes. + +Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". + +## Development Ideas + +If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE new file mode 100644 index 00000000..14e2f777 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3.
"Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. 
Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. 
If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md new file mode 100644 index 00000000..2e9b07ee --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -0,0 +1,490 @@ +# Go-MySQL-Driver + +A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package + ![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") + +--------------------------------------- + * [Features](#features) + * [Requirements](#requirements) + * [Installation](#installation) + * [Usage](#usage) + * [DSN (Data Source Name)](#dsn-data-source-name) + * [Password](#password) + * [Protocol](#protocol) + * [Address](#address) + * [Parameters](#parameters) + * [Examples](#examples) + * [Connection pool and timeouts](#connection-pool-and-timeouts) + * [context.Context Support](#contextcontext-support) + * [ColumnType Support](#columntype-support) + * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) + * [time.Time support](#timetime-support) + * [Unicode support](#unicode-support) + * [Testing / Development](#testing--development) + * [License](#license) + +--------------------------------------- + +## Features + * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") + * Native Go implementation. No C-bindings, just pure Go + * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc) + * Automatic handling of broken connections + * Automatic Connection Pooling *(by database/sql package)* + * Supports queries larger than 16MB + * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support. + * Intelligent `LONG DATA` handling in prepared statements + * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support + * Optional `time.Time` parsing + * Optional placeholder interpolation + +## Requirements + * Go 1.7 or higher. We aim to support the 3 latest versions of Go. + * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) + +--------------------------------------- + +## Installation +Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell: +```bash +$ go get -u github.com/go-sql-driver/mysql +``` +Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`. + +## Usage +_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](https://golang.org/pkg/database/sql/) API. + +Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: +```go +import "database/sql" +import _ "github.com/go-sql-driver/mysql" + +db, err := sql.Open("mysql", "user:password@/dbname") +``` + +[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
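To make the usage section concrete, here is a minimal, self-contained sketch; the DSN, the `users` table, and the column names are placeholders for your own setup:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// DSN form: user:password@protocol(address)/dbname
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sql.Open does not establish a connection; Ping verifies the DSN works.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}

	var name string
	// Placeholders (?) are sent via a server-side prepared statement by default.
	if err := db.QueryRow("SELECT name FROM users WHERE id = ?", 1).Scan(&name); err != nil {
		log.Fatal(err)
	}
	log.Println(name)
}
```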
+ + +### DSN (Data Source Name) + +The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by square brackets): +``` +[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN] +``` + +A DSN in its fullest form: +``` +username:password@protocol(address)/dbname?param=value +``` + +Except for the database name, all values are optional. So the minimal DSN is: +``` +/dbname +``` + +If you do not want to preselect a database, leave `dbname` empty: +``` +/ +``` +This has the same effect as an empty DSN string: +``` + +``` + +Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct. + +#### Password +Passwords can consist of any character. Escaping is **not** necessary. + +#### Protocol +See [net.Dial](https://golang.org/pkg/net/#Dial) for more information on which networks are available. +In general you should use a Unix domain socket if available and TCP otherwise for best performance. + +#### Address +For TCP and UDP networks, addresses have the form `host[:port]`. +If `port` is omitted, the default port will be used. +If `host` is a literal IPv6 address, it must be enclosed in square brackets. +The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. + +For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
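Since hand-assembling DSN strings gets error-prone once parameters are involved, here is a short sketch of the struct-based route mentioned above (user, address, and database are placeholders):

```go
package main

import (
	"fmt"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// NewConfig fills in the documented defaults (e.g. allowNativePasswords=true).
	cfg := mysql.NewConfig()
	cfg.User = "user"
	cfg.Passwd = "password"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1:3306"
	cfg.DBName = "dbname"

	// FormatDSN renders the struct back into DSN syntax; non-default
	// parameters are appended as ?param=value pairs automatically.
	fmt.Println(cfg.FormatDSN())
	// should print something like: user:password@tcp(127.0.0.1:3306)/dbname
}
```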
+ +#### Parameters +*Parameters are case-sensitive!* + +Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`. + +##### `allowAllFiles` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files. +[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) + +##### `allowCleartextPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network. + +##### `allowNativePasswords` + +``` +Type: bool +Valid Values: true, false +Default: true +``` +`allowNativePasswords=false` disallows the usage of MySQL native password method. + +##### `allowOldPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` +`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). + +##### `charset` + +``` +Type: string +Valid Values: <name> +Default: none +``` + +Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). + +Usage of the `charset` parameter is discouraged because it issues additional queries to the server. +Unless you need the fallback behavior, please use `collation` instead. + +##### `collation` + +``` +Type: string +Valid Values: <name> +Default: utf8_general_ci +``` + +Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. + +A list of valid collations for a server is retrievable with `SHOW COLLATION`. + +##### `clientFoundRows` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. + +##### `columnsWithAlias` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example: + +``` +SELECT u.id FROM users as u +``` + +will return `u.id` instead of just `id` if `columnsWithAlias=true`. + +##### `interpolateParams` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips, since with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters, and close the statement again. + +*This cannot be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
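As a quick illustration of the trade-off, a minimal sketch (the table and DSN are placeholders; the API is unchanged, only the wire behavior differs):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// With interpolateParams=true the two placeholders below are escaped and
	// expanded client-side, so the query is sent as a single packet instead
	// of a prepare/execute/close round trip.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?interpolateParams=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT id FROM users WHERE status = ? AND age > ?", "active", 21)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
}
```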
+ +##### `loc` + +``` +Type: string +Valid Values: <escaped name> +Default: UTC +``` + +Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details. + +Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter. + +Please keep in mind that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`. + +##### `maxAllowedPacket` +``` +Type: decimal number +Default: 4194304 +``` + +Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*. + +##### `multiStatements` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned; all other results are silently discarded. + +When `multiStatements` is used, `?` parameters must only be used in the first statement. + +##### `parseTime` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`. +A date or datetime value like `0000-00-00 00:00:00` is converted into the zero value of `time.Time`. + + +##### `readTimeout` + +``` +Type: duration +Default: 0 +``` + +I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. + +##### `rejectReadOnly` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + + +`rejectReadOnly=true` causes the driver to reject read-only connections. This is for a possible race condition during an automatic failover, where the mysql client gets connected to a read-only replica after the failover. + +Note that this should be a fairly rare case, as an automatic failover normally happens when the primary is down, and the race condition shouldn't happen unless it comes back up online as soon as the failover is kicked off. On the other hand, when this happens, a MySQL application can get stuck on a read-only connection until restarted. It is however fairly easy to reproduce, for example, using a manual failover on AWS Aurora's MySQL-compatible cluster. + +If you are not relying on read-only transactions to reject writes that aren't supposed to happen, setting this on some MySQL providers (such as AWS Aurora) is safer for failovers. + +Note that ERROR 1290 can be returned for a `read-only` server and this option will cause a retry for that error. However the same error number is used for some other cases. You should ensure your application will never cause an ERROR 1290 except for `read-only` mode when enabling this option. + + +##### `serverPubKey` + +``` +Type: string +Valid Values: <name> +Default: none +``` + +Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN. +Public keys are used to transmit encrypted data, e.g. for authentication. +If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required. + + +##### `timeout` + +``` +Type: duration +Default: OS default +``` + +Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. + + +##### `tls` + +``` +Type: bool / string +Valid Values: true, false, skip-verify, <name> +Default: false +``` + +`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). + + +##### `writeTimeout` + +``` +Type: duration +Default: 0 +``` + +I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
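Tying the `tls` parameter back to `mysql.RegisterTLSConfig`, here is a sketch with a custom CA; the certificate path, host name, and config name `custom` are placeholders:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Hypothetical CA bundle path; adjust for your deployment.
	pem, err := ioutil.ReadFile("/path/to/ca-cert.pem")
	if err != nil {
		log.Fatal(err)
	}
	rootCAs := x509.NewCertPool()
	if ok := rootCAs.AppendCertsFromPEM(pem); !ok {
		log.Fatal("failed to append CA certificate")
	}

	// Register the config under a name, then reference it via tls=custom.
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCAs}); err != nil {
		log.Fatal(err)
	}

	db, err := sql.Open("mysql", "user:password@tcp(db.example.com:3306)/dbname?tls=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```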
+
+## `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
+
+## `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
+
+
+### `LOAD DATA LOCAL INFILE` support
+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
+```go
+import "github.com/go-sql-driver/mysql"
+```
+
+Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+
+To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need it anymore.
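+
+A minimal sketch of the `io.Reader` variant (the handler name `data` and the table `t(id INT)` are hypothetical placeholders):
+
+```go
+package main
+
+import (
+	"database/sql"
+	"io"
+	"log"
+	"strings"
+
+	"github.com/go-sql-driver/mysql" // direct import, no `_`
+)
+
+func main() {
+	// Register a handler under the name "data"; it is invoked for every
+	// statement that references the virtual filepath "Reader::data".
+	mysql.RegisterReaderHandler("data", func() io.Reader {
+		return strings.NewReader("1\n2\n3\n")
+	})
+	defer mysql.DeregisterReaderHandler("data")
+
+	db, err := sql.Open("mysql", "user:password@/dbname")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// Assumes the table t(id INT) already exists.
+	if _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE t"); err != nil {
+		log.Fatal(err)
+	}
+}
+```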
+
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
+
+
+### `time.Time` support
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte`, which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
+
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+
+**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
+
+Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
+
+
+### Unicode support
+Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
+
+Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+
+Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
+
+See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
+
+## Testing / Development
+To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
+
+Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated.
+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
+
+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
+
+---------------------------------------
+
+## License
+Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+
+Mozilla summarizes the license scope as follows:
+> MPL: The copyleft applies to any files containing MPLed code.
+
+
+That means:
+  * You can **use** the **unchanged** source code both in private and commercially.
+  * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+  * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
+
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
+
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
+ +![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") + diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go new file mode 100644 index 00000000..be41f2ee --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/appengine.go @@ -0,0 +1,19 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build appengine + +package mysql + +import ( + "google.golang.org/appengine/cloudsql" +) + +func init() { + RegisterDial("cloudsql", cloudsql.Dial) +} diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go new file mode 100644 index 00000000..14f678a8 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/auth.go @@ -0,0 +1,420 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/pem" + "sync" +) + +// server pub keys registry +var ( + serverPubKeyLock sync.RWMutex + serverPubKeyRegistry map[string]*rsa.PublicKey +) + +// RegisterServerPubKey registers a server RSA public key which can be used to +// send data in a secure manner to the server without receiving the public key +// in a potentially insecure way from the server first. +// Registered keys can afterwards be used adding serverPubKey= to the DSN. +// +// Note: The provided rsa.PublicKey instance is exclusively owned by the driver +// after registering it and may not be modified. +// +// data, err := ioutil.ReadFile("mykey.pem") +// if err != nil { +// log.Fatal(err) +// } +// +// block, _ := pem.Decode(data) +// if block == nil || block.Type != "PUBLIC KEY" { +// log.Fatal("failed to decode PEM block containing public key") +// } +// +// pub, err := x509.ParsePKIXPublicKey(block.Bytes) +// if err != nil { +// log.Fatal(err) +// } +// +// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok { +// mysql.RegisterServerPubKey("mykey", rsaPubKey) +// } else { +// log.Fatal("not a RSA public key") +// } +// +func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) { + serverPubKeyLock.Lock() + if serverPubKeyRegistry == nil { + serverPubKeyRegistry = make(map[string]*rsa.PublicKey) + } + + serverPubKeyRegistry[name] = pubKey + serverPubKeyLock.Unlock() +} + +// DeregisterServerPubKey removes the public key registered with the given name. 
+func DeregisterServerPubKey(name string) { + serverPubKeyLock.Lock() + if serverPubKeyRegistry != nil { + delete(serverPubKeyRegistry, name) + } + serverPubKeyLock.Unlock() +} + +func getServerPubKey(name string) (pubKey *rsa.PublicKey) { + serverPubKeyLock.RLock() + if v, ok := serverPubKeyRegistry[name]; ok { + pubKey = v + } + serverPubKeyLock.RUnlock() + return +} + +// Hash password using pre 4.1 (old password) method +// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +// Pseudo random number generator +func newMyRnd(seed1, seed2 uint32) *myRnd { + return &myRnd{ + seed1: seed1 % myRndMaxVal, + seed2: seed2 % myRndMaxVal, + } +} + +// Tested to be equivalent to MariaDB's floating point variant +// http://play.golang.org/p/QHvhd4qved +// http://play.golang.org/p/RG0q4ElWDx +func (r *myRnd) NextByte() byte { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + + return byte(uint64(r.seed1) * 31 / myRndMaxVal) +} + +// Generate binary hash from byte string using insecure pre 4.1 method +func pwHash(password []byte) (result [2]uint32) { + var add uint32 = 7 + var tmp uint32 + + result[0] = 1345345333 + result[1] = 0x12345671 + + for _, c := range password { + // skip spaces and tabs in password + if c == ' ' || c == '\t' { + continue + } + + tmp = uint32(c) + result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) + result[1] += (result[1] << 8) ^ result[0] + add += tmp + } + + // Remove sign bit (1<<31)-1) + result[0] &= 0x7FFFFFFF + result[1] &= 0x7FFFFFFF + + return +} + +// Hash password using insecure pre 4.1 method +func scrambleOldPassword(scramble []byte, password string) []byte { + if len(password) == 0 { + return nil + } + + scramble = scramble[:8] + + hashPw := pwHash([]byte(password)) + hashSc := pwHash(scramble) + + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + + var out [8]byte + for i := range out { + out[i] = r.NextByte() + 64 + } + + mask := r.NextByte() + for i := range out { + out[i] ^= mask + } + + return out[:] +} + +// Hash password using 4.1+ method (SHA1) +func scramblePassword(scramble []byte, password string) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write([]byte(password)) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + + // outer Hash + crypt.Reset() + crypt.Write(scramble) + crypt.Write(hash) + scramble = crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} + +// Hash password using MySQL 8+ method (SHA256) +func scrambleSHA256Password(scramble []byte, password string) []byte { + if len(password) == 0 { + return nil + } + + // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble)) + + crypt := sha256.New() + crypt.Write([]byte(password)) + message1 := crypt.Sum(nil) + + crypt.Reset() + crypt.Write(message1) + message1Hash := crypt.Sum(nil) + + crypt.Reset() + crypt.Write(message1Hash) + crypt.Write(scramble) + message2 := crypt.Sum(nil) + + for i := range message1 { + message1[i] ^= message2[i] + } + + return message1 +} + +func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) { + plain := make([]byte, len(password)+1) + copy(plain, password) + for i := range plain { + j := i % 
len(seed) + plain[i] ^= seed[j] + } + sha1 := sha1.New() + return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil) +} + +func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error { + enc, err := encryptPassword(mc.cfg.Passwd, seed, pub) + if err != nil { + return err + } + return mc.writeAuthSwitchPacket(enc) +} + +func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) { + switch plugin { + case "caching_sha2_password": + authResp := scrambleSHA256Password(authData, mc.cfg.Passwd) + return authResp, nil + + case "mysql_old_password": + if !mc.cfg.AllowOldPasswords { + return nil, ErrOldPassword + } + // Note: there are edge cases where this should work but doesn't; + // this is currently "wontfix": + // https://github.com/go-sql-driver/mysql/issues/184 + authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0) + return authResp, nil + + case "mysql_clear_password": + if !mc.cfg.AllowCleartextPasswords { + return nil, ErrCleartextPassword + } + // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html + // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html + return append([]byte(mc.cfg.Passwd), 0), nil + + case "mysql_native_password": + if !mc.cfg.AllowNativePasswords { + return nil, ErrNativePassword + } + // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html + // Native password authentication only need and will need 20-byte challenge. + authResp := scramblePassword(authData[:20], mc.cfg.Passwd) + return authResp, nil + + case "sha256_password": + if len(mc.cfg.Passwd) == 0 { + return []byte{0}, nil + } + if mc.cfg.tls != nil || mc.cfg.Net == "unix" { + // write cleartext auth packet + return append([]byte(mc.cfg.Passwd), 0), nil + } + + pubKey := mc.cfg.pubKey + if pubKey == nil { + // request public key from server + return []byte{1}, nil + } + + // encrypted password + enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey) + return enc, err + + default: + errLog.Print("unknown auth plugin:", plugin) + return nil, ErrUnknownPlugin + } +} + +func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { + // Read Result Packet + authData, newPlugin, err := mc.readAuthResult() + if err != nil { + return err + } + + // handle auth plugin switch, if requested + if newPlugin != "" { + // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is + // sent and we have to keep using the cipher sent in the init packet. 
+ if authData == nil { + authData = oldAuthData + } else { + // copy data from read buffer to owned slice + copy(oldAuthData, authData) + } + + plugin = newPlugin + + authResp, err := mc.auth(authData, plugin) + if err != nil { + return err + } + if err = mc.writeAuthSwitchPacket(authResp); err != nil { + return err + } + + // Read Result Packet + authData, newPlugin, err = mc.readAuthResult() + if err != nil { + return err + } + + // Do not allow to change the auth plugin more than once + if newPlugin != "" { + return ErrMalformPkt + } + } + + switch plugin { + + // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/ + case "caching_sha2_password": + switch len(authData) { + case 0: + return nil // auth successful + case 1: + switch authData[0] { + case cachingSha2PasswordFastAuthSuccess: + if err = mc.readResultOK(); err == nil { + return nil // auth successful + } + + case cachingSha2PasswordPerformFullAuthentication: + if mc.cfg.tls != nil || mc.cfg.Net == "unix" { + // write cleartext auth packet + err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0)) + if err != nil { + return err + } + } else { + pubKey := mc.cfg.pubKey + if pubKey == nil { + // request public key from server + data := mc.buf.takeSmallBuffer(4 + 1) + data[4] = cachingSha2PasswordRequestPublicKey + mc.writePacket(data) + + // parse public key + data, err := mc.readPacket() + if err != nil { + return err + } + + block, _ := pem.Decode(data[1:]) + pkix, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return err + } + pubKey = pkix.(*rsa.PublicKey) + } + + // send encrypted password + err = mc.sendEncryptedPassword(oldAuthData, pubKey) + if err != nil { + return err + } + } + return mc.readResultOK() + + default: + return ErrMalformPkt + } + default: + return ErrMalformPkt + } + + case "sha256_password": + switch len(authData) { + case 0: + return nil // auth successful + default: + block, _ := pem.Decode(authData) + pub, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return err + } + + // send encrypted password + err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey)) + if err != nil { + return err + } + return mc.readResultOK() + } + + default: + return nil // auth successful + } + + return err +} diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go new file mode 100644 index 00000000..eb4748bf --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/buffer.go @@ -0,0 +1,147 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "io" + "net" + "time" +) + +const defaultBufSize = 4096 + +// A buffer which is used for both reading and writing. +// This is possible since communication on each connection is synchronous. +// In other words, we can't write and read simultaneously on the same connection. +// The buffer is similar to bufio.Reader / Writer but zero-copy-ish +// Also highly optimized for this particular use case. 
+type buffer struct { + buf []byte + nc net.Conn + idx int + length int + timeout time.Duration +} + +func newBuffer(nc net.Conn) buffer { + var b [defaultBufSize]byte + return buffer{ + buf: b[:], + nc: nc, + } +} + +// fill reads into the buffer until at least _need_ bytes are in it +func (b *buffer) fill(need int) error { + n := b.length + + // move existing data to the beginning + if n > 0 && b.idx > 0 { + copy(b.buf[0:n], b.buf[b.idx:]) + } + + // grow buffer if necessary + // TODO: let the buffer shrink again at some point + // Maybe keep the org buf slice and swap back? + if need > len(b.buf) { + // Round up to the next multiple of the default size + newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) + copy(newBuf, b.buf) + b.buf = newBuf + } + + b.idx = 0 + + for { + if b.timeout > 0 { + if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil { + return err + } + } + + nn, err := b.nc.Read(b.buf[n:]) + n += nn + + switch err { + case nil: + if n < need { + continue + } + b.length = n + return nil + + case io.EOF: + if n >= need { + b.length = n + return nil + } + return io.ErrUnexpectedEOF + + default: + return err + } + } +} + +// returns next N bytes from buffer. +// The returned slice is only guaranteed to be valid until the next read +func (b *buffer) readNext(need int) ([]byte, error) { + if b.length < need { + // refill + if err := b.fill(need); err != nil { + return nil, err + } + } + + offset := b.idx + b.idx += need + b.length -= need + return b.buf[offset:b.idx], nil +} + +// returns a buffer with the requested size. +// If possible, a slice from the existing buffer is returned. +// Otherwise a bigger buffer is made. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeBuffer(length int) []byte { + if b.length > 0 { + return nil + } + + // test (cheap) general case first + if length <= defaultBufSize || length <= cap(b.buf) { + return b.buf[:length] + } + + if length < maxPacketSize { + b.buf = make([]byte, length) + return b.buf + } + return make([]byte, length) +} + +// shortcut which can be used if the requested buffer is guaranteed to be +// smaller than defaultBufSize +// Only one buffer (total) can be used at a time. +func (b *buffer) takeSmallBuffer(length int) []byte { + if b.length > 0 { + return nil + } + return b.buf[:length] +} + +// takeCompleteBuffer returns the complete existing buffer. +// This can be used if the necessary buffer size is unknown. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeCompleteBuffer() []byte { + if b.length > 0 { + return nil + } + return b.buf +} diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go new file mode 100644 index 00000000..136c9e4d --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/collations.go @@ -0,0 +1,251 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +const defaultCollation = "utf8_general_ci" +const binaryCollation = "binary" + +// A list of available collations mapped to the internal ID. 
+// To update this map use the following MySQL query: +// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS +var collations = map[string]byte{ + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + "ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + "latin1_general_cs": 49, + "cp1251_bin": 50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + "utf16_general_ci": 54, + "utf16_bin": 55, + "utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + "utf32_general_ci": 60, + "utf32_bin": 61, + "utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, + "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "latin2_bin": 77, + "latin5_bin": 78, + "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + "utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + "ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + "utf16_unicode_ci": 101, + "utf16_icelandic_ci": 102, + "utf16_latvian_ci": 103, + "utf16_romanian_ci": 104, + "utf16_slovenian_ci": 105, + "utf16_polish_ci": 106, + "utf16_estonian_ci": 107, + "utf16_spanish_ci": 108, + "utf16_swedish_ci": 109, + "utf16_turkish_ci": 110, + "utf16_czech_ci": 111, + "utf16_danish_ci": 112, + "utf16_lithuanian_ci": 113, + "utf16_slovak_ci": 114, + "utf16_spanish2_ci": 115, + "utf16_roman_ci": 116, + "utf16_persian_ci": 117, + "utf16_esperanto_ci": 118, + "utf16_hungarian_ci": 119, + "utf16_sinhala_ci": 120, + "utf16_german2_ci": 121, + "utf16_croatian_ci": 122, + "utf16_unicode_520_ci": 123, + "utf16_vietnamese_ci": 124, + "ucs2_unicode_ci": 128, + "ucs2_icelandic_ci": 129, + "ucs2_latvian_ci": 130, + "ucs2_romanian_ci": 131, + "ucs2_slovenian_ci": 132, + "ucs2_polish_ci": 133, + "ucs2_estonian_ci": 134, + "ucs2_spanish_ci": 135, + "ucs2_swedish_ci": 136, + "ucs2_turkish_ci": 137, + "ucs2_czech_ci": 138, + "ucs2_danish_ci": 139, + "ucs2_lithuanian_ci": 140, + "ucs2_slovak_ci": 141, + "ucs2_spanish2_ci": 142, + "ucs2_roman_ci": 143, + "ucs2_persian_ci": 144, + 
"ucs2_esperanto_ci": 145, + "ucs2_hungarian_ci": 146, + "ucs2_sinhala_ci": 147, + "ucs2_german2_ci": 148, + "ucs2_croatian_ci": 149, + "ucs2_unicode_520_ci": 150, + "ucs2_vietnamese_ci": 151, + "ucs2_general_mysql500_ci": 159, + "utf32_unicode_ci": 160, + "utf32_icelandic_ci": 161, + "utf32_latvian_ci": 162, + "utf32_romanian_ci": 163, + "utf32_slovenian_ci": 164, + "utf32_polish_ci": 165, + "utf32_estonian_ci": 166, + "utf32_spanish_ci": 167, + "utf32_swedish_ci": 168, + "utf32_turkish_ci": 169, + "utf32_czech_ci": 170, + "utf32_danish_ci": 171, + "utf32_lithuanian_ci": 172, + "utf32_slovak_ci": 173, + "utf32_spanish2_ci": 174, + "utf32_roman_ci": 175, + "utf32_persian_ci": 176, + "utf32_esperanto_ci": 177, + "utf32_hungarian_ci": 178, + "utf32_sinhala_ci": 179, + "utf32_german2_ci": 180, + "utf32_croatian_ci": 181, + "utf32_unicode_520_ci": 182, + "utf32_vietnamese_ci": 183, + "utf8_unicode_ci": 192, + "utf8_icelandic_ci": 193, + "utf8_latvian_ci": 194, + "utf8_romanian_ci": 195, + "utf8_slovenian_ci": 196, + "utf8_polish_ci": 197, + "utf8_estonian_ci": 198, + "utf8_spanish_ci": 199, + "utf8_swedish_ci": 200, + "utf8_turkish_ci": 201, + "utf8_czech_ci": 202, + "utf8_danish_ci": 203, + "utf8_lithuanian_ci": 204, + "utf8_slovak_ci": 205, + "utf8_spanish2_ci": 206, + "utf8_roman_ci": 207, + "utf8_persian_ci": 208, + "utf8_esperanto_ci": 209, + "utf8_hungarian_ci": 210, + "utf8_sinhala_ci": 211, + "utf8_german2_ci": 212, + "utf8_croatian_ci": 213, + "utf8_unicode_520_ci": 214, + "utf8_vietnamese_ci": 215, + "utf8_general_mysql500_ci": 223, + "utf8mb4_unicode_ci": 224, + "utf8mb4_icelandic_ci": 225, + "utf8mb4_latvian_ci": 226, + "utf8mb4_romanian_ci": 227, + "utf8mb4_slovenian_ci": 228, + "utf8mb4_polish_ci": 229, + "utf8mb4_estonian_ci": 230, + "utf8mb4_spanish_ci": 231, + "utf8mb4_swedish_ci": 232, + "utf8mb4_turkish_ci": 233, + "utf8mb4_czech_ci": 234, + "utf8mb4_danish_ci": 235, + "utf8mb4_lithuanian_ci": 236, + "utf8mb4_slovak_ci": 237, + "utf8mb4_spanish2_ci": 238, + "utf8mb4_roman_ci": 239, + "utf8mb4_persian_ci": 240, + "utf8mb4_esperanto_ci": 241, + "utf8mb4_hungarian_ci": 242, + "utf8mb4_sinhala_ci": 243, + "utf8mb4_german2_ci": 244, + "utf8mb4_croatian_ci": 245, + "utf8mb4_unicode_520_ci": 246, + "utf8mb4_vietnamese_ci": 247, +} + +// A blacklist of collations which is unsafe to interpolate parameters. +// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. +var unsafeCollations = map[string]bool{ + "big5_chinese_ci": true, + "sjis_japanese_ci": true, + "gbk_chinese_ci": true, + "big5_bin": true, + "gb2312_bin": true, + "gbk_bin": true, + "sjis_bin": true, + "cp932_japanese_ci": true, + "cp932_bin": true, +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go new file mode 100644 index 00000000..e5706141 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -0,0 +1,461 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "database/sql/driver" + "io" + "net" + "strconv" + "strings" + "time" +) + +// a copy of context.Context for Go 1.7 and earlier +type mysqlContext interface { + Done() <-chan struct{} + Err() error + + // defined in context.Context, but not used in this driver: + // Deadline() (deadline time.Time, ok bool) + // Value(key interface{}) interface{} +} + +type mysqlConn struct { + buf buffer + netConn net.Conn + affectedRows uint64 + insertId uint64 + cfg *Config + maxAllowedPacket int + maxWriteSize int + writeTimeout time.Duration + flags clientFlag + status statusFlag + sequence uint8 + parseTime bool + + // for context support (Go 1.8+) + watching bool + watcher chan<- mysqlContext + closech chan struct{} + finished chan<- struct{} + canceled atomicError // set non-nil if conn is canceled + closed atomicBool // set when conn is closed, before closech is closed +} + +// Handles parameters set in DSN after the connection is established +func (mc *mysqlConn) handleParams() (err error) { + for param, val := range mc.cfg.Params { + switch param { + // Charset + case "charset": + charsets := strings.Split(val, ",") + for i := range charsets { + // ignore errors here - a charset may not exist + err = mc.exec("SET NAMES " + charsets[i]) + if err == nil { + break + } + } + if err != nil { + return + } + + // System Vars + default: + err = mc.exec("SET " + param + "=" + val + "") + if err != nil { + return + } + } + } + + return +} + +func (mc *mysqlConn) markBadConn(err error) error { + if mc == nil { + return err + } + if err != errBadConnNoWrite { + return err + } + return driver.ErrBadConn +} + +func (mc *mysqlConn) Begin() (driver.Tx, error) { + return mc.begin(false) +} + +func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + var q string + if readOnly { + q = "START TRANSACTION READ ONLY" + } else { + q = "START TRANSACTION" + } + err := mc.exec(q) + if err == nil { + return &mysqlTx{mc}, err + } + return nil, mc.markBadConn(err) +} + +func (mc *mysqlConn) Close() (err error) { + // Makes Close idempotent + if !mc.closed.IsSet() { + err = mc.writeCommandPacket(comQuit) + } + + mc.cleanup() + + return +} + +// Closes the network connection and unsets internal variables. Do not call this +// function after successfully authentication, call Close instead. This function +// is called before auth or on auth failure because MySQL will have already +// closed the network connection. 
+func (mc *mysqlConn) cleanup() { + if !mc.closed.TrySet(true) { + return + } + + // Makes cleanup idempotent + close(mc.closech) + if mc.netConn == nil { + return + } + if err := mc.netConn.Close(); err != nil { + errLog.Print(err) + } +} + +func (mc *mysqlConn) error() error { + if mc.closed.IsSet() { + if err := mc.canceled.Value(); err != nil { + return err + } + return ErrInvalidConn + } + return nil +} + +func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := mc.writeCommandPacketStr(comStmtPrepare, query) + if err != nil { + return nil, mc.markBadConn(err) + } + + stmt := &mysqlStmt{ + mc: mc, + } + + // Read Result + columnCount, err := stmt.readPrepareResultPacket() + if err == nil { + if stmt.paramCount > 0 { + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if columnCount > 0 { + err = mc.readUntilEOF() + } + } + + return stmt, err +} + +func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { + // Number of ? should be same to len(args) + if strings.Count(query, "?") != len(args) { + return "", driver.ErrSkip + } + + buf := mc.buf.takeCompleteBuffer() + if buf == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return "", ErrInvalidConn + } + buf = buf[:0] + argPos := 0 + + for i := 0; i < len(query); i++ { + q := strings.IndexByte(query[i:], '?') + if q == -1 { + buf = append(buf, query[i:]...) + break + } + buf = append(buf, query[i:i+q]...) + i += q + + arg := args[argPos] + argPos++ + + if arg == nil { + buf = append(buf, "NULL"...) + continue + } + + switch v := arg.(type) { + case int64: + buf = strconv.AppendInt(buf, v, 10) + case float64: + buf = strconv.AppendFloat(buf, v, 'g', -1, 64) + case bool: + if v { + buf = append(buf, '1') + } else { + buf = append(buf, '0') + } + case time.Time: + if v.IsZero() { + buf = append(buf, "'0000-00-00'"...) + } else { + v := v.In(mc.cfg.Loc) + v = v.Add(time.Nanosecond * 500) // To round under microsecond + year := v.Year() + year100 := year / 100 + year1 := year % 100 + month := v.Month() + day := v.Day() + hour := v.Hour() + minute := v.Minute() + second := v.Second() + micro := v.Nanosecond() / 1000 + + buf = append(buf, []byte{ + '\'', + digits10[year100], digits01[year100], + digits10[year1], digits01[year1], + '-', + digits10[month], digits01[month], + '-', + digits10[day], digits01[day], + ' ', + digits10[hour], digits01[hour], + ':', + digits10[minute], digits01[minute], + ':', + digits10[second], digits01[second], + }...) + + if micro != 0 { + micro10000 := micro / 10000 + micro100 := micro / 100 % 100 + micro1 := micro % 100 + buf = append(buf, []byte{ + '.', + digits10[micro10000], digits01[micro10000], + digits10[micro100], digits01[micro100], + digits10[micro1], digits01[micro1], + }...) + } + buf = append(buf, '\'') + } + case []byte: + if v == nil { + buf = append(buf, "NULL"...) + } else { + buf = append(buf, "_binary'"...) 
+ if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + } + case string: + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeStringBackslash(buf, v) + } else { + buf = escapeStringQuotes(buf, v) + } + buf = append(buf, '\'') + default: + return "", driver.ErrSkip + } + + if len(buf)+4 > mc.maxAllowedPacket { + return "", driver.ErrSkip + } + } + if argPos != len(args) { + return "", driver.ErrSkip + } + return string(buf), nil +} + +func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + } + mc.affectedRows = 0 + mc.insertId = 0 + + err := mc.exec(query) + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + return nil, mc.markBadConn(err) +} + +// Internal function to execute commands +func (mc *mysqlConn) exec(query string) error { + // Send command + if err := mc.writeCommandPacketStr(comQuery, query); err != nil { + return mc.markBadConn(err) + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return err + } + + if resLen > 0 { + // columns + if err := mc.readUntilEOF(); err != nil { + return err + } + + // rows + if err := mc.readUntilEOF(); err != nil { + return err + } + } + + return mc.discardResults() +} + +func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { + return mc.query(query, args) +} + +func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + } + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err == nil { + // Read Result + var resLen int + resLen, err = mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen == 0 { + rows.rs.done = true + + switch err := rows.NextResultSet(); err { + case nil, io.EOF: + return rows, nil + default: + return nil, err + } + } + + // Columns + rows.rs.columns, err = mc.readColumns(resLen) + return rows, err + } + } + return nil, mc.markBadConn(err) +} + +// Gets the value of the given MySQL System Variable +// The returned byte slice is only valid until the next read +func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { + // Send command + if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { + return nil, err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}} + + if resLen > 0 { + // Columns + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + dest := make([]driver.Value, resLen) + if err = rows.readRow(dest); err == 
nil { + return dest[0].([]byte), mc.readUntilEOF() + } + } + return nil, err +} + +// finish is called when the query has canceled. +func (mc *mysqlConn) cancel(err error) { + mc.canceled.Set(err) + mc.cleanup() +} + +// finish is called when the query has succeeded. +func (mc *mysqlConn) finish() { + if !mc.watching || mc.finished == nil { + return + } + select { + case mc.finished <- struct{}{}: + mc.watching = false + case <-mc.closech: + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection_go18.go b/vendor/github.com/go-sql-driver/mysql/connection_go18.go new file mode 100644 index 00000000..ce52c7d1 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connection_go18.go @@ -0,0 +1,207 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build go1.8 + +package mysql + +import ( + "context" + "database/sql" + "database/sql/driver" +) + +// Ping implements driver.Pinger interface +func (mc *mysqlConn) Ping(ctx context.Context) (err error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + if err = mc.watchCancel(ctx); err != nil { + return + } + defer mc.finish() + + if err = mc.writeCommandPacket(comPing); err != nil { + return + } + + return mc.readResultOK() +} + +// BeginTx implements driver.ConnBeginTx interface +func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + defer mc.finish() + + if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault { + level, err := mapIsolationLevel(opts.Isolation) + if err != nil { + return nil, err + } + err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level) + if err != nil { + return nil, err + } + } + + return mc.begin(opts.ReadOnly) +} + +func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + + rows, err := mc.query(query, dargs) + if err != nil { + mc.finish() + return nil, err + } + rows.finish = mc.finish + return rows, err +} + +func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + defer mc.finish() + + return mc.Exec(query, dargs) +} + +func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + + stmt, err := mc.Prepare(query) + mc.finish() + if err != nil { + return nil, err + } + + select { + default: + case <-ctx.Done(): + stmt.Close() + return nil, ctx.Err() + } + return stmt, nil +} + +func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := stmt.mc.watchCancel(ctx); err != nil { + return nil, err + } + + rows, err := stmt.query(dargs) + if err != nil { + stmt.mc.finish() + return nil, err + } + 
rows.finish = stmt.mc.finish + return rows, err +} + +func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := stmt.mc.watchCancel(ctx); err != nil { + return nil, err + } + defer stmt.mc.finish() + + return stmt.Exec(dargs) +} + +func (mc *mysqlConn) watchCancel(ctx context.Context) error { + if mc.watching { + // Reach here if canceled, + // so the connection is already invalid + mc.cleanup() + return nil + } + // When ctx is already cancelled, don't watch it. + if err := ctx.Err(); err != nil { + return err + } + // When ctx is not cancellable, don't watch it. + if ctx.Done() == nil { + return nil + } + // When watcher is not alive, can't watch it. + if mc.watcher == nil { + return nil + } + + mc.watching = true + mc.watcher <- ctx + return nil +} + +func (mc *mysqlConn) startWatcher() { + watcher := make(chan mysqlContext, 1) + mc.watcher = watcher + finished := make(chan struct{}) + mc.finished = finished + go func() { + for { + var ctx mysqlContext + select { + case ctx = <-watcher: + case <-mc.closech: + return + } + + select { + case <-ctx.Done(): + mc.cancel(ctx.Err()) + case <-finished: + case <-mc.closech: + return + } + } + }() +} + +func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) { + nv.Value, err = converter{}.ConvertValue(nv.Value) + return +} + +// ResetSession implements driver.SessionResetter. +// (From Go 1.10) +func (mc *mysqlConn) ResetSession(ctx context.Context) error { + if mc.closed.IsSet() { + return driver.ErrBadConn + } + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go new file mode 100644 index 00000000..b1e6b85e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/const.go @@ -0,0 +1,174 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +const ( + defaultAuthPlugin = "mysql_native_password" + defaultMaxAllowedPacket = 4 << 20 // 4 MiB + minProtocolVersion = 10 + maxPacketSize = 1<<24 - 1 + timeFormat = "2006-01-02 15:04:05.999999" +) + +// MySQL constants documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +const ( + iOK byte = 0x00 + iAuthMoreData byte = 0x01 + iLocalInFile byte = 0xfb + iEOF byte = 0xfe + iERR byte = 0xff +) + +// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +type clientFlag uint32 + +const ( + clientLongPassword clientFlag = 1 << iota + clientFoundRows + clientLongFlag + clientConnectWithDB + clientNoSchema + clientCompress + clientODBC + clientLocalFiles + clientIgnoreSpace + clientProtocol41 + clientInteractive + clientSSL + clientIgnoreSIGPIPE + clientTransactions + clientReserved + clientSecureConn + clientMultiStatements + clientMultiResults + clientPSMultiResults + clientPluginAuth + clientConnectAttrs + clientPluginAuthLenEncClientData + clientCanHandleExpiredPasswords + clientSessionTrack + clientDeprecateEOF +) + +const ( + comQuit byte = iota + 1 + comInitDB + comQuery + comFieldList + comCreateDB + comDropDB + comRefresh + comShutdown + comStatistics + comProcessInfo + comConnect + comProcessKill + comDebug + comPing + comTime + comDelayedInsert + comChangeUser + comBinlogDump + comTableDump + comConnectOut + comRegisterSlave + comStmtPrepare + comStmtExecute + comStmtSendLongData + comStmtClose + comStmtReset + comSetOption + comStmtFetch +) + +// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType +type fieldType byte + +const ( + fieldTypeDecimal fieldType = iota + fieldTypeTiny + fieldTypeShort + fieldTypeLong + fieldTypeFloat + fieldTypeDouble + fieldTypeNULL + fieldTypeTimestamp + fieldTypeLongLong + fieldTypeInt24 + fieldTypeDate + fieldTypeTime + fieldTypeDateTime + fieldTypeYear + fieldTypeNewDate + fieldTypeVarChar + fieldTypeBit +) +const ( + fieldTypeJSON fieldType = iota + 0xf5 + fieldTypeNewDecimal + fieldTypeEnum + fieldTypeSet + fieldTypeTinyBLOB + fieldTypeMediumBLOB + fieldTypeLongBLOB + fieldTypeBLOB + fieldTypeVarString + fieldTypeString + fieldTypeGeometry +) + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota + flagPriKey + flagUniqueKey + flagMultipleKey + flagBLOB + flagUnsigned + flagZeroFill + flagBinary + flagEnum + flagAutoIncrement + flagTimestamp + flagSet + flagUnknown1 + flagUnknown2 + flagUnknown3 + flagUnknown4 +) + +// http://dev.mysql.com/doc/internals/en/status-flags.html +type statusFlag uint16 + +const ( + statusInTrans statusFlag = 1 << iota + statusInAutocommit + statusReserved // Not in documentation + statusMoreResultsExists + statusNoGoodIndexUsed + statusNoIndexUsed + statusCursorExists + statusLastRowSent + statusDbDropped + statusNoBackslashEscapes + statusMetadataChanged + statusQueryWasSlow + statusPsOutParams + statusInTransReadonly + statusSessionStateChanged +) + +const ( + cachingSha2PasswordRequestPublicKey = 2 + cachingSha2PasswordFastAuthSuccess = 3 + cachingSha2PasswordPerformFullAuthentication = 4 +) diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go new file mode 100644 index 00000000..e9ede2c8 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -0,0 +1,172 @@ +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// Package mysql provides a MySQL driver for Go's database/sql package. +// +// The driver should be used via the database/sql package: +// +// import "database/sql" +// import _ "github.com/go-sql-driver/mysql" +// +// db, err := sql.Open("mysql", "user:password@/dbname") +// +// See https://github.com/go-sql-driver/mysql#usage for details +package mysql + +import ( + "database/sql" + "database/sql/driver" + "net" + "sync" +) + +// watcher interface is used for context support (From Go 1.8) +type watcher interface { + startWatcher() +} + +// MySQLDriver is exported to make the driver directly accessible. +// In general the driver is used via the database/sql package. +type MySQLDriver struct{} + +// DialFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDial +type DialFunc func(addr string) (net.Conn, error) + +var ( + dialsLock sync.RWMutex + dials map[string]DialFunc +) + +// RegisterDial registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// addr is passed as a parameter to the dial function. +func RegisterDial(net string, dial DialFunc) { + dialsLock.Lock() + defer dialsLock.Unlock() + if dials == nil { + dials = make(map[string]DialFunc) + } + dials[net] = dial +} + +// Open new Connection. +// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how +// the DSN string is formated +func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { + var err error + + // New mysqlConn + mc := &mysqlConn{ + maxAllowedPacket: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + closech: make(chan struct{}), + } + mc.cfg, err = ParseDSN(dsn) + if err != nil { + return nil, err + } + mc.parseTime = mc.cfg.ParseTime + + // Connect to Server + dialsLock.RLock() + dial, ok := dials[mc.cfg.Net] + dialsLock.RUnlock() + if ok { + mc.netConn, err = dial(mc.cfg.Addr) + } else { + nd := net.Dialer{Timeout: mc.cfg.Timeout} + mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr) + } + if err != nil { + return nil, err + } + + // Enable TCP Keepalives on TCP connections + if tc, ok := mc.netConn.(*net.TCPConn); ok { + if err := tc.SetKeepAlive(true); err != nil { + // Don't send COM_QUIT before handshake. 
+ mc.netConn.Close() + mc.netConn = nil + return nil, err + } + } + + // Call startWatcher for context support (From Go 1.8) + if s, ok := interface{}(mc).(watcher); ok { + s.startWatcher() + } + + mc.buf = newBuffer(mc.netConn) + + // Set I/O timeouts + mc.buf.timeout = mc.cfg.ReadTimeout + mc.writeTimeout = mc.cfg.WriteTimeout + + // Reading Handshake Initialization Packet + authData, plugin, err := mc.readHandshakePacket() + if err != nil { + mc.cleanup() + return nil, err + } + if plugin == "" { + plugin = defaultAuthPlugin + } + + // Send Client Authentication Packet + authResp, err := mc.auth(authData, plugin) + if err != nil { + // try the default auth plugin, if using the requested plugin failed + errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error()) + plugin = defaultAuthPlugin + authResp, err = mc.auth(authData, plugin) + if err != nil { + mc.cleanup() + return nil, err + } + } + if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil { + mc.cleanup() + return nil, err + } + + // Handle response to auth packet, switch methods if possible + if err = mc.handleAuthResult(authData, plugin); err != nil { + // Authentication failed and MySQL has already closed the connection + // (https://dev.mysql.com/doc/internals/en/authentication-fails.html). + // Do not send COM_QUIT, just cleanup and return the error. + mc.cleanup() + return nil, err + } + + if mc.cfg.MaxAllowedPacket > 0 { + mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket + } else { + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxAllowedPacket = stringToInt(maxap) - 1 + } + if mc.maxAllowedPacket < maxPacketSize { + mc.maxWriteSize = mc.maxAllowedPacket + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +func init() { + sql.Register("mysql", &MySQLDriver{}) +} diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go new file mode 100644 index 00000000..be014bab --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -0,0 +1,611 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "crypto/rsa" + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "sort" + "strconv" + "strings" + "time" +) + +var ( + errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)") + errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name") + errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations") +) + +// Config is a configuration parsed from a DSN string. +// If a new Config is created instead of being parsed from a DSN string, +// the NewConfig function should be used, which sets default values. 
+type Config struct { + User string // Username + Passwd string // Password (requires User) + Net string // Network type + Addr string // Network address (requires Net) + DBName string // Database name + Params map[string]string // Connection parameters + Collation string // Connection collation + Loc *time.Location // Location for time.Time values + MaxAllowedPacket int // Max packet size allowed + ServerPubKey string // Server public key name + pubKey *rsa.PublicKey // Server public key + TLSConfig string // TLS configuration name + tls *tls.Config // TLS configuration + Timeout time.Duration // Dial timeout + ReadTimeout time.Duration // I/O read timeout + WriteTimeout time.Duration // I/O write timeout + + AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE + AllowCleartextPasswords bool // Allows the cleartext client side plugin + AllowNativePasswords bool // Allows the native password authentication method + AllowOldPasswords bool // Allows the old insecure password method + ClientFoundRows bool // Return number of matching rows instead of rows changed + ColumnsWithAlias bool // Prepend table alias to column names + InterpolateParams bool // Interpolate placeholders into query string + MultiStatements bool // Allow multiple statements in one query + ParseTime bool // Parse time values to time.Time + RejectReadOnly bool // Reject read-only connections +} + +// NewConfig creates a new Config and sets default values. +func NewConfig() *Config { + return &Config{ + Collation: defaultCollation, + Loc: time.UTC, + MaxAllowedPacket: defaultMaxAllowedPacket, + AllowNativePasswords: true, + } +} + +func (cfg *Config) normalize() error { + if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { + return errInvalidDSNUnsafeCollation + } + + // Set default network if empty + if cfg.Net == "" { + cfg.Net = "tcp" + } + + // Set default address if empty + if cfg.Addr == "" { + switch cfg.Net { + case "tcp": + cfg.Addr = "127.0.0.1:3306" + case "unix": + cfg.Addr = "/tmp/mysql.sock" + default: + return errors.New("default addr for network '" + cfg.Net + "' unknown") + } + + } else if cfg.Net == "tcp" { + cfg.Addr = ensureHavePort(cfg.Addr) + } + + if cfg.tls != nil { + if cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify { + host, _, err := net.SplitHostPort(cfg.Addr) + if err == nil { + cfg.tls.ServerName = host + } + } + } + + return nil +} + +// FormatDSN formats the given Config into a DSN string which can be passed to +// the driver. 
+func (cfg *Config) FormatDSN() string { + var buf bytes.Buffer + + // [username[:password]@] + if len(cfg.User) > 0 { + buf.WriteString(cfg.User) + if len(cfg.Passwd) > 0 { + buf.WriteByte(':') + buf.WriteString(cfg.Passwd) + } + buf.WriteByte('@') + } + + // [protocol[(address)]] + if len(cfg.Net) > 0 { + buf.WriteString(cfg.Net) + if len(cfg.Addr) > 0 { + buf.WriteByte('(') + buf.WriteString(cfg.Addr) + buf.WriteByte(')') + } + } + + // /dbname + buf.WriteByte('/') + buf.WriteString(cfg.DBName) + + // [?param1=value1&...¶mN=valueN] + hasParam := false + + if cfg.AllowAllFiles { + hasParam = true + buf.WriteString("?allowAllFiles=true") + } + + if cfg.AllowCleartextPasswords { + if hasParam { + buf.WriteString("&allowCleartextPasswords=true") + } else { + hasParam = true + buf.WriteString("?allowCleartextPasswords=true") + } + } + + if !cfg.AllowNativePasswords { + if hasParam { + buf.WriteString("&allowNativePasswords=false") + } else { + hasParam = true + buf.WriteString("?allowNativePasswords=false") + } + } + + if cfg.AllowOldPasswords { + if hasParam { + buf.WriteString("&allowOldPasswords=true") + } else { + hasParam = true + buf.WriteString("?allowOldPasswords=true") + } + } + + if cfg.ClientFoundRows { + if hasParam { + buf.WriteString("&clientFoundRows=true") + } else { + hasParam = true + buf.WriteString("?clientFoundRows=true") + } + } + + if col := cfg.Collation; col != defaultCollation && len(col) > 0 { + if hasParam { + buf.WriteString("&collation=") + } else { + hasParam = true + buf.WriteString("?collation=") + } + buf.WriteString(col) + } + + if cfg.ColumnsWithAlias { + if hasParam { + buf.WriteString("&columnsWithAlias=true") + } else { + hasParam = true + buf.WriteString("?columnsWithAlias=true") + } + } + + if cfg.InterpolateParams { + if hasParam { + buf.WriteString("&interpolateParams=true") + } else { + hasParam = true + buf.WriteString("?interpolateParams=true") + } + } + + if cfg.Loc != time.UTC && cfg.Loc != nil { + if hasParam { + buf.WriteString("&loc=") + } else { + hasParam = true + buf.WriteString("?loc=") + } + buf.WriteString(url.QueryEscape(cfg.Loc.String())) + } + + if cfg.MultiStatements { + if hasParam { + buf.WriteString("&multiStatements=true") + } else { + hasParam = true + buf.WriteString("?multiStatements=true") + } + } + + if cfg.ParseTime { + if hasParam { + buf.WriteString("&parseTime=true") + } else { + hasParam = true + buf.WriteString("?parseTime=true") + } + } + + if cfg.ReadTimeout > 0 { + if hasParam { + buf.WriteString("&readTimeout=") + } else { + hasParam = true + buf.WriteString("?readTimeout=") + } + buf.WriteString(cfg.ReadTimeout.String()) + } + + if cfg.RejectReadOnly { + if hasParam { + buf.WriteString("&rejectReadOnly=true") + } else { + hasParam = true + buf.WriteString("?rejectReadOnly=true") + } + } + + if len(cfg.ServerPubKey) > 0 { + if hasParam { + buf.WriteString("&serverPubKey=") + } else { + hasParam = true + buf.WriteString("?serverPubKey=") + } + buf.WriteString(url.QueryEscape(cfg.ServerPubKey)) + } + + if cfg.Timeout > 0 { + if hasParam { + buf.WriteString("&timeout=") + } else { + hasParam = true + buf.WriteString("?timeout=") + } + buf.WriteString(cfg.Timeout.String()) + } + + if len(cfg.TLSConfig) > 0 { + if hasParam { + buf.WriteString("&tls=") + } else { + hasParam = true + buf.WriteString("?tls=") + } + buf.WriteString(url.QueryEscape(cfg.TLSConfig)) + } + + if cfg.WriteTimeout > 0 { + if hasParam { + buf.WriteString("&writeTimeout=") + } else { + hasParam = true + buf.WriteString("?writeTimeout=") + } + 
buf.WriteString(cfg.WriteTimeout.String()) + } + + if cfg.MaxAllowedPacket != defaultMaxAllowedPacket { + if hasParam { + buf.WriteString("&maxAllowedPacket=") + } else { + hasParam = true + buf.WriteString("?maxAllowedPacket=") + } + buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket)) + + } + + // other params + if cfg.Params != nil { + var params []string + for param := range cfg.Params { + params = append(params, param) + } + sort.Strings(params) + for _, param := range params { + if hasParam { + buf.WriteByte('&') + } else { + hasParam = true + buf.WriteByte('?') + } + + buf.WriteString(param) + buf.WriteByte('=') + buf.WriteString(url.QueryEscape(cfg.Params[param])) + } + } + + return buf.String() +} + +// ParseDSN parses the DSN string to a Config +func ParseDSN(dsn string) (cfg *Config, err error) { + // New config with some default values + cfg = NewConfig() + + // [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN] + // Find the last '/' (since the password or the net addr might contain a '/') + foundSlash := false + for i := len(dsn) - 1; i >= 0; i-- { + if dsn[i] == '/' { + foundSlash = true + var j, k int + + // left part is empty if i <= 0 + if i > 0 { + // [username[:password]@][protocol[(address)]] + // Find the last '@' in dsn[:i] + for j = i; j >= 0; j-- { + if dsn[j] == '@' { + // username[:password] + // Find the first ':' in dsn[:j] + for k = 0; k < j; k++ { + if dsn[k] == ':' { + cfg.Passwd = dsn[k+1 : j] + break + } + } + cfg.User = dsn[:k] + + break + } + } + + // [protocol[(address)]] + // Find the first '(' in dsn[j+1:i] + for k = j + 1; k < i; k++ { + if dsn[k] == '(' { + // dsn[i-1] must be == ')' if an address is specified + if dsn[i-1] != ')' { + if strings.ContainsRune(dsn[k+1:i], ')') { + return nil, errInvalidDSNUnescaped + } + return nil, errInvalidDSNAddr + } + cfg.Addr = dsn[k+1 : i-1] + break + } + } + cfg.Net = dsn[j+1 : k] + } + + // dbname[?param1=value1&...&paramN=valueN] + // Find the first '?' in dsn[i+1:] + for j = i + 1; j < len(dsn); j++ { + if dsn[j] == '?'
{ + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.DBName = dsn[i+1 : j] + + break + } + } + + if !foundSlash && len(dsn) > 0 { + return nil, errInvalidDSNNoSlash + } + + if err = cfg.normalize(); err != nil { + return nil, err + } + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *Config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + // Disable INFILE whitelist / enable all files + case "allowAllFiles": + var isBool bool + cfg.AllowAllFiles, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use cleartext authentication mode (MySQL 5.5.10+) + case "allowCleartextPasswords": + var isBool bool + cfg.AllowCleartextPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use native password authentication + case "allowNativePasswords": + var isBool bool + cfg.AllowNativePasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use old authentication mode (pre MySQL 4.1) + case "allowOldPasswords": + var isBool bool + cfg.AllowOldPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Switch "rowsAffected" mode + case "clientFoundRows": + var isBool bool + cfg.ClientFoundRows, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Collation + case "collation": + cfg.Collation = value + break + + case "columnsWithAlias": + var isBool bool + cfg.ColumnsWithAlias, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Compression + case "compress": + return errors.New("compression not implemented yet") + + // Enable client side placeholder substitution + case "interpolateParams": + var isBool bool + cfg.InterpolateParams, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.Loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // multiple statements in one query + case "multiStatements": + var isBool bool + cfg.MultiStatements, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // time.Time parsing + case "parseTime": + var isBool bool + cfg.ParseTime, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // I/O read Timeout + case "readTimeout": + cfg.ReadTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // Reject read-only connections + case "rejectReadOnly": + var isBool bool + cfg.RejectReadOnly, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Server public key + case "serverPubKey": + name, err := url.QueryUnescape(value) + if err != nil { + return fmt.Errorf("invalid value for server pub key name: %v", err) + } + + if pubKey := getServerPubKey(name); pubKey != nil { + cfg.ServerPubKey = name + cfg.pubKey = pubKey + } else { + return errors.New("invalid value / unknown server pub key name: " + name) + } + + // Strict mode + case "strict": + panic("strict mode 
has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode") + + // Dial Timeout + case "timeout": + cfg.Timeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // TLS-Encryption + case "tls": + boolValue, isBool := readBool(value) + if isBool { + if boolValue { + cfg.TLSConfig = "true" + cfg.tls = &tls.Config{} + } else { + cfg.TLSConfig = "false" + } + } else if vl := strings.ToLower(value); vl == "skip-verify" { + cfg.TLSConfig = vl + cfg.tls = &tls.Config{InsecureSkipVerify: true} + } else { + name, err := url.QueryUnescape(value) + if err != nil { + return fmt.Errorf("invalid value for TLS config name: %v", err) + } + + if tlsConfig := getTLSConfigClone(name); tlsConfig != nil { + cfg.TLSConfig = name + cfg.tls = tlsConfig + } else { + return errors.New("invalid value / unknown config name: " + name) + } + } + + // I/O write Timeout + case "writeTimeout": + cfg.WriteTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + case "maxAllowedPacket": + cfg.MaxAllowedPacket, err = strconv.Atoi(value) + if err != nil { + return + } + default: + // lazy init + if cfg.Params == nil { + cfg.Params = make(map[string]string) + } + + if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} + +func ensureHavePort(addr string) string { + if _, _, err := net.SplitHostPort(addr); err != nil { + return net.JoinHostPort(addr, "3306") + } + return addr +} diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go new file mode 100644 index 00000000..760782ff --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/errors.go @@ -0,0 +1,65 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "errors" + "fmt" + "log" + "os" +) + +// Various errors the driver might return. Can change between driver versions. +var ( + ErrInvalidConn = errors.New("invalid connection") + ErrMalformPkt = errors.New("malformed packet") + ErrNoTLS = errors.New("TLS requested but server does not support TLS") + ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN") + ErrNativePassword = errors.New("this user requires mysql native password authentication.") + ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") + ErrUnknownPlugin = errors.New("this authentication plugin is not supported") + ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+") + ErrPktSync = errors.New("commands out of sync. You can't run this command now") + ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?") + ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server") + ErrBusyBuffer = errors.New("busy buffer") + + // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet. 
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn + // to trigger a resend. + // See https://github.com/go-sql-driver/mysql/pull/302 + errBadConnNoWrite = errors.New("bad connection") +) + +var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) + +// Logger is used to log critical error messages. +type Logger interface { + Print(v ...interface{}) +} + +// SetLogger is used to set the logger for critical errors. +// The initial logger is os.Stderr. +func SetLogger(logger Logger) error { + if logger == nil { + return errors.New("logger is nil") + } + errLog = logger + return nil +} + +// MySQLError is an error type which represents a single MySQL error +type MySQLError struct { + Number uint16 + Message string +} + +func (me *MySQLError) Error() string { + return fmt.Sprintf("Error %d: %s", me.Number, me.Message) +} diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go new file mode 100644 index 00000000..e1e2ece4 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/fields.go @@ -0,0 +1,194 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql" + "reflect" +) + +func (mf *mysqlField) typeDatabaseName() string { + switch mf.fieldType { + case fieldTypeBit: + return "BIT" + case fieldTypeBLOB: + if mf.charSet != collations[binaryCollation] { + return "TEXT" + } + return "BLOB" + case fieldTypeDate: + return "DATE" + case fieldTypeDateTime: + return "DATETIME" + case fieldTypeDecimal: + return "DECIMAL" + case fieldTypeDouble: + return "DOUBLE" + case fieldTypeEnum: + return "ENUM" + case fieldTypeFloat: + return "FLOAT" + case fieldTypeGeometry: + return "GEOMETRY" + case fieldTypeInt24: + return "MEDIUMINT" + case fieldTypeJSON: + return "JSON" + case fieldTypeLong: + return "INT" + case fieldTypeLongBLOB: + if mf.charSet != collations[binaryCollation] { + return "LONGTEXT" + } + return "LONGBLOB" + case fieldTypeLongLong: + return "BIGINT" + case fieldTypeMediumBLOB: + if mf.charSet != collations[binaryCollation] { + return "MEDIUMTEXT" + } + return "MEDIUMBLOB" + case fieldTypeNewDate: + return "DATE" + case fieldTypeNewDecimal: + return "DECIMAL" + case fieldTypeNULL: + return "NULL" + case fieldTypeSet: + return "SET" + case fieldTypeShort: + return "SMALLINT" + case fieldTypeString: + if mf.charSet == collations[binaryCollation] { + return "BINARY" + } + return "CHAR" + case fieldTypeTime: + return "TIME" + case fieldTypeTimestamp: + return "TIMESTAMP" + case fieldTypeTiny: + return "TINYINT" + case fieldTypeTinyBLOB: + if mf.charSet != collations[binaryCollation] { + return "TINYTEXT" + } + return "TINYBLOB" + case fieldTypeVarChar: + if mf.charSet == collations[binaryCollation] { + return "VARBINARY" + } + return "VARCHAR" + case fieldTypeVarString: + if mf.charSet == collations[binaryCollation] { + return "VARBINARY" + } + return "VARCHAR" + case fieldTypeYear: + return "YEAR" + default: + return "" + } +} + +var ( + scanTypeFloat32 = reflect.TypeOf(float32(0)) + scanTypeFloat64 = reflect.TypeOf(float64(0)) + scanTypeInt8 = reflect.TypeOf(int8(0)) + scanTypeInt16 = reflect.TypeOf(int16(0)) 
+ scanTypeInt32 = reflect.TypeOf(int32(0)) + scanTypeInt64 = reflect.TypeOf(int64(0)) + scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{}) + scanTypeNullInt = reflect.TypeOf(sql.NullInt64{}) + scanTypeNullTime = reflect.TypeOf(NullTime{}) + scanTypeUint8 = reflect.TypeOf(uint8(0)) + scanTypeUint16 = reflect.TypeOf(uint16(0)) + scanTypeUint32 = reflect.TypeOf(uint32(0)) + scanTypeUint64 = reflect.TypeOf(uint64(0)) + scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{}) + scanTypeUnknown = reflect.TypeOf(new(interface{})) +) + +type mysqlField struct { + tableName string + name string + length uint32 + flags fieldFlag + fieldType fieldType + decimals byte + charSet uint8 +} + +func (mf *mysqlField) scanType() reflect.Type { + switch mf.fieldType { + case fieldTypeTiny: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint8 + } + return scanTypeInt8 + } + return scanTypeNullInt + + case fieldTypeShort, fieldTypeYear: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint16 + } + return scanTypeInt16 + } + return scanTypeNullInt + + case fieldTypeInt24, fieldTypeLong: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint32 + } + return scanTypeInt32 + } + return scanTypeNullInt + + case fieldTypeLongLong: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint64 + } + return scanTypeInt64 + } + return scanTypeNullInt + + case fieldTypeFloat: + if mf.flags&flagNotNULL != 0 { + return scanTypeFloat32 + } + return scanTypeNullFloat + + case fieldTypeDouble: + if mf.flags&flagNotNULL != 0 { + return scanTypeFloat64 + } + return scanTypeNullFloat + + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON, + fieldTypeTime: + return scanTypeRawBytes + + case fieldTypeDate, fieldTypeNewDate, + fieldTypeTimestamp, fieldTypeDateTime: + // NullTime is always returned for more consistent behavior as it can + // handle both cases of parseTime regardless if the field is nullable. + return scanTypeNullTime + + default: + return scanTypeUnknown + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go new file mode 100644 index 00000000..273cb0ba --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/infile.go @@ -0,0 +1,182 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "fmt" + "io" + "os" + "strings" + "sync" +) + +var ( + fileRegister map[string]bool + fileRegisterLock sync.RWMutex + readerRegister map[string]func() io.Reader + readerRegisterLock sync.RWMutex +) + +// RegisterLocalFile adds the given file to the file whitelist, +// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>". +// Alternatively you can allow the use of all local files with +// the DSN parameter 'allowAllFiles=true' +// +// filePath := "/home/gopher/data.csv" +// mysql.RegisterLocalFile(filePath) +// _, err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") +// if err != nil { +// ...
+// +func RegisterLocalFile(filePath string) { + fileRegisterLock.Lock() + // lazy map init + if fileRegister == nil { + fileRegister = make(map[string]bool) + } + + fileRegister[strings.Trim(filePath, `"`)] = true + fileRegisterLock.Unlock() +} + +// DeregisterLocalFile removes the given filepath from the whitelist. +func DeregisterLocalFile(filePath string) { + fileRegisterLock.Lock() + delete(fileRegister, strings.Trim(filePath, `"`)) + fileRegisterLock.Unlock() +} + +// RegisterReaderHandler registers a handler function which is used +// to receive an io.Reader. +// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>". +// If the handler returns an io.ReadCloser, Close() is called when the +// request is finished. +// +// mysql.RegisterReaderHandler("data", func() io.Reader { +// var csvReader io.Reader // Some Reader that returns CSV data +// ... // Open Reader here +// return csvReader +// }) +// _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterReaderHandler(name string, handler func() io.Reader) { + readerRegisterLock.Lock() + // lazy map init + if readerRegister == nil { + readerRegister = make(map[string]func() io.Reader) + } + + readerRegister[name] = handler + readerRegisterLock.Unlock() +} + +// DeregisterReaderHandler removes the ReaderHandler function with +// the given name from the registry. +func DeregisterReaderHandler(name string) { + readerRegisterLock.Lock() + delete(readerRegister, name) + readerRegisterLock.Unlock() +} + +func deferredClose(err *error, closer io.Closer) { + closeErr := closer.Close() + if *err == nil { + *err = closeErr + } +} + +func (mc *mysqlConn) handleInFileRequest(name string) (err error) { + var rdr io.Reader + var data []byte + packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP + if mc.maxWriteSize < packetSize { + packetSize = mc.maxWriteSize + } + + if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader + // The server might return an absolute path. See issue #355.
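+ // Strip the leading path (if any) and the 8-byte "Reader::" prefix so that only the registered handler name remains.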
+ name = name[idx+8:] + + readerRegisterLock.RLock() + handler, inMap := readerRegister[name] + readerRegisterLock.RUnlock() + + if inMap { + rdr = handler() + if rdr != nil { + if cl, ok := rdr.(io.Closer); ok { + defer deferredClose(&err, cl) + } + } else { + err = fmt.Errorf("Reader '%s' is <nil>", name) + } + } else { + err = fmt.Errorf("Reader '%s' is not registered", name) + } + } else { // File + name = strings.Trim(name, `"`) + fileRegisterLock.RLock() + fr := fileRegister[name] + fileRegisterLock.RUnlock() + if mc.cfg.AllowAllFiles || fr { + var file *os.File + var fi os.FileInfo + + if file, err = os.Open(name); err == nil { + defer deferredClose(&err, file) + + // get file size + if fi, err = file.Stat(); err == nil { + rdr = file + if fileSize := int(fi.Size()); fileSize < packetSize { + packetSize = fileSize + } + } + } + } else { + err = fmt.Errorf("local file '%s' is not registered", name) + } + } + + // send content packets + // if packetSize == 0, the Reader contains no data + if err == nil && packetSize > 0 { + data := make([]byte, 4+packetSize) + var n int + for err == nil { + n, err = rdr.Read(data[4:]) + if n > 0 { + if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { + return ioErr + } + } + } + if err == io.EOF { + err = nil + } + } + + // send empty packet (termination) + if data == nil { + data = make([]byte, 4) + } + if ioErr := mc.writePacket(data[:4]); ioErr != nil { + return ioErr + } + + // read OK packet + if err == nil { + return mc.readResultOK() + } + + mc.readPacket() + return err +} diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go new file mode 100644 index 00000000..9ed64085 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -0,0 +1,1286 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/.
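+// The framing used throughout this file: every protocol packet starts with a +// 4-byte header -- a 24-bit little-endian payload length and a 1-byte sequence +// number -- and a payload of exactly 2^24-1 bytes is continued in a follow-up +// packet, which readPacket and writePacket below account for.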
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "time" +) + +// Packets documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +// Read packet to buffer 'data' +func (mc *mysqlConn) readPacket() ([]byte, error) { + var prevData []byte + for { + // read packet header + data, err := mc.buf.readNext(4) + if err != nil { + if cerr := mc.canceled.Value(); cerr != nil { + return nil, cerr + } + errLog.Print(err) + mc.Close() + return nil, ErrInvalidConn + } + + // packet length [24 bit] + pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + + // check packet sync [8 bit] + if data[3] != mc.sequence { + if data[3] > mc.sequence { + return nil, ErrPktSyncMul + } + return nil, ErrPktSync + } + mc.sequence++ + + // packets with length 0 terminate a previous packet which is a + // multiple of (2^24)−1 bytes long + if pktLen == 0 { + // there was no previous packet + if prevData == nil { + errLog.Print(ErrMalformPkt) + mc.Close() + return nil, ErrInvalidConn + } + + return prevData, nil + } + + // read packet body [pktLen bytes] + data, err = mc.buf.readNext(pktLen) + if err != nil { + if cerr := mc.canceled.Value(); cerr != nil { + return nil, cerr + } + errLog.Print(err) + mc.Close() + return nil, ErrInvalidConn + } + + // return data if this was the last packet + if pktLen < maxPacketSize { + // zero allocations for non-split packets + if prevData == nil { + return data, nil + } + + return append(prevData, data...), nil + } + + prevData = append(prevData, data...) + } +} + +// Write packet buffer 'data' +func (mc *mysqlConn) writePacket(data []byte) error { + pktLen := len(data) - 4 + + if pktLen > mc.maxAllowedPacket { + return ErrPktTooLarge + } + + for { + var size int + if pktLen >= maxPacketSize { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + size = maxPacketSize + } else { + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + size = pktLen + } + data[3] = mc.sequence + + // Write packet + if mc.writeTimeout > 0 { + if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil { + return err + } + } + + n, err := mc.netConn.Write(data[:4+size]) + if err == nil && n == 4+size { + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] + continue + } + + // Handle error + if err == nil { // n != len(data) + mc.cleanup() + errLog.Print(ErrMalformPkt) + } else { + if cerr := mc.canceled.Value(); cerr != nil { + return cerr + } + if n == 0 && pktLen == len(data)-4 { + // only for the first loop iteration when nothing was written yet + return errBadConnNoWrite + } + mc.cleanup() + errLog.Print(err) + } + return ErrInvalidConn + } +} + +/****************************************************************************** +* Initialization Process * +******************************************************************************/ + +// Handshake Initialization Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake +func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) { + data, err = mc.readPacket() + if err != nil { + // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since + // in connection initialization we don't risk retrying non-idempotent actions. 
+ if err == ErrInvalidConn { + return nil, "", driver.ErrBadConn + } + return + } + + if data[0] == iERR { + return nil, "", mc.handleErrorPacket(data) + } + + // protocol version [1 byte] + if data[0] < minProtocolVersion { + return nil, "", fmt.Errorf( + "unsupported protocol version %d. Version %d or higher is required", + data[0], + minProtocolVersion, + ) + } + + // server version [null terminated string] + // connection id [4 bytes] + pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 + + // first part of the password cipher [8 bytes] + authData := data[pos : pos+8] + + // (filler) always 0x00 [1 byte] + pos += 8 + 1 + + // capability flags (lower 2 bytes) [2 bytes] + mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + if mc.flags&clientProtocol41 == 0 { + return nil, "", ErrOldProtocol + } + if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { + return nil, "", ErrNoTLS + } + pos += 2 + + if len(data) > pos { + // character set [1 byte] + // status flags [2 bytes] + // capability flags (upper 2 bytes) [2 bytes] + // length of auth-plugin-data [1 byte] + // reserved (all [00]) [10 bytes] + pos += 1 + 2 + 2 + 1 + 10 + + // second part of the password cipher [minimum 13 bytes], + // where len=MAX(13, length of auth-plugin-data - 8) + // + // The web documentation is ambiguous about the length. However, + // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, + // the 13th byte is "\0 byte, terminating the second part of + // a scramble". So the second part of the password cipher is + // a NULL terminated string that's at least 13 bytes with the + // last byte being NULL. + // + // The official Python library uses the fixed length 12 + // which seems to work but technically could have a hidden bug. + authData = append(authData, data[pos:pos+12]...)
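+ // Advance past all 13 bytes: the 12 copied above plus the terminating NUL byte described in the comment above.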
+ pos += 13 + + // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) + // \NUL otherwise + if end := bytes.IndexByte(data[pos:], 0x00); end != -1 { + plugin = string(data[pos : pos+end]) + } else { + plugin = string(data[pos:]) + } + + // make a memory safe copy of the cipher slice + var b [20]byte + copy(b[:], authData) + return b[:], plugin, nil + } + + // make a memory safe copy of the cipher slice + var b [8]byte + copy(b[:], authData) + return b[:], plugin, nil +} + +// Client Authentication Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse +func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error { + // Adjust client flags based on server support + clientFlags := clientProtocol41 | + clientSecureConn | + clientLongPassword | + clientTransactions | + clientLocalFiles | + clientPluginAuth | + clientMultiResults | + mc.flags&clientLongFlag + + if mc.cfg.ClientFoundRows { + clientFlags |= clientFoundRows + } + + // To enable TLS / SSL + if mc.cfg.tls != nil { + clientFlags |= clientSSL + } + + if mc.cfg.MultiStatements { + clientFlags |= clientMultiStatements + } + + // encode length of the auth plugin data + var authRespLEIBuf [9]byte + authRespLen := len(authResp) + authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen)) + if len(authRespLEI) > 1 { + // if the length can not be written in 1 byte, it must be written as a + // length encoded integer + clientFlags |= clientPluginAuthLenEncClientData + } + + pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1 + + // To specify a db name + if n := len(mc.cfg.DBName); n > 0 { + clientFlags |= clientConnectWithDB + pktLen += n + 1 + } + + // Calculate packet length and get buffer with that size + data := mc.buf.takeSmallBuffer(pktLen + 4) + if data == nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // ClientFlags [32 bit] + data[4] = byte(clientFlags) + data[5] = byte(clientFlags >> 8) + data[6] = byte(clientFlags >> 16) + data[7] = byte(clientFlags >> 24) + + // MaxPacketSize [32 bit] (none) + data[8] = 0x00 + data[9] = 0x00 + data[10] = 0x00 + data[11] = 0x00 + + // Charset [1 byte] + var found bool + data[12], found = collations[mc.cfg.Collation] + if !found { + // Note possibility for false negatives: + // could be triggered although the collation is valid if the + // collations map does not contain entries the server supports. 
+ return errors.New("unknown collation") + } + + // SSL Connection Request Packet + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if mc.cfg.tls != nil { + // Send TLS / SSL request packet + if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { + return err + } + + // Switch to TLS + tlsConn := tls.Client(mc.netConn, mc.cfg.tls) + if err := tlsConn.Handshake(); err != nil { + return err + } + mc.netConn = tlsConn + mc.buf.nc = tlsConn + } + + // Filler [23 bytes] (all 0x00) + pos := 13 + for ; pos < 13+23; pos++ { + data[pos] = 0 + } + + // User [null terminated string] + if len(mc.cfg.User) > 0 { + pos += copy(data[pos:], mc.cfg.User) + } + data[pos] = 0x00 + pos++ + + // Auth Data [length encoded integer] + pos += copy(data[pos:], authRespLEI) + pos += copy(data[pos:], authResp) + + // Databasename [null terminated string] + if len(mc.cfg.DBName) > 0 { + pos += copy(data[pos:], mc.cfg.DBName) + data[pos] = 0x00 + pos++ + } + + pos += copy(data[pos:], plugin) + data[pos] = 0x00 + pos++ + + // Send Auth packet + return mc.writePacket(data[:pos]) +} + +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { + pktLen := 4 + len(authData) + data := mc.buf.takeSmallBuffer(pktLen) + if data == nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // Add the auth data [EOF] + copy(data[4:], authData) + return mc.writePacket(data) +} + +/****************************************************************************** +* Command Packets * +******************************************************************************/ + +func (mc *mysqlConn) writeCommandPacket(command byte) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1) + if data == nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { + // Reset Packet Sequence + mc.sequence = 0 + + pktLen := 1 + len(arg) + data := mc.buf.takeBuffer(pktLen + 4) + if data == nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Add arg + copy(data[5:], arg) + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1 + 4) + if data == nil { + // cannot take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Add arg [32 bit] + data[5] = byte(arg) + data[6] = byte(arg >> 8) + data[7] = byte(arg >> 16) + data[8] = byte(arg >> 24) + + // Send CMD packet + return mc.writePacket(data) +} + +/****************************************************************************** +* Result Packets * +******************************************************************************/ + +func (mc *mysqlConn) readAuthResult() ([]byte, string, error) { + data, err := mc.readPacket() + if err != nil { + return nil, "", err + } + + // packet indicator + switch data[0] { + + case iOK: + return nil, "", mc.handleOkPacket(data) + + case iAuthMoreData: + return data[1:], "", err + + case iEOF: + if len(data) == 1 { + // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest + return nil, "mysql_old_password", nil + } + pluginEndIndex := bytes.IndexByte(data, 0x00) + if pluginEndIndex < 0 { + return nil, "", ErrMalformPkt + } + plugin := string(data[1:pluginEndIndex]) + authData := data[pluginEndIndex+1:] + return authData, plugin, nil + + default: // Error otherwise + return nil, "", mc.handleErrorPacket(data) + } +} + +// Returns error if Packet is not an 'Result OK'-Packet +func (mc *mysqlConn) readResultOK() error { + data, err := mc.readPacket() + if err != nil { + return err + } + + if data[0] == iOK { + return mc.handleOkPacket(data) + } + return mc.handleErrorPacket(data) +} + +// Result Set Header Packet +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { + data, err := mc.readPacket() + if err == nil { + switch data[0] { + + case iOK: + return 0, mc.handleOkPacket(data) + + case iERR: + return 0, mc.handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + num, _, n := readLengthEncodedInteger(data) + if n-len(data) == 0 { + return int(num), nil + } + + return 0, ErrMalformPkt + } + return 0, err +} + +// Error Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet +func (mc *mysqlConn) handleErrorPacket(data []byte) error { + if data[0] != iERR { + return ErrMalformPkt + } + + // 0xff [1 byte] + + // Error Number [16 bit uint] + errno := binary.LittleEndian.Uint16(data[1:3]) + + // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION + // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover) + if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly { + // Oops; we are connected to a read-only connection, and won't be able + // to issue any write statements. Since RejectReadOnly is configured, + // we throw away this connection hoping this one would have write + // permission. This is specifically for a possible race condition + // during failover (e.g. on AWS Aurora). See README.md for more. + // + // We explicitly close the connection before returning + // driver.ErrBadConn to ensure that `database/sql` purges this + // connection and initiates a new one for next statement next time. 
+ mc.Close() + return driver.ErrBadConn + } + + pos := 3 + + // SQL State [optional: # + 5bytes string] + if data[3] == 0x23 { + //sqlstate := string(data[4 : 4+5]) + pos = 9 + } + + // Error Message [string] + return &MySQLError{ + Number: errno, + Message: string(data[pos:]), + } +} + +func readStatus(b []byte) statusFlag { + return statusFlag(b[0]) | statusFlag(b[1])<<8 +} + +// Ok Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet +func (mc *mysqlConn) handleOkPacket(data []byte) error { + var n, m int + + // 0x00 [1 byte] + + // Affected rows [Length Coded Binary] + mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + + // Insert id [Length Coded Binary] + mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // server_status [2 bytes] + mc.status = readStatus(data[1+n+m : 1+n+m+2]) + if mc.status&statusMoreResultsExists != 0 { + return nil + } + + // warning count [2 bytes] + + return nil +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { + columns := make([]mysqlField, count) + + for i := 0; ; i++ { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + // EOF Packet + if data[0] == iEOF && (len(data) == 5 || len(data) == 1) { + if i == count { + return columns, nil + } + return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns)) + } + + // Catalog + pos, err := skipLengthEncodedString(data) + if err != nil { + return nil, err + } + + // Database [len coded string] + n, err := skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Table [len coded string] + if mc.cfg.ColumnsWithAlias { + tableName, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + columns[i].tableName = string(tableName) + } else { + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + } + + // Original table [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Name [len coded string] + name, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + columns[i].name = string(name) + pos += n + + // Original name [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Filler [uint8] + pos++ + + // Charset [charset, collation uint8] + columns[i].charSet = data[pos] + pos += 2 + + // Length [uint32] + columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4]) + pos += 4 + + // Field type [uint8] + columns[i].fieldType = fieldType(data[pos]) + pos++ + + // Flags [uint16] + columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + pos += 2 + + // Decimals [uint8] + columns[i].decimals = data[pos] + //pos++ + + // Default value [len coded binary] + //if pos < len(data) { + // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) + //} + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (rows *textRows) readRow(dest []driver.Value) error { + mc := rows.mc + + if rows.rs.done { + return io.EOF + } + + data, err := mc.readPacket() + if err != nil { + return err + } + + 
// EOF Packet + if data[0] == iEOF && len(data) == 5 { + // server_status [2 bytes] + rows.mc.status = readStatus(data[3:]) + rows.rs.done = true + if !rows.HasNextResultSet() { + rows.mc = nil + } + return io.EOF + } + if data[0] == iERR { + rows.mc = nil + return mc.handleErrorPacket(data) + } + + // RowSet Packet + var n int + var isNull bool + pos := 0 + + for i := range dest { + // Read bytes and convert to string + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + if !mc.parseTime { + continue + } else { + switch rows.rs.columns[i].fieldType { + case fieldTypeTimestamp, fieldTypeDateTime, + fieldTypeDate, fieldTypeNewDate: + dest[i], err = parseDateTime( + string(dest[i].([]byte)), + mc.cfg.Loc, + ) + if err == nil { + continue + } + default: + continue + } + } + + } else { + dest[i] = nil + continue + } + } + return err // err != nil + } + + return nil +} + +// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read +func (mc *mysqlConn) readUntilEOF() error { + for { + data, err := mc.readPacket() + if err != nil { + return err + } + + switch data[0] { + case iERR: + return mc.handleErrorPacket(data) + case iEOF: + if len(data) == 5 { + mc.status = readStatus(data[3:]) + } + return nil + } + } +} + +/****************************************************************************** +* Prepared Statements * +******************************************************************************/ + +// Prepare Result Packets +// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html +func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { + data, err := stmt.mc.readPacket() + if err == nil { + // packet indicator [1 byte] + if data[0] != iOK { + return 0, stmt.mc.handleErrorPacket(data) + } + + // statement id [4 bytes] + stmt.id = binary.LittleEndian.Uint32(data[1:5]) + + // Column count [16 bit uint] + columnCount := binary.LittleEndian.Uint16(data[5:7]) + + // Param count [16 bit uint] + stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) + + // Reserved [8 bit] + + // Warning count [16 bit uint] + + return columnCount, nil + } + return 0, err +} + +// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html +func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { + maxLen := stmt.mc.maxAllowedPacket - 1 + pktLen := maxLen + + // After the header (bytes 0-3) follows before the data: + // 1 byte command + // 4 bytes stmtID + // 2 bytes paramID + const dataOffset = 1 + 4 + 2 + + // Cannot use the write buffer since + // a) the buffer is too small + // b) it is in use + data := make([]byte, 4+1+4+2+len(arg)) + + copy(data[4+dataOffset:], arg) + + for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { + if dataOffset+argLen < maxLen { + pktLen = dataOffset + argLen + } + + stmt.mc.sequence = 0 + // Add command byte [1 byte] + data[4] = comStmtSendLongData + + // Add stmtID [32 bit] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // Add paramID [16 bit] + data[9] = byte(paramID) + data[10] = byte(paramID >> 8) + + // Send CMD packet + err := stmt.mc.writePacket(data[:4+pktLen]) + if err == nil { + data = data[pktLen-dataOffset:] + continue + } + return err + + } + + // Reset Packet Sequence + stmt.mc.sequence = 0 + return nil +} + +// Execute Prepared Statement +// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html +func (stmt *mysqlStmt) 
writeExecutePacket(args []driver.Value) error { + if len(args) != stmt.paramCount { + return fmt.Errorf( + "argument count mismatch (got: %d; has: %d)", + len(args), + stmt.paramCount, + ) + } + + const minPktLen = 4 + 1 + 4 + 1 + 4 + mc := stmt.mc + + // Determine threshold dynamically to avoid packet size shortage. + longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1) + if longDataSize < 64 { + longDataSize = 64 + } + + // Reset packet-sequence + mc.sequence = 0 + + var data []byte + + if len(args) == 0 { + data = mc.buf.takeBuffer(minPktLen) + } else { + data = mc.buf.takeCompleteBuffer() + } + if data == nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // command [1 byte] + data[4] = comStmtExecute + + // statement_id [4 bytes] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] + data[9] = 0x00 + + // iteration_count (uint32(1)) [4 bytes] + data[10] = 0x01 + data[11] = 0x00 + data[12] = 0x00 + data[13] = 0x00 + + if len(args) > 0 { + pos := minPktLen + + var nullMask []byte + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { + // buffer has to be extended but we don't know by how much so + // we depend on append after all data with known sizes fit. + // We stop at that because we deal with a lot of columns here + // which makes the required allocation size hard to guess. + tmp := make([]byte, pos+maskLen+typesLen) + copy(tmp[:pos], data[:pos]) + data = tmp + nullMask = data[pos : pos+maskLen] + pos += maskLen + } else { + nullMask = data[pos : pos+maskLen] + for i := 0; i < maskLen; i++ { + nullMask[i] = 0 + } + pos += maskLen + } + + // newParameterBoundFlag 1 [1 byte] + data[pos] = 0x01 + pos++ + + // type of each parameter [len(args)*2 bytes] + paramTypes := data[pos:] + pos += len(args) * 2 + + // value of each parameter [n bytes] + paramValues := data[pos:pos] + valuesCap := cap(paramValues) + + for i, arg := range args { + // build NULL-bitmap + if arg == nil { + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = byte(fieldTypeNULL) + paramTypes[i+i+1] = 0x00 + continue + } + + // cache types and values + switch v := arg.(type) { + case int64: + paramTypes[i+i] = byte(fieldTypeLongLong) + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case float64: + paramTypes[i+i] = byte(fieldTypeDouble) + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + math.Float64bits(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(math.Float64bits(v))..., + ) + } + + case bool: + paramTypes[i+i] = byte(fieldTypeTiny) + paramTypes[i+i+1] = 0x00 + + if v { + paramValues = append(paramValues, 0x01) + } else { + paramValues = append(paramValues, 0x00) + } + + case []byte: + // Common case (non-nil value) first + if v != nil { + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + if len(v) < longDataSize { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = 
append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, v); err != nil { + return err + } + } + continue + } + + // Handle []byte(nil) as a NULL value + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = byte(fieldTypeNULL) + paramTypes[i+i+1] = 0x00 + + case string: + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + if len(v) < longDataSize { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { + return err + } + } + + case time.Time: + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + var a [64]byte + var b = a[:0] + + if v.IsZero() { + b = append(b, "0000-00-00"...) + } else { + b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat) + } + + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(b)), + ) + paramValues = append(paramValues, b...) + + default: + return fmt.Errorf("cannot convert type: %T", arg) + } + } + + // Check if param values exceeded the available buffer + // In that case we must build the data packet with the new values buffer + if valuesCap != cap(paramValues) { + data = append(data[:pos], paramValues...) + mc.buf.buf = data + } + + pos += len(paramValues) + data = data[:pos] + } + + return mc.writePacket(data) +} + +func (mc *mysqlConn) discardResults() error { + for mc.status&statusMoreResultsExists != 0 { + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return err + } + if resLen > 0 { + // columns + if err := mc.readUntilEOF(); err != nil { + return err + } + // rows + if err := mc.readUntilEOF(); err != nil { + return err + } + } + } + return nil +} + +// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html +func (rows *binaryRows) readRow(dest []driver.Value) error { + data, err := rows.mc.readPacket() + if err != nil { + return err + } + + // packet indicator [1 byte] + if data[0] != iOK { + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + rows.mc.status = readStatus(data[3:]) + rows.rs.done = true + if !rows.HasNextResultSet() { + rows.mc = nil + } + return io.EOF + } + mc := rows.mc + rows.mc = nil + + // Error otherwise + return mc.handleErrorPacket(data) + } + + // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] + pos := 1 + (len(dest)+7+2)>>3 + nullMask := data[1:pos] + + for i := range dest { + // Field is NULL + // (byte >> bit-pos) % 2 == 1 + if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { + dest[i] = nil + continue + } + + // Convert to byte-coded string + switch rows.rs.columns[i].fieldType { + case fieldTypeNULL: + dest[i] = nil + continue + + // Numeric Types + case fieldTypeTiny: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(data[pos]) + } else { + dest[i] = int64(int8(data[pos])) + } + pos++ + continue + + case fieldTypeShort, fieldTypeYear: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) + } else { + dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) + } + pos += 2 + continue + + case fieldTypeInt24, fieldTypeLong: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) + } else { + dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) + } + pos += 4 + continue + + case fieldTypeLongLong: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if 
val > math.MaxInt64 { + dest[i] = uint64ToString(val) + } else { + dest[i] = int64(val) + } + } else { + dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 + continue + + case fieldTypeFloat: + dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])) + pos += 4 + continue + + case fieldTypeDouble: + dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 + continue + + // Length coded Binary Strings + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON: + var isNull bool + var n int + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + continue + } else { + dest[i] = nil + continue + } + } + return err + + case + fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD + fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] + fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] + + num, isNull, n := readLengthEncodedInteger(data[pos:]) + pos += n + + switch { + case isNull: + dest[i] = nil + continue + case rows.rs.columns[i].fieldType == fieldTypeTime: + // database/sql does not support an equivalent to TIME, return a string + var dstlen uint8 + switch decimals := rows.rs.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 8 + case 1, 2, 3, 4, 5, 6: + dstlen = 8 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.rs.columns[i].decimals, + ) + } + dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen) + case rows.mc.parseTime: + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc) + default: + var dstlen uint8 + if rows.rs.columns[i].fieldType == fieldTypeDate { + dstlen = 10 + } else { + switch decimals := rows.rs.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 19 + case 1, 2, 3, 4, 5, 6: + dstlen = 19 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.rs.columns[i].decimals, + ) + } + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Please report if this happens! + default: + return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType) + } + } + + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go new file mode 100644 index 00000000..c6438d03 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/result.go @@ -0,0 +1,22 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
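+// The two fields below are filled from the connection's affectedRows and +// insertId counters, which handleOkPacket (packets.go) parses out of the +// server's OK packet.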
+ +package mysql + +type mysqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *mysqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *mysqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go new file mode 100644 index 00000000..d3b1e282 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/rows.go @@ -0,0 +1,216 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" + "math" + "reflect" +) + +type resultSet struct { + columns []mysqlField + columnNames []string + done bool +} + +type mysqlRows struct { + mc *mysqlConn + rs resultSet + finish func() +} + +type binaryRows struct { + mysqlRows +} + +type textRows struct { + mysqlRows +} + +func (rows *mysqlRows) Columns() []string { + if rows.rs.columnNames != nil { + return rows.rs.columnNames + } + + columns := make([]string, len(rows.rs.columns)) + if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias { + for i := range columns { + if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 { + columns[i] = tableName + "." + rows.rs.columns[i].name + } else { + columns[i] = rows.rs.columns[i].name + } + } + } else { + for i := range columns { + columns[i] = rows.rs.columns[i].name + } + } + + rows.rs.columnNames = columns + return columns +} + +func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string { + return rows.rs.columns[i].typeDatabaseName() +} + +// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) { +// return int64(rows.rs.columns[i].length), true +// } + +func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) { + return rows.rs.columns[i].flags&flagNotNULL == 0, true +} + +func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) { + column := rows.rs.columns[i] + decimals := int64(column.decimals) + + switch column.fieldType { + case fieldTypeDecimal, fieldTypeNewDecimal: + if decimals > 0 { + return int64(column.length) - 2, decimals, true + } + return int64(column.length) - 1, decimals, true + case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime: + return decimals, decimals, true + case fieldTypeFloat, fieldTypeDouble: + if decimals == 0x1f { + return math.MaxInt64, math.MaxInt64, true + } + return math.MaxInt64, decimals, true + } + + return 0, 0, false +} + +func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type { + return rows.rs.columns[i].scanType() +} + +func (rows *mysqlRows) Close() (err error) { + if f := rows.finish; f != nil { + f() + rows.finish = nil + } + + mc := rows.mc + if mc == nil { + return nil + } + if err := mc.error(); err != nil { + return err + } + + // Remove unread packets from stream + if !rows.rs.done { + err = mc.readUntilEOF() + } + if err == nil { + if err = mc.discardResults(); err != nil { + return err + } + } + + rows.mc = nil + return err +} + +func (rows *mysqlRows) HasNextResultSet() (b bool) { + if rows.mc == nil { + return false + } + return rows.mc.status&statusMoreResultsExists != 0 +} + +func (rows *mysqlRows) nextResultSet() (int, error) { + if rows.mc == nil { + return 0, 
io.EOF + } + if err := rows.mc.error(); err != nil { + return 0, err + } + + // Remove unread packets from stream + if !rows.rs.done { + if err := rows.mc.readUntilEOF(); err != nil { + return 0, err + } + rows.rs.done = true + } + + if !rows.HasNextResultSet() { + rows.mc = nil + return 0, io.EOF + } + rows.rs = resultSet{} + return rows.mc.readResultSetHeaderPacket() +} + +func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) { + for { + resLen, err := rows.nextResultSet() + if err != nil { + return 0, err + } + + if resLen > 0 { + return resLen, nil + } + + rows.rs.done = true + } +} + +func (rows *binaryRows) NextResultSet() error { + resLen, err := rows.nextNotEmptyResultSet() + if err != nil { + return err + } + + rows.rs.columns, err = rows.mc.readColumns(resLen) + return err +} + +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if err := mc.error(); err != nil { + return err + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows *textRows) NextResultSet() (err error) { + resLen, err := rows.nextNotEmptyResultSet() + if err != nil { + return err + } + + rows.rs.columns, err = rows.mc.readColumns(resLen) + return err +} + +func (rows *textRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if err := mc.error(); err != nil { + return err + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go new file mode 100644 index 00000000..ce7fe4cd --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/statement.go @@ -0,0 +1,211 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "io" + "reflect" + "strconv" +) + +type mysqlStmt struct { + mc *mysqlConn + id uint32 + paramCount int +} + +func (stmt *mysqlStmt) Close() error { + if stmt.mc == nil || stmt.mc.closed.IsSet() { + // driver.Stmt.Close can be called more than once, thus this function + // has to be idempotent. + // See also Issue #450 and golang/go#16019. 
+		//errLog.Print(ErrInvalidConn)
+		return driver.ErrBadConn
+	}
+
+	err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
+	stmt.mc = nil
+	return err
+}
+
+func (stmt *mysqlStmt) NumInput() int {
+	return stmt.paramCount
+}
+
+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+	return converter{}
+}
+
+func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
+	if stmt.mc.closed.IsSet() {
+		errLog.Print(ErrInvalidConn)
+		return nil, driver.ErrBadConn
+	}
+	// Send command
+	err := stmt.writeExecutePacket(args)
+	if err != nil {
+		return nil, stmt.mc.markBadConn(err)
+	}
+
+	mc := stmt.mc
+
+	mc.affectedRows = 0
+	mc.insertId = 0
+
+	// Read Result
+	resLen, err := mc.readResultSetHeaderPacket()
+	if err != nil {
+		return nil, err
+	}
+
+	if resLen > 0 {
+		// Columns
+		if err = mc.readUntilEOF(); err != nil {
+			return nil, err
+		}
+
+		// Rows
+		if err := mc.readUntilEOF(); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := mc.discardResults(); err != nil {
+		return nil, err
+	}
+
+	return &mysqlResult{
+		affectedRows: int64(mc.affectedRows),
+		insertId:     int64(mc.insertId),
+	}, nil
+}
+
+func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
+	return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+	if stmt.mc.closed.IsSet() {
+		errLog.Print(ErrInvalidConn)
+		return nil, driver.ErrBadConn
+	}
+	// Send command
+	err := stmt.writeExecutePacket(args)
+	if err != nil {
+		return nil, stmt.mc.markBadConn(err)
+	}
+
+	mc := stmt.mc
+
+	// Read Result
+	resLen, err := mc.readResultSetHeaderPacket()
+	if err != nil {
+		return nil, err
+	}
+
+	rows := new(binaryRows)
+
+	if resLen > 0 {
+		rows.mc = mc
+		rows.rs.columns, err = mc.readColumns(resLen)
+	} else {
+		rows.rs.done = true
+
+		switch err := rows.NextResultSet(); err {
+		case nil, io.EOF:
+			return rows, nil
+		default:
+			return nil, err
+		}
+	}
+
+	return rows, err
+}
+
+type converter struct{}
+
+// ConvertValue mirrors the reference/default converter in database/sql/driver
+// with _one_ exception. We support uint64 values with their high bit set,
+// which the default implementation does not. This function should be kept in
+// sync with database/sql/driver defaultConverter.ConvertValue() except for
+// that deliberate difference.
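+//
+// Illustrative sketch of that difference (editor's example, not upstream
+// documentation):
+//
+//	c := converter{}
+//	v, _ := c.ConvertValue(uint64(42))      // int64(42)
+//	s, _ := c.ConvertValue(uint64(1) << 63) // "9223372036854775808" as a string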
+func (c converter) ConvertValue(v interface{}) (driver.Value, error) { + if driver.IsValue(v) { + return v, nil + } + + if vr, ok := v.(driver.Valuer); ok { + sv, err := callValuerValue(vr) + if err != nil { + return nil, err + } + if !driver.IsValue(sv) { + return nil, fmt.Errorf("non-Value type %T returned from Value", sv) + } + return sv, nil + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Ptr: + // indirect pointers + if rv.IsNil() { + return nil, nil + } else { + return c.ConvertValue(rv.Elem().Interface()) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(rv.Uint()), nil + case reflect.Uint64: + u64 := rv.Uint() + if u64 >= 1<<63 { + return strconv.FormatUint(u64, 10), nil + } + return int64(u64), nil + case reflect.Float32, reflect.Float64: + return rv.Float(), nil + case reflect.Bool: + return rv.Bool(), nil + case reflect.Slice: + ek := rv.Type().Elem().Kind() + if ek == reflect.Uint8 { + return rv.Bytes(), nil + } + return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek) + case reflect.String: + return rv.String(), nil + } + return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) +} + +var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// callValuerValue returns vr.Value(), with one exception: +// If vr.Value is an auto-generated method on a pointer type and the +// pointer is nil, it would panic at runtime in the panicwrap +// method. Treat it like nil instead. +// +// This is so people can implement driver.Value on value types and +// still use nil pointers to those types to mean nil/NULL, just like +// string/*string. +// +// This is an exact copy of the same-named unexported function from the +// database/sql package. +func callValuerValue(vr driver.Valuer) (v driver.Value, err error) { + if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr && + rv.IsNil() && + rv.Type().Elem().Implements(valuerReflectType) { + return nil, nil + } + return vr.Value() +} diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go new file mode 100644 index 00000000..417d7279 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/transaction.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlTx struct { + mc *mysqlConn +} + +func (tx *mysqlTx) Commit() (err error) { + if tx.mc == nil || tx.mc.closed.IsSet() { + return ErrInvalidConn + } + err = tx.mc.exec("COMMIT") + tx.mc = nil + return +} + +func (tx *mysqlTx) Rollback() (err error) { + if tx.mc == nil || tx.mc.closed.IsSet() { + return ErrInvalidConn + } + err = tx.mc.exec("ROLLBACK") + tx.mc = nil + return +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go new file mode 100644 index 00000000..ca5d47d8 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils.go @@ -0,0 +1,726 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "fmt" + "io" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Registry for custom tls.Configs +var ( + tlsConfigLock sync.RWMutex + tlsConfigRegistry map[string]*tls.Config +) + +// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. +// Use the key as a value in the DSN where tls=value. +// +// Note: The provided tls.Config is exclusively owned by the driver after +// registering it. +// +// rootCertPool := x509.NewCertPool() +// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// if err != nil { +// log.Fatal(err) +// } +// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { +// log.Fatal("Failed to append PEM.") +// } +// clientCert := make([]tls.Certificate, 0, 1) +// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") +// if err != nil { +// log.Fatal(err) +// } +// clientCert = append(clientCert, certs) +// mysql.RegisterTLSConfig("custom", &tls.Config{ +// RootCAs: rootCertPool, +// Certificates: clientCert, +// }) +// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") +// +func RegisterTLSConfig(key string, config *tls.Config) error { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { + return fmt.Errorf("key '%s' is reserved", key) + } + + tlsConfigLock.Lock() + if tlsConfigRegistry == nil { + tlsConfigRegistry = make(map[string]*tls.Config) + } + + tlsConfigRegistry[key] = config + tlsConfigLock.Unlock() + return nil +} + +// DeregisterTLSConfig removes the tls.Config associated with key. +func DeregisterTLSConfig(key string) { + tlsConfigLock.Lock() + if tlsConfigRegistry != nil { + delete(tlsConfigRegistry, key) + } + tlsConfigLock.Unlock() +} + +func getTLSConfigClone(key string) (config *tls.Config) { + tlsConfigLock.RLock() + if v, ok := tlsConfigRegistry[key]; ok { + config = cloneTLSConfig(v) + } + tlsConfigLock.RUnlock() + return +} + +// Returns the bool value of the input. +// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. 
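+//
+// Illustrative sketch (editor's example, not upstream documentation):
+//
+//	var nt NullTime
+//	_ = nt.Scan("2006-01-02 15:04:05") // nt.Valid == true, nt.Time in UTC
+//	_ = nt.Scan(nil)                   // nt.Valid == false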
+func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { + base := "0000-00-00 00:00:00.0000000" + switch len(str) { + case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" + if str == base[:len(str)] { + return + } + t, err = time.Parse(timeFormat[:len(str)], str) + default: + err = fmt.Errorf("invalid time string: %s", str) + return + } + + // Adjust location + if err == nil && loc != time.UTC { + y, mo, d := t.Date() + h, mi, s := t.Clock() + t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil + } + + return +} + +func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { + switch num { + case 0: + return time.Time{}, nil + case 4: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + 0, 0, 0, 0, + loc, + ), nil + case 7: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + 0, + loc, + ), nil + case 11: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds + loc, + ), nil + } + return nil, fmt.Errorf("invalid DATETIME packet length %d", num) +} + +// zeroDateTime is used in formatBinaryDateTime to avoid an allocation +// if the DATE or DATETIME has the zero value. +// It must never be changed. +// The current behavior depends on database/sql copying the result. +var zeroDateTime = []byte("0000-00-00 00:00:00.000000") + +const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" + +func appendMicrosecs(dst, src []byte, decimals int) []byte { + if decimals <= 0 { + return dst + } + if len(src) == 0 { + return append(dst, ".000000"[:decimals+1]...) 
+ } + + microsecs := binary.LittleEndian.Uint32(src[:4]) + p1 := byte(microsecs / 10000) + microsecs -= 10000 * uint32(p1) + p2 := byte(microsecs / 100) + microsecs -= 100 * uint32(p2) + p3 := byte(microsecs) + + switch decimals { + default: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], digits01[p3], + ) + case 1: + return append(dst, '.', + digits10[p1], + ) + case 2: + return append(dst, '.', + digits10[p1], digits01[p1], + ) + case 3: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], + ) + case 4: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + ) + case 5: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], + ) + } +} + +func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + return zeroDateTime[:length], nil + } + var dst []byte // return value + var p1, p2, p3 byte // current digit pair + + switch length { + case 10, 19, 21, 22, 23, 24, 25, 26: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s length %d", t, length) + } + switch len(src) { + case 4, 7, 11: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s packet length %d", t, len(src)) + } + dst = make([]byte, 0, length) + // start with the date + year := binary.LittleEndian.Uint16(src[:2]) + pt := year / 100 + p1 = byte(year - 100*uint16(pt)) + p2, p3 = src[2], src[3] + dst = append(dst, + digits10[pt], digits01[pt], + digits10[p1], digits01[p1], '-', + digits10[p2], digits01[p2], '-', + digits10[p3], digits01[p3], + ) + if length == 10 { + return dst, nil + } + if len(src) == 4 { + return append(dst, zeroDateTime[10:length]...), nil + } + dst = append(dst, ' ') + p1 = src[4] // hour + src = src[5:] + + // p1 is 2-digit hour, src is after hour + p2, p3 = src[0], src[1] + dst = append(dst, + digits10[p1], digits01[p1], ':', + digits10[p2], digits01[p2], ':', + digits10[p3], digits01[p3], + ) + return appendMicrosecs(dst, src[2:], int(length)-20), nil +} + +func formatBinaryTime(src []byte, length uint8) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + return zeroDateTime[11 : 11+length], nil + } + var dst []byte // return value + + switch length { + case + 8, // time (can be up to 10 when negative and 100+ hours) + 10, 11, 12, 13, 14, 15: // time with fractional seconds + default: + return nil, fmt.Errorf("illegal TIME length %d", length) + } + switch len(src) { + case 8, 12: + default: + return nil, fmt.Errorf("invalid TIME packet length %d", len(src)) + } + // +2 to enable negative time and 100+ hours + dst = make([]byte, 0, length+2) + if src[0] == 1 { + dst = append(dst, '-') + } + days := binary.LittleEndian.Uint32(src[1:5]) + hours := int64(days)*24 + int64(src[5]) + + if hours >= 100 { + dst = strconv.AppendInt(dst, hours, 10) + } else { + dst = append(dst, digits10[hours], digits01[hours]) + } + + min, sec := src[6], src[7] + dst = append(dst, ':', + digits10[min], digits01[min], ':', + digits10[sec], digits01[sec], + ) + return appendMicrosecs(dst, src[8:], int(length)-9), nil +} + +/****************************************************************************** +* 
Convert from and to bytes                                                    *
+******************************************************************************/
+
+func uint64ToBytes(n uint64) []byte {
+	return []byte{
+		byte(n),
+		byte(n >> 8),
+		byte(n >> 16),
+		byte(n >> 24),
+		byte(n >> 32),
+		byte(n >> 40),
+		byte(n >> 48),
+		byte(n >> 56),
+	}
+}
+
+func uint64ToString(n uint64) []byte {
+	var a [20]byte
+	i := 20
+
+	// U+0030 = 0
+	// ...
+	// U+0039 = 9
+
+	var q uint64
+	for n >= 10 {
+		i--
+		q = n / 10
+		a[i] = uint8(n-q*10) + 0x30
+		n = q
+	}
+
+	i--
+	a[i] = uint8(n) + 0x30
+
+	return a[i:]
+}
+
+// treats string value as unsigned integer representation
+func stringToInt(b []byte) int {
+	val := 0
+	for i := range b {
+		val *= 10
+		val += int(b[i] - 0x30)
+	}
+	return val
+}
+
+// returns the string read as a byte slice, whether the value is NULL,
+// the number of bytes read and an error, in case the string is longer than
+// the input slice
+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+	// Get length
+	num, isNull, n := readLengthEncodedInteger(b)
+	if num < 1 {
+		return b[n:n], isNull, n, nil
+	}
+
+	n += int(num)
+
+	// Check data length
+	if len(b) >= n {
+		return b[n-int(num) : n : n], false, n, nil
+	}
+	return nil, false, n, io.EOF
+}
+
+// returns the number of bytes skipped and an error, in case the string is
+// longer than the input slice
+func skipLengthEncodedString(b []byte) (int, error) {
+	// Get length
+	num, _, n := readLengthEncodedInteger(b)
+	if num < 1 {
+		return n, nil
+	}
+
+	n += int(num)
+
+	// Check data length
+	if len(b) >= n {
+		return n, nil
+	}
+	return n, io.EOF
+}
+
+// returns the number read, whether the value is NULL and the number of bytes read
+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+	// See issue #349
+	if len(b) == 0 {
+		return 0, true, 1
+	}
+
+	switch b[0] {
+	// 251: NULL
+	case 0xfb:
+		return 0, true, 1
+
+	// 252: value of following 2
+	case 0xfc:
+		return uint64(b[1]) | uint64(b[2])<<8, false, 3
+
+	// 253: value of following 3
+	case 0xfd:
+		return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+
+	// 254: value of following 8
+	case 0xfe:
+		return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+			uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+			uint64(b[7])<<48 | uint64(b[8])<<56,
+			false, 9
+	}
+
+	// 0-250: value of first byte
+	return uint64(b[0]), false, 1
+}
+
+// encodes a uint64 value and appends it to the given byte slice
+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+	switch {
+	case n <= 250:
+		return append(b, byte(n))
+
+	case n <= 0xffff:
+		return append(b, 0xfc, byte(n), byte(n>>8))
+
+	case n <= 0xffffff:
+		return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+	}
+	return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+		byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+}
+
+// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, it reallocates a new buffer.
+func reserveBuffer(buf []byte, appendSize int) []byte {
+	newSize := len(buf) + appendSize
+	if cap(buf) < newSize {
+		// Grow buffer exponentially
+		newBuf := make([]byte, len(buf)*2+appendSize)
+		copy(newBuf, buf)
+		buf = newBuf
+	}
+	return buf[:newSize]
+}
+
+// escapeBytesBackslash escapes []byte with backslashes (\).
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
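+// For example (editor's illustration): escapeBytesBackslash(nil, []byte("it's"))
+// returns the bytes `it\'s`.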
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 +func escapeBytesBackslash(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringBackslash is similar to escapeBytesBackslash but for string. +func escapeStringBackslash(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. +// This escapes the contents of a string by doubling up any apostrophes that +// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in +// effect on the server. +// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 +func escapeBytesQuotes(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringQuotes is similar to escapeBytesQuotes but for string. +func escapeStringQuotes(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +/****************************************************************************** +* Sync utils * +******************************************************************************/ + +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://github.com/golang/go/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} + +// atomicBool is a wrapper around uint32 for usage as a boolean value with +// atomic access. 
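+//
+// Minimal illustrative sketch (editor's example, not upstream documentation):
+//
+//	var ab atomicBool
+//	ab.Set(true)
+//	_ = ab.IsSet()      // true
+//	_ = ab.TrySet(true) // false: the value was already true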
+type atomicBool struct {
+	_noCopy noCopy
+	value   uint32
+}
+
+// IsSet returns whether the current boolean value is true
+func (ab *atomicBool) IsSet() bool {
+	return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Set sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Set(value bool) {
+	if value {
+		atomic.StoreUint32(&ab.value, 1)
+	} else {
+		atomic.StoreUint32(&ab.value, 0)
+	}
+}
+
+// TrySet sets the value of the bool and returns whether the value changed
+func (ab *atomicBool) TrySet(value bool) bool {
+	if value {
+		return atomic.SwapUint32(&ab.value, 1) == 0
+	}
+	return atomic.SwapUint32(&ab.value, 0) > 0
+}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+	_noCopy noCopy
+	value   atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil.
+func (ae *atomicError) Set(value error) {
+	ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+	if v := ae.value.Load(); v != nil {
+		// this will panic if the value doesn't implement the error interface
+		return v.(error)
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go17.go b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
new file mode 100644
index 00000000..f5956345
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
@@ -0,0 +1,40 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.7
+// +build !go1.8
+
+package mysql
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+	return &tls.Config{
+		Rand:                        c.Rand,
+		Time:                        c.Time,
+		Certificates:                c.Certificates,
+		NameToCertificate:           c.NameToCertificate,
+		GetCertificate:              c.GetCertificate,
+		RootCAs:                     c.RootCAs,
+		NextProtos:                  c.NextProtos,
+		ServerName:                  c.ServerName,
+		ClientAuth:                  c.ClientAuth,
+		ClientCAs:                   c.ClientCAs,
+		InsecureSkipVerify:          c.InsecureSkipVerify,
+		CipherSuites:                c.CipherSuites,
+		PreferServerCipherSuites:    c.PreferServerCipherSuites,
+		SessionTicketsDisabled:      c.SessionTicketsDisabled,
+		SessionTicketKey:            c.SessionTicketKey,
+		ClientSessionCache:          c.ClientSessionCache,
+		MinVersion:                  c.MinVersion,
+		MaxVersion:                  c.MaxVersion,
+		CurvePreferences:            c.CurvePreferences,
+		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+		Renegotiation:               c.Renegotiation,
+	}
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go18.go b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
new file mode 100644
index 00000000..c35c2a6a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
@@ -0,0 +1,50 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+	"crypto/tls"
+	"database/sql"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+)
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+	return c.Clone()
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+	dargs := make([]driver.Value, len(named))
+	for n, param := range named {
+		if len(param.Name) > 0 {
+			// TODO: support the use of Named Parameters #561
+			return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+		}
+		dargs[n] = param.Value
+	}
+	return dargs, nil
+}
+
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+	switch sql.IsolationLevel(level) {
+	case sql.LevelRepeatableRead:
+		return "REPEATABLE READ", nil
+	case sql.LevelReadCommitted:
+		return "READ COMMITTED", nil
+	case sql.LevelReadUncommitted:
+		return "READ UNCOMMITTED", nil
+	case sql.LevelSerializable:
+		return "SERIALIZABLE", nil
+	default:
+		return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
+	}
+}
diff --git a/vendor/github.com/go-test/deep/.gitignore b/vendor/github.com/go-test/deep/.gitignore
new file mode 100644
index 00000000..53f12f0f
--- /dev/null
+++ b/vendor/github.com/go-test/deep/.gitignore
@@ -0,0 +1,2 @@
+*.swp
+*.out
diff --git a/vendor/github.com/go-test/deep/.travis.yml b/vendor/github.com/go-test/deep/.travis.yml
new file mode 100644
index 00000000..df3972fc
--- /dev/null
+++ b/vendor/github.com/go-test/deep/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - "1.10"
+  - "1.11"
+  - "1.12"
+
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cover
+
+script:
+  - $HOME/gopath/bin/goveralls -service=travis-ci
diff --git a/vendor/github.com/go-test/deep/CHANGES.md b/vendor/github.com/go-test/deep/CHANGES.md
new file mode 100644
index 00000000..64ac3cbc
--- /dev/null
+++ b/vendor/github.com/go-test/deep/CHANGES.md
@@ -0,0 +1,22 @@
+# go-test/deep Changelog
+
+## v1.0.2 released 2019-07-14
+
+* Enabled Go module (@radeksimko)
+* Changed supported and tested Go versions: 1.10, 1.11, and 1.12 (dropped 1.9)
+* Changed Error equality: additional struct fields are compared too (PR #29) (@andrewmostello)
+* Fixed typos and ineffassign issues (PR #25) (@tariq1890)
+* Fixed diff order for nil comparison (PR #16) (@gmarik)
+* Fixed slice equality when slices are extracted from the same array (PR #11) (@risteli)
+* Fixed test spelling and messages (PR #19) (@sofuture)
+* Fixed issue #15: panic on comparing struct with anonymous time.Time
+* Fixed issue #18: Panic when comparing structs with time.Time value and CompareUnexportedFields is true
+* Fixed issue #21: Set default MaxDepth = 0 (disabled) (PR #23)
+
+## v1.0.1 released 2018-01-28
+
+* Fixed issue #12: Arrays are not properly compared (@samlitowitz)
+
+## v1.0.0 released 2017-10-27
+
+* First release
diff --git a/vendor/github.com/go-test/deep/LICENSE b/vendor/github.com/go-test/deep/LICENSE
new file mode 100644
index 00000000..228ef16f
--- /dev/null
+++ b/vendor/github.com/go-test/deep/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright 2015-2017 Daniel Nichter
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-test/deep/README.md b/vendor/github.com/go-test/deep/README.md
new file mode 100644
index 00000000..3b78eac7
--- /dev/null
+++ b/vendor/github.com/go-test/deep/README.md
@@ -0,0 +1,51 @@
+# Deep Variable Equality for Humans
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-test/deep)](https://goreportcard.com/report/github.com/go-test/deep) [![Build Status](https://travis-ci.org/go-test/deep.svg?branch=master)](https://travis-ci.org/go-test/deep) [![Coverage Status](https://coveralls.io/repos/github/go-test/deep/badge.svg?branch=master)](https://coveralls.io/github/go-test/deep?branch=master) [![GoDoc](https://godoc.org/github.com/go-test/deep?status.svg)](https://godoc.org/github.com/go-test/deep)
+
+This package provides a single function: `deep.Equal`. It's like [reflect.DeepEqual](http://golang.org/pkg/reflect/#DeepEqual) but much friendlier to humans (or any sentient being) for two reasons:
+
+* `deep.Equal` returns a list of differences
+* `deep.Equal` does not compare unexported fields (by default)
+
+`reflect.DeepEqual` is good (like all things Golang!), but it's a game of [Hunt the Wumpus](https://en.wikipedia.org/wiki/Hunt_the_Wumpus). For large maps, slices, and structs, finding the difference is difficult.
+
+`deep.Equal` doesn't play games with you, it lists the differences:
+
+```go
+package main_test
+
+import (
+	"testing"
+	"github.com/go-test/deep"
+)
+
+type T struct {
+	Name    string
+	Numbers []float64
+}
+
+func TestDeepEqual(t *testing.T) {
+	// Can you spot the difference?
+	t1 := T{
+		Name:    "Isabella",
+		Numbers: []float64{1.13459, 2.29343, 3.010100010},
+	}
+	t2 := T{
+		Name:    "Isabella",
+		Numbers: []float64{1.13459, 2.29843, 3.010100010},
+	}
+
+	if diff := deep.Equal(t1, t2); diff != nil {
+		t.Error(diff)
+	}
+}
+```
+
+
+```
+$ go test
+--- FAIL: TestDeepEqual (0.00s)
+        main_test.go:25: [Numbers.slice[1]: 2.29343 != 2.29843]
+```
+
+The difference is in `Numbers.slice[1]`: the two values aren't equal using Go `==`.
diff --git a/vendor/github.com/go-test/deep/deep.go b/vendor/github.com/go-test/deep/deep.go
new file mode 100644
index 00000000..0eb520f1
--- /dev/null
+++ b/vendor/github.com/go-test/deep/deep.go
@@ -0,0 +1,368 @@
+// Package deep provides function deep.Equal which is like reflect.DeepEqual but
+// returns a list of differences. This is helpful when comparing complex types
+// like structures and maps.
+package deep
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+var (
+	// FloatPrecision is the number of decimal places to round float values
+	// to when comparing.
+	FloatPrecision = 10
+
+	// MaxDiff specifies the maximum number of differences to return.
+	MaxDiff = 10
+
+	// MaxDepth specifies the maximum levels of a struct to recurse into,
+	// if greater than zero. If zero, there is no limit.
+	MaxDepth = 0
+
+	// LogErrors causes errors to be logged to STDERR when true.
+	LogErrors = false
+
+	// CompareUnexportedFields causes unexported struct fields, like s in
+	// T{s int}, to be compared when true.
+	CompareUnexportedFields = false
+)
+
+var (
+	// ErrMaxRecursion is logged when MaxDepth is reached.
+	ErrMaxRecursion = errors.New("recursed to MaxDepth")
+
+	// ErrTypeMismatch is logged when Equal is passed two different types of values.
+	ErrTypeMismatch = errors.New("variables are different reflect.Type")
+
+	// ErrNotHandled is logged when a primitive Go kind is not handled.
+	ErrNotHandled = errors.New("cannot compare the reflect.Kind")
+)
+
+type cmp struct {
+	diff        []string
+	buff        []string
+	floatFormat string
+}
+
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+// Equal compares variables a and b, recursing into their structure up to
+// MaxDepth levels deep (if greater than zero), and returns a list of differences,
+// or nil if there are none. Some differences may not be found if an error is
+// also returned.
+//
+// If a type has an Equal method, like time.Equal, it is called to check for
+// equality.
+func Equal(a, b interface{}) []string {
+	aVal := reflect.ValueOf(a)
+	bVal := reflect.ValueOf(b)
+	c := &cmp{
+		diff:        []string{},
+		buff:        []string{},
+		floatFormat: fmt.Sprintf("%%.%df", FloatPrecision),
+	}
+	if a == nil && b == nil {
+		return nil
+	} else if a == nil && b != nil {
+		c.saveDiff("<nil pointer>", b)
+	} else if a != nil && b == nil {
+		c.saveDiff(a, "<nil pointer>")
+	}
+	if len(c.diff) > 0 {
+		return c.diff
+	}
+
+	c.equals(aVal, bVal, 0)
+	if len(c.diff) > 0 {
+		return c.diff // diffs
+	}
+	return nil // no diffs
+}
+
+func (c *cmp) equals(a, b reflect.Value, level int) {
+	if MaxDepth > 0 && level > MaxDepth {
+		logError(ErrMaxRecursion)
+		return
+	}
+
+	// Check if one value is nil, e.g. T{x: *X} and T.x is nil
+	if !a.IsValid() || !b.IsValid() {
+		if a.IsValid() && !b.IsValid() {
+			c.saveDiff(a.Type(), "<nil pointer>")
+		} else if !a.IsValid() && b.IsValid() {
+			c.saveDiff("<nil pointer>", b.Type())
+		}
+		return
+	}
+
+	// If different types, they can't be equal
+	aType := a.Type()
+	bType := b.Type()
+	if aType != bType {
+		c.saveDiff(aType, bType)
+		logError(ErrTypeMismatch)
+		return
+	}
+
+	// Primitive https://golang.org/pkg/reflect/#Kind
+	aKind := a.Kind()
+	bKind := b.Kind()
+
+	// If both types implement the error interface, compare the error strings.
+	// This must be done before dereferencing because the interface is on a
+	// pointer receiver.
+	if aType.Implements(errorType) && bType.Implements(errorType) {
+		if a.Elem().IsValid() && b.Elem().IsValid() { // both err != nil
+			aString := a.MethodByName("Error").Call(nil)[0].String()
+			bString := b.MethodByName("Error").Call(nil)[0].String()
+			if aString != bString {
+				c.saveDiff(aString, bString)
+				return
+			}
+		}
+	}
+
+	// Dereference pointers and interface{}
+	if aElem, bElem := aKind == reflect.Ptr || aKind == reflect.Interface,
+		bKind == reflect.Ptr || bKind == reflect.Interface; aElem || bElem {
+
+		if aElem {
+			a = a.Elem()
+		}
+
+		if bElem {
+			b = b.Elem()
+		}
+
+		c.equals(a, b, level+1)
+		return
+	}
+
+	switch aKind {
+
+	/////////////////////////////////////////////////////////////////////
+	// Iterable kinds
+	/////////////////////////////////////////////////////////////////////
+
+	case reflect.Struct:
+		/*
+			The variables are structs like:
+				type T struct {
+					FirstName string
+					LastName  string
+				}
+			Type = <pkg>.T, Kind = reflect.Struct
+
+			Iterate through the fields (FirstName, LastName), recurse into their values.
+		*/
+
+		// Types with an Equal() method, like time.Time, only if struct field
+		// is exported (CanInterface)
+		if eqFunc := a.MethodByName("Equal"); eqFunc.IsValid() && eqFunc.CanInterface() {
+			// Handle https://github.com/go-test/deep/issues/15:
+			// Don't call T.Equal if the method is from an embedded struct, like:
+			//   type Foo struct { time.Time }
+			// First, we'll encounter Equal(Ttime, time.Time) but if we pass b
+			// as the 2nd arg we'll panic: "Call using pkg.Foo as type time.Time"
+			// As far as I can tell, there's no way to see that the method is from
+			// time.Time not Foo. So we check the type of the 1st (0) arg and skip
+			// unless it's b's type. Later, we'll encounter the time.Time anonymous/
+			// embedded field and then we'll have Equal(time.Time, time.Time).
+			funcType := eqFunc.Type()
+			if funcType.NumIn() == 1 && funcType.In(0) == bType {
+				retVals := eqFunc.Call([]reflect.Value{b})
+				if !retVals[0].Bool() {
+					c.saveDiff(a, b)
+				}
+				return
+			}
+		}
+
+		for i := 0; i < a.NumField(); i++ {
+			if aType.Field(i).PkgPath != "" && !CompareUnexportedFields {
+				continue // skip unexported field, e.g. s in type T struct {s string}
+			}
+
+			c.push(aType.Field(i).Name) // push field name to buff
+
+			// Get the Value for each field, e.g. FirstName has Type = string,
+			// Kind = reflect.String.
+			af := a.Field(i)
+			bf := b.Field(i)
+
+			// Recurse to compare the field values
+			c.equals(af, bf, level+1)
+
+			c.pop() // pop field name from buff
+
+			if len(c.diff) >= MaxDiff {
+				break
+			}
+		}
+	case reflect.Map:
+		/*
+			The variables are maps like:
+				map[string]int{
+					"foo": 1,
+					"bar": 2,
+				}
+			Type = map[string]int, Kind = reflect.Map
+
+			Or:
+				type T map[string]int{}
+			Type = <pkg>.T, Kind = reflect.Map
+
+			Iterate through the map keys (foo, bar), recurse into their values.
+		*/
+
+		if a.IsNil() || b.IsNil() {
+			if a.IsNil() && !b.IsNil() {
+				c.saveDiff("<nil map>", b)
+			} else if !a.IsNil() && b.IsNil() {
+				c.saveDiff(a, "<nil map>")
+			}
+			return
+		}
+
+		if a.Pointer() == b.Pointer() {
+			return
+		}
+
+		for _, key := range a.MapKeys() {
+			c.push(fmt.Sprintf("map[%s]", key))
+
+			aVal := a.MapIndex(key)
+			bVal := b.MapIndex(key)
+			if bVal.IsValid() {
+				c.equals(aVal, bVal, level+1)
+			} else {
+				c.saveDiff(aVal, "<does not have key>")
+			}
+
+			c.pop()
+
+			if len(c.diff) >= MaxDiff {
+				return
+			}
+		}
+
+		for _, key := range b.MapKeys() {
+			if aVal := a.MapIndex(key); aVal.IsValid() {
+				continue
+			}
+
+			c.push(fmt.Sprintf("map[%s]", key))
+			c.saveDiff("<does not have key>", b.MapIndex(key))
+			c.pop()
+			if len(c.diff) >= MaxDiff {
+				return
+			}
+		}
+	case reflect.Array:
+		n := a.Len()
+		for i := 0; i < n; i++ {
+			c.push(fmt.Sprintf("array[%d]", i))
+			c.equals(a.Index(i), b.Index(i), level+1)
+			c.pop()
+			if len(c.diff) >= MaxDiff {
+				break
+			}
+		}
+	case reflect.Slice:
+		if a.IsNil() || b.IsNil() {
+			if a.IsNil() && !b.IsNil() {
+				c.saveDiff("<nil slice>", b)
+			} else if !a.IsNil() && b.IsNil() {
+				c.saveDiff(a, "<nil slice>")
+			}
+			return
+		}
+
+		aLen := a.Len()
+		bLen := b.Len()
+
+		if a.Pointer() == b.Pointer() && aLen == bLen {
+			return
+		}
+
+		n := aLen
+		if bLen > aLen {
+			n = bLen
+		}
+		for i := 0; i < n; i++ {
+			c.push(fmt.Sprintf("slice[%d]", i))
+			if i < aLen && i < bLen {
+				c.equals(a.Index(i), b.Index(i), level+1)
+			} else if i < aLen {
+				c.saveDiff(a.Index(i), "<no value>")
+			} else {
+				c.saveDiff("<no value>", b.Index(i))
+			}
+			c.pop()
+			if len(c.diff) >= MaxDiff {
+				break
+			}
+		}
+
+	/////////////////////////////////////////////////////////////////////
+	// Primitive kinds
+	/////////////////////////////////////////////////////////////////////
+
+	case reflect.Float32, reflect.Float64:
+		// Avoid 0.04147685731961082 != 0.041476857319611
+		// FloatPrecision decimal places is close enough
+		aval := fmt.Sprintf(c.floatFormat, a.Float())
+		bval := fmt.Sprintf(c.floatFormat, b.Float())
+		if aval != bval {
+			c.saveDiff(a.Float(), b.Float())
+		}
+	case reflect.Bool:
+		if a.Bool() != b.Bool() {
+			c.saveDiff(a.Bool(), b.Bool())
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if a.Int() != b.Int() {
+			c.saveDiff(a.Int(), b.Int())
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		if a.Uint() != b.Uint() {
+			c.saveDiff(a.Uint(), b.Uint())
+		}
+	case reflect.String:
+		if a.String() != b.String() {
+			c.saveDiff(a.String(), b.String())
+		}
+
+	default:
+		logError(ErrNotHandled)
+	}
+}
+
+func (c *cmp) push(name string) {
+	c.buff = append(c.buff, name)
+}
+
+func (c *cmp) pop() {
+	if len(c.buff) > 0 {
+		c.buff = c.buff[0 : len(c.buff)-1]
+	}
+}
+
+func (c *cmp) saveDiff(aval, bval interface{}) {
+	if len(c.buff) > 0 {
+		varName := strings.Join(c.buff, ".")
+		c.diff = append(c.diff, fmt.Sprintf("%s: %v != %v", varName, aval, bval))
+	} else {
+		c.diff = append(c.diff, fmt.Sprintf("%v != %v", aval, bval))
+	}
+}
+
+func logError(err error) {
+	if LogErrors {
+		log.Println(err)
+	}
+}
diff --git a/vendor/github.com/go-test/deep/go.mod b/vendor/github.com/go-test/deep/go.mod
new file mode 100644
index 00000000..6e8ca1d2
--- /dev/null
+++ b/vendor/github.com/go-test/deep/go.mod
@@ -0,0 +1 @@
+module github.com/go-test/deep
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
new file mode 100644
index 00000000..3d97fc7a
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of GoGo authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. + +Sendgrid, Inc +Vastech SA (PTY) LTD +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 00000000..1b4f6c20 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,23 @@ +Anton Povarov +Brian Goff +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +Johan Brandhorst +John Shahid +John Tuley +Laurent +Patrick Lee +Peter Edge +Roger Johansson +Sam Nguyen +Sergio Arbeo +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Vyacheslav Kim +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 00000000..f57de90d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/io/full.go b/vendor/github.com/gogo/protobuf/io/full.go new file mode 100644 index 00000000..550726a3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/io/full.go @@ -0,0 +1,102 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package io
+
+import (
+	"github.com/gogo/protobuf/proto"
+	"io"
+)
+
+func NewFullWriter(w io.Writer) WriteCloser {
+	return &fullWriter{w, nil}
+}
+
+type fullWriter struct {
+	w      io.Writer
+	buffer []byte
+}
+
+func (this *fullWriter) WriteMsg(msg proto.Message) (err error) {
+	var data []byte
+	if m, ok := msg.(marshaler); ok {
+		if n, ok := getSize(m); ok {
+			if n >= len(this.buffer) {
+				this.buffer = make([]byte, n)
+			}
+			if _, err = m.MarshalTo(this.buffer); err != nil {
+				return err
+			}
+			data = this.buffer[:n]
+		}
+	}
+	if data == nil {
+		// Fall back to proto.Marshal when the message does not expose a
+		// usable Size/ProtoSize method.
+		data, err = proto.Marshal(msg)
+		if err != nil {
+			return err
+		}
+	}
+	_, err = this.w.Write(data)
+	return err
+}
+
+func (this *fullWriter) Close() error {
+	if closer, ok := this.w.(io.Closer); ok {
+		return closer.Close()
+	}
+	return nil
+}
+
+type fullReader struct {
+	r   io.Reader
+	buf []byte
+}
+
+func NewFullReader(r io.Reader, maxSize int) ReadCloser {
+	return &fullReader{r, make([]byte, maxSize)}
+}
+
+func (this *fullReader) ReadMsg(msg proto.Message) error {
+	length, err := this.r.Read(this.buf)
+	if err != nil {
+		return err
+	}
+	return proto.Unmarshal(this.buf[:length], msg)
+}
+
+func (this *fullReader) Close() error {
+	if closer, ok := this.r.(io.Closer); ok {
+		return closer.Close()
+	}
+	return nil
+}
diff --git a/vendor/github.com/gogo/protobuf/io/io.go b/vendor/github.com/gogo/protobuf/io/io.go
new file mode 100644
index 00000000..6dca519a
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/io/io.go
@@ -0,0 +1,70 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package io + +import ( + "github.com/gogo/protobuf/proto" + "io" +) + +type Writer interface { + WriteMsg(proto.Message) error +} + +type WriteCloser interface { + Writer + io.Closer +} + +type Reader interface { + ReadMsg(msg proto.Message) error +} + +type ReadCloser interface { + Reader + io.Closer +} + +type marshaler interface { + MarshalTo(data []byte) (n int, err error) +} + +func getSize(v interface{}) (int, bool) { + if sz, ok := v.(interface { + Size() (n int) + }); ok { + return sz.Size(), true + } else if sz, ok := v.(interface { + ProtoSize() (n int) + }); ok { + return sz.ProtoSize(), true + } else { + return 0, false + } +} diff --git a/vendor/github.com/gogo/protobuf/io/uint32.go b/vendor/github.com/gogo/protobuf/io/uint32.go new file mode 100644 index 00000000..fc43857d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/io/uint32.go @@ -0,0 +1,138 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
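+// Framing sketch (editor's illustration; conn and msg stand in for any
+// io.ReadWriter and proto.Message): each message is written as a fixed
+// 4-byte length prefix in the chosen byte order, followed by the marshaled
+// protobuf bytes:
+//
+//	w := NewUint32DelimitedWriter(conn, binary.BigEndian)
+//	_ = w.WriteMsg(msg)
+//	r := NewUint32DelimitedReader(conn, binary.BigEndian, 1<<20) // 1 MiB cap
+//	_ = r.ReadMsg(msg)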
+ +package io + +import ( + "encoding/binary" + "io" + + "github.com/gogo/protobuf/proto" +) + +const uint32BinaryLen = 4 + +func NewUint32DelimitedWriter(w io.Writer, byteOrder binary.ByteOrder) WriteCloser { + return &uint32Writer{w, byteOrder, nil, make([]byte, uint32BinaryLen)} +} + +func NewSizeUint32DelimitedWriter(w io.Writer, byteOrder binary.ByteOrder, size int) WriteCloser { + return &uint32Writer{w, byteOrder, make([]byte, size), make([]byte, uint32BinaryLen)} +} + +type uint32Writer struct { + w io.Writer + byteOrder binary.ByteOrder + buffer []byte + lenBuf []byte +} + +func (this *uint32Writer) writeFallback(msg proto.Message) error { + data, err := proto.Marshal(msg) + if err != nil { + return err + } + + length := uint32(len(data)) + this.byteOrder.PutUint32(this.lenBuf, length) + if _, err = this.w.Write(this.lenBuf); err != nil { + return err + } + _, err = this.w.Write(data) + return err +} + +func (this *uint32Writer) WriteMsg(msg proto.Message) error { + m, ok := msg.(marshaler) + if !ok { + return this.writeFallback(msg) + } + + n, ok := getSize(m) + if !ok { + return this.writeFallback(msg) + } + + size := n + uint32BinaryLen + if size > len(this.buffer) { + this.buffer = make([]byte, size) + } + + this.byteOrder.PutUint32(this.buffer, uint32(n)) + if _, err := m.MarshalTo(this.buffer[uint32BinaryLen:]); err != nil { + return err + } + + _, err := this.w.Write(this.buffer[:size]) + return err +} + +func (this *uint32Writer) Close() error { + if closer, ok := this.w.(io.Closer); ok { + return closer.Close() + } + return nil +} + +type uint32Reader struct { + r io.Reader + byteOrder binary.ByteOrder + lenBuf []byte + buf []byte + maxSize int +} + +func NewUint32DelimitedReader(r io.Reader, byteOrder binary.ByteOrder, maxSize int) ReadCloser { + return &uint32Reader{r, byteOrder, make([]byte, 4), nil, maxSize} +} + +func (this *uint32Reader) ReadMsg(msg proto.Message) error { + if _, err := io.ReadFull(this.r, this.lenBuf); err != nil { + return err + } + length32 := this.byteOrder.Uint32(this.lenBuf) + length := int(length32) + if length < 0 || length > this.maxSize { + return io.ErrShortBuffer + } + if length >= len(this.buf) { + this.buf = make([]byte, length) + } + _, err := io.ReadFull(this.r, this.buf[:length]) + if err != nil { + return err + } + return proto.Unmarshal(this.buf[:length], msg) +} + +func (this *uint32Reader) Close() error { + if closer, ok := this.r.(io.Closer); ok { + return closer.Close() + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/io/varint.go b/vendor/github.com/gogo/protobuf/io/varint.go new file mode 100644 index 00000000..e81e296e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/io/varint.go @@ -0,0 +1,133 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package io + +import ( + "bufio" + "encoding/binary" + "errors" + "github.com/gogo/protobuf/proto" + "io" +) + +var ( + errSmallBuffer = errors.New("Buffer Too Small") + errLargeValue = errors.New("Value is Larger than 64 bits") +) + +func NewDelimitedWriter(w io.Writer) WriteCloser { + return &varintWriter{w, make([]byte, binary.MaxVarintLen64), nil} +} + +type varintWriter struct { + w io.Writer + lenBuf []byte + buffer []byte +} + +func (this *varintWriter) WriteMsg(msg proto.Message) (err error) { + var data []byte + if m, ok := msg.(marshaler); ok { + n, ok := getSize(m) + if ok { + if n+binary.MaxVarintLen64 >= len(this.buffer) { + this.buffer = make([]byte, n+binary.MaxVarintLen64) + } + lenOff := binary.PutUvarint(this.buffer, uint64(n)) + _, err = m.MarshalTo(this.buffer[lenOff:]) + if err != nil { + return err + } + _, err = this.w.Write(this.buffer[:lenOff+n]) + return err + } + } + + // fallback + data, err = proto.Marshal(msg) + if err != nil { + return err + } + length := uint64(len(data)) + n := binary.PutUvarint(this.lenBuf, length) + _, err = this.w.Write(this.lenBuf[:n]) + if err != nil { + return err + } + _, err = this.w.Write(data) + return err +} + +func (this *varintWriter) Close() error { + if closer, ok := this.w.(io.Closer); ok { + return closer.Close() + } + return nil +} + +func NewDelimitedReader(r io.Reader, maxSize int) ReadCloser { + var closer io.Closer + if c, ok := r.(io.Closer); ok { + closer = c + } + return &varintReader{bufio.NewReader(r), nil, maxSize, closer} +} + +type varintReader struct { + r *bufio.Reader + buf []byte + maxSize int + closer io.Closer +} + +func (this *varintReader) ReadMsg(msg proto.Message) error { + length64, err := binary.ReadUvarint(this.r) + if err != nil { + return err + } + length := int(length64) + if length < 0 || length > this.maxSize { + return io.ErrShortBuffer + } + if len(this.buf) < length { + this.buf = make([]byte, length) + } + buf := this.buf[:length] + if _, err := io.ReadFull(this.r, buf); err != nil { + return err + } + return proto.Unmarshal(buf, msg) +} + +func (this *varintReader) Close() error { + if this.closer != nil { + return this.closer.Close() + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 00000000..00d65f32 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. 
+# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 00000000..a26b046d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
+	if in.IsNil() {
+		return src
+	}
+	out := reflect.New(in.Type().Elem())
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+		emOut := out.Addr().Interface().(extensionsBytes)
+		bIn := emIn.GetExtensions()
+		bOut := emOut.GetExtensions()
+		*bOut = append(*bOut, *bIn...)
+	} else if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
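Merge's rules — set scalars overwrite, repeated fields append, maps merge key by key, nested messages merge recursively — are easiest to see on a concrete value. A sketch with a hypothetical proto2-style generated type pb.Profile:

	dst := &pb.Profile{Name: proto.String("a"), Tags: []string{"x"}}
	src := &pb.Profile{Tags: []string{"y"}}
	proto.Merge(dst, src)
	// *dst.Name == "a"               (unset in src, left alone)
	// dst.Tags == []string{"x", "y"} (repeated field appended)

	cp := proto.Clone(dst).(*pb.Profile) // deep copy; mutating cp does not touch dst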
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 00000000..24552483 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 00000000..63b0f08b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
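Each varint byte carries seven payload bits, least-significant group first, with the top bit set on every byte except the last. For example, 300 (binary 100101100) splits into the groups 0101100 and 0000010 and encodes as 0xAC 0x02. A quick check against this decoder:

	b := proto.NewBuffer([]byte{0xAC, 0x02})
	x, err := b.DecodeVarint() // x == 300, err == nil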
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. 
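On the wire that is simply a uvarint length followed by that many raw bytes, so {0x03, 'f', 'o', 'o'} carries the bytes "foo". For example:

	b := proto.NewBuffer([]byte{0x03, 'f', 'o', 'o'})
	data, err := b.DecodeRawBytes(true) // data == []byte("foo")
	// alloc=true copies; alloc=false would alias the Buffer's backing array instead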
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. 
This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 00000000..35b882c0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 00000000..fe1bd7d9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
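One behavioral note on the decode.go entry points above before the next file: Unmarshal and UnmarshalMerge differ only in the implicit Reset. Unmarshal clears the target first, while UnmarshalMerge folds the decoded fields into whatever the target already holds, with the same overwrite/append rules as Merge. A sketch, reusing the hypothetical pb.Profile:

	data, _ := proto.Marshal(&pb.Profile{Tags: []string{"y"}})

	m := &pb.Profile{Tags: []string{"x"}}
	_ = proto.Unmarshal(data, m)      // m.Tags == ["y"]: target reset first

	m.Tags = []string{"x"}
	_ = proto.UnmarshalMerge(data, m) // m.Tags == ["x", "y"]: repeated fields append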
+ +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. 
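+		// For example, []*pb.T unwraps to pb.T with both isSlice and isPointer
+		// set, while []byte is deliberately skipped: bytes fields are scalars,
+		// not repeated fields, hence the Uint8 element check below.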
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 00000000..93464c91 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// Range of a Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid Duration
+// may still be too large to fit into a time.Duration (the range of Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %#v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %#v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// durationFromProto converts a Duration to a time.Duration. It
+// returns an error if the Duration is invalid or is too large to be
+// represented in a time.Duration.
+func durationFromProto(p *duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos)
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// durationProto converts a time.Duration to a Duration.
+func durationProto(d time.Duration) *duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
new file mode 100644
index 00000000..e748e173
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
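The sign convention enforced in duration.go above is worth a worked example: durationProto relies on Go's truncating integer division, so -1500ms becomes {Seconds: -1, Nanos: -500000000} rather than borrowing to {Seconds: -2, Nanos: 500000000}. Both helpers are unexported, so this sketch only type-checks inside package proto:

	d := durationProto(-1500 * time.Millisecond) // d.Seconds == -1, d.Nanos == -500000000
	back, err := durationFromProto(d)            // back == -1500 * time.Millisecond, err == nil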
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 00000000..3abfed2c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,203 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. 
+// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 00000000..0f5fb173 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
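The zigzag encoders in encode.go above interleave signed values so that small magnitudes stay small (0→0, -1→1, 1→2, -2→3, …); without this, every negative sint32/sint64 value would cost the full ten varint bytes. A quick check:

	b := proto.NewBuffer(nil)
	_ = b.EncodeZigzag32(0xFFFFFFFD) // -3 as a uint32; (-3 << 1) ^ (-3 >> 31) == 5
	// b.Bytes() == []byte{0x05}, and DecodeZigzag32 maps 5 back to -3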
+
+package proto
+
+func NewRequiredNotSetError(field string) *RequiredNotSetError {
+	return &RequiredNotSetError{field}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go
new file mode 100644
index 00000000..d4db5a1c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extensions sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
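+
+As an example of these rules: two proto3 messages whose bytes fields are
+nil and []byte{} respectively compare equal, while two proto2 messages in
+which the same optional field is unset in one and set to its default value
+in the other do not.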
+*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 00000000..686bd2a0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,604 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. 
+ return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
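+//
+// For instance, a proto2 message declaring `extensions 100 to 199;` reports
+// ExtensionRangeArray() == []ExtensionRange{{Start: 100, End: 199}} (both
+// ends inclusive), so field 150 lies in an extension range and field 200
+// does not.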
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
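+//
+// A minimal caller sketch, where E_Ext stands in for some generated
+// *ExtensionDesc:
+//
+//	v, err := proto.GetExtension(msg, E_Ext)
+//	if err == proto.ErrMissingExtension {
+//		// extension absent and no default declared
+//	}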
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. 
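+	// reflect.New(t) yields a *T; Elem() is an addressable zero T that the
+	// unmarshal function below fills in, merging repeated occurrences of
+	// the field as the loop consumes b.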
+ value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. 
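+// In a generated .pb.go file this typically appears as (E_Ext hypothetical):
+//
+//	func init() {
+//		proto.RegisterExtension(E_Ext)
+//	}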
+ +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 00000000..53ebd8cc --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,368 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, error) 
{ + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) + m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, 
fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 00000000..d17f8020 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,967 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. 
+ - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. 
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + 
data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
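+//
+// A reuse sketch (Buffer also has a Marshal method in this package; send is
+// the caller's own consumer):
+//
+//	var b proto.Buffer
+//	for _, m := range msgs {
+//		b.Reset()
+//		if err := b.Marshal(m); err != nil {
+//			return err
+//		}
+//		send(b.Bytes())
+//	}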
+func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. 
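+//
+// With the FOO enum from the package documentation:
+//
+//	proto.EnumName(FOO_name, 17) // "X"
+//	proto.EnumName(FOO_name, 42) // "42": unknown values fall back to the number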
+func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. 
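+// As an illustration of SetDefaults above, with the Test message from the
+// package documentation: after proto.SetDefaults(&pb.Test{}), Type points
+// to 77 (Default_Test_Type), while fields without declared defaults stay nil.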
+func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. 
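+// For example, a field declared as
+//
+//	Type *int32 `protobuf:"varint,2,opt,name=type,def=77"`
+//
+// produces a scalarField{kind: reflect.Int32, value: int32(77)} via
+// fieldDefault below (index is filled in by the caller).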
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. 
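+//
+// Sketch: for int64 keys {3, 1, 2}, sort.Sort(mapKeys(ks)) orders them
+// 1, 2, 3; bool keys order false before true; string keys sort
+// lexicographically.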
+func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 00000000..b3aa3919 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,50 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 00000000..f48a7567 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. 
We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) 
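+ // b now holds one self-delimiting encoded field: the key varint, the
+ // length varint, then the (possibly re-joined) message bytes.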
+ + m[id] = Extension{enc: b} + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 00000000..b6cad908 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. 
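+// In this reflect-based build the value is first copied into a freshly
+// allocated location, so the returned pointer never aliases the interface
+// word; isptr exists only to mirror the unsafe implementation's signature.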
+func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 00000000..7ffd3c29 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. 
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 00000000..d55a335d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. 
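+// A sketch of the calling pattern shared by both implementations (msg and
+// f are assumed to be a Message and a precomputed field offset):
+//
+//	p := toPointer(&msg)
+//	n := *p.offset(f).toInt64()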
+type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
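+// (One plausible answer: the decoder appends a single element at a time,
+// so a dedicated append helper keeps that hot path simple. A guess at the
+// rationale only.)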
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
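+// For example (sketch), p.asPointerTo(typ).Elem() yields an addressable
+// reflect.Value of type typ backed by the memory at p.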
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 00000000..aca8eed0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
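+// It mirrors pointer_reflect_gogo.go: the same getRef, appendRef, and
+// getSlice helpers, built on raw pointers instead of reflect values.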
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 00000000..c9e5fa02 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,599 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
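+// Lookups below the limit cost a bounds check plus a slice index; for
+// example, get(3) reads fastTags[3], while a rare large tag such as
+// get(100000) falls back to the slowTags map.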
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. 
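+// For example, an optional string field numbered 49 with a default value
+// renders roughly as "bytes,49,opt,name=foo,def=hello!", the same shape
+// that Parse consumes below.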
+func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
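+// It resolves the struct type behind pointers and slices into p.stype,
+// and for map fields it builds MapKeyProp and MapValProp from the
+// protobuf_key and protobuf_val struct tags.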
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
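+ // (A self-referential message would otherwise recurse through
+ // getPropertiesLocked forever.)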
+ propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + _, _, _, oots = om.XXX_OneofFuncs() + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
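+// Generated code registers each message from an init function, along the
+// lines of (sketch; the names are placeholders):
+//
+//	func init() {
+//		proto.RegisterType((*MyMessage)(nil), "mypkg.MyMessage")
+//	}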
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 00000000..40ea3dd9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 00000000..5a5fd93f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
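+//
+// skip_gogo.go: Skip returns how many bytes the wire-format field starting
+// at data[0] occupies (key and value together), descending into groups as
+// needed.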
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 00000000..9b1538d0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3006 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
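+//
+// table_marshal.go implements table-driven marshaling: a per-type table of
+// sizer and marshaler functions is computed once, then reused for every
+// message of that type.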
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and an error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+
+ hassizer bool // has custom sizer
+ hasprotosizer bool // has custom protosizer
+
+ bytesExtensions field // offset of XXX_extensions where the field type is []byte
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+
+ uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of the encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
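+// A rough sketch of the expected call pattern, using hypothetical generated
+// names (Foo and xxx_messageInfo_Foo are assumptions for illustration):
+//
+//	var xxx_messageInfo_Foo InternalMessageInfo
+//
+//	func (m *Foo) XXX_Size() int { return xxx_messageInfo_Foo.Size(m) }
+//	func (m *Foo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+//		return xxx_messageInfo_Foo.Marshal(b, m, deterministic)
+//	}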
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ // Uses the message's Size method if available
+ if u.hassizer {
+ s := ptr.asPointerTo(u.typ).Interface().(Sizer)
+ return s.Size()
+ }
+ // Uses the message's ProtoSize method if available
+ if u.hasprotosizer {
+ s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
+ return s.ProtoSize()
+ }
+
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.bytesExtensions.IsValid() {
+ s := *ptr.offset(u.bytesExtensions).toBytes()
+ n += len(s)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, and returns the slice and an error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
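+ // Ordering sketch (comment added for clarity): a custom Marshaler
+ // short-circuits everything below; otherwise extensions are emitted first
+ // (matching the old marshaler), then regular fields in ascending tag
+ // order, and any XXX_unrecognized bytes are appended last.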
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
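+ // Note (added for clarity): initialization is double-checked. size and
+ // marshal test u.initialized with an atomic load before calling here, and
+ // the re-check above under u.Lock makes a concurrent second caller a no-op.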
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
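+ // Worked example (illustrative): for `protobuf:"bytes,49,opt,name=foo,def=hello!"`,
+ // tags[0] is "bytes" (wire type 2) and tags[1] is "49", so the field's
+ // wiretag becomes 49<<3 | 2 = 394 and tagsize = SizeVarint(49<<3) = 2.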
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
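+// Worked example (illustrative): a repeated packed field declared as
+// `Vals []int32` with tag "varint,3,rep,packed" is unwrapped to int32 below,
+// and the Kind/encoding switch then selects
+// sizeVarintS32PackedSlice / appendVarintS32PackedSlice.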
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, 
appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
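+ // (Rationale added for clarity: proto3 semantics would drop a
+ // zero-length bytes value, but a set oneof case must be emitted even
+ // when its payload is empty, so the receiver can tell which case was
+ // chosen.)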
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize 
int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return 
n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} 
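+
+// Worked zigzag example for the sizers above (illustrative): for v = -1,
+// (uint32(v)<<1)^uint32(v>>31) = 0xFFFFFFFE ^ 0xFFFFFFFF = 1, so a small
+// negative value costs one varint byte instead of five; v = 1 maps to 2.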
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
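+ // Worked example (illustrative): v = 300 is 0b100101100; the low 7 bits
+ // plus a continuation bit give 0xAC and the remaining bits give 0x02, so
+ // the v < 1<<14 case below appends the two bytes 0xAC 0x02.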
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
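makeMessageMarshaler above frames a submessage as tag, varint length, then payload; the separate sizer pass is what lets Marshal preallocate the output buffer exactly, and cachedsize exists so nested messages are not re-measured quadratically while writing. A hand-rolled sketch of that framing, using binary.AppendUvarint (stdlib, Go 1.19+) as a stand-in for the package's own appendVarint:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Inner message: field 1, varint value 150.
        inner := binary.AppendUvarint(nil, 1<<3|0)
        inner = binary.AppendUvarint(inner, 150)
        // Nest it as field 3 of an outer message: tag, length prefix, payload.
        outer := binary.AppendUvarint(nil, 3<<3|2)
        outer = binary.AppendUvarint(outer, uint64(len(inner)))
        outer = append(outer, inner...)
        fmt.Printf("% x\n", outer) // 1a 03 08 96 01
    }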
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
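One detail of makeMapMarshaler worth spelling out: a map field is encoded as a repeated nested entry message whose key is always field 1 and whose value is always field 2, which is all the keyWireTag and valWireTag constants above express; deterministic mode sorts the keys only because Go map iteration order is randomized. A sketch of the bytes for a map<int32, string> entry {7: "hi"} stored on field 4 (stdlib varints as a stand-in, Go 1.19+):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        entry := binary.AppendUvarint(nil, 1<<3|0) // key: field 1, varint
        entry = binary.AppendUvarint(entry, 7)
        entry = binary.AppendUvarint(entry, 2<<3|2) // value: field 2, bytes
        entry = binary.AppendUvarint(entry, uint64(len("hi")))
        entry = append(entry, "hi"...)
        b := binary.AppendUvarint(nil, 4<<3|2) // the map field itself
        b = binary.AppendUvarint(b, uint64(len(entry)))
        b = append(b, entry...)
        fmt.Printf("% x\n", b) // 22 06 08 07 12 02 68 69
    }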
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
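The MessageSet layout in the comment above is group-framed rather than length-framed at the top level: each item is a group on field 1 holding a varint type_id (field 2) and a length-delimited message (field 3). A sketch of the raw framing that sizeMessageSet and appendMessageSet account for, with type_id 100 and an arbitrary placeholder 3-byte payload:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        payload := []byte{0x08, 0x96, 0x01} // placeholder message bytes
        b := []byte{1<<3 | 3}               // start group, field 1
        b = append(b, 2<<3|0)               // type_id: field 2, varint
        b = binary.AppendUvarint(b, 100)
        b = append(b, 3<<3|2) // message: field 3, bytes
        b = binary.AppendUvarint(b, uint64(len(payload)))
        b = append(b, payload...)
        b = append(b, 1<<3|4) // end group, field 1
        fmt.Printf("% x\n", b) // 0b 10 64 1a 03 08 96 01 0c
    }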
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 00000000..997f57c1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+// makeMessageRefMarshaler differs a bit from makeMessageMarshaler.
+// It marshals a message T instead of a *T.
+func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			siz := u.size(ptr)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(ptr)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, ptr, deterministic)
+		}
+}
+
+// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler.
+// It marshals a slice of messages []T instead of []*T.
+func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			var err, errreq error
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				b = appendVarint(b, wiretag)
+				siz := u.size(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if err != nil {
+					if _, ok := err.(*RequiredNotSetError); ok {
+						// Required field in submessage is not set.
+						// We record the error but keep going, to give a complete marshaling.
+ if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
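The stdtime and stdduration marshalers above go through timestampProto and durationProto, internal helpers not shown in this diff; what they compute is, roughly, the (seconds, nanos) split that the well-known Timestamp and Duration messages carry. A sketch of just that arithmetic (the values here are illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamp: seconds since the Unix epoch plus leftover nanoseconds.
        t := time.Date(2020, 3, 5, 12, 0, 0, 500, time.UTC)
        fmt.Println(t.Unix(), t.Nanosecond()) // 1583409600 500

        // Duration: whole seconds plus leftover nanoseconds.
        d := 90*time.Second + 250*time.Millisecond
        fmt.Println(int64(d/time.Second), int64(d%time.Second)) // 90 250000000
    }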
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 00000000..f520106e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,657 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	// * Pointer to struct
+	// * Pointer to basic type (proto2 only)
+	// * Slice (first value in slice header is a pointer)
+	// * String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	// 0: invalid
+	// 1: bool
+	// 4: int32, uint32, float32
+	// 8: int64, uint64, float64
+	basicWidth int
+
+	// merge merges the field from src into dst, where dst and src are
+	// pointers to the types being merged.
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
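Before the per-kind merge functions built below, the semantics they implement can be seen in miniature: non-zero scalars overwrite, slices concatenate, and zero-valued sources are no-ops. A toy model (the struct and field names are illustrative, not from the package):

    package main

    import "fmt"

    type msg struct {
        A int64
        B []int64
    }

    func merge(dst, src *msg) {
        if src.A != 0 { // scalar: a non-zero source value wins
            dst.A = src.A
        }
        if src.B != nil { // slice: concatenate
            dst.B = append(dst.B, src.B...)
        }
    }

    func main() {
        dst := &msg{A: 1, B: []int64{1}}
        merge(dst, &msg{A: 0, B: []int64{2, 3}})
        fmt.Println(dst.A, dst.B) // 1 [1 2 3]
    }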
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic("both pointer and slice for basic type in " + tf.Name())
+		}
+
+		switch tf.Kind() {
+		case reflect.Int32:
+			switch {
+			case isSlice: // E.g., []int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+					/*
+						sfsp := src.toInt32Slice()
+						if *sfsp != nil {
+							dfsp := dst.toInt32Slice()
+							*dfsp = append(*dfsp, *sfsp...)
+							if *dfsp == nil {
+								*dfsp = []int64{}
+							}
+						}
+					*/
+					sfs := src.getInt32Slice()
+					if sfs != nil {
+						dfs := dst.getInt32Slice()
+						dfs = append(dfs, sfs...)
+						if dfs == nil {
+							dfs = []int32{}
+						}
+						dst.setInt32Slice(dfs)
+					}
+				}
+			case isPointer: // E.g., *int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+					/*
+						sfpp := src.toInt32Ptr()
+						if *sfpp != nil {
+							dfpp := dst.toInt32Ptr()
+							if *dfpp == nil {
+								*dfpp = Int32(**sfpp)
+							} else {
+								**dfpp = **sfpp
+							}
+						}
+					*/
+					sfp := src.getInt32Ptr()
+					if sfp != nil {
+						dfp := dst.getInt32Ptr()
+						if dfp == nil {
+							dst.setInt32Ptr(*sfp)
+						} else {
+							*dfp = *sfp
+						}
+					}
+				}
+			default: // E.g., int32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt32(); v != 0 {
+						*dst.toInt32() = v
+					}
+				}
+			}
+		case reflect.Int64:
+			switch {
+			case isSlice: // E.g., []int64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toInt64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toInt64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 00000000..bb2622f2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2245 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1 when reqFields is fully set
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	bytesExtensions field                         // offset of XXX_extensions with type []byte
+	extensionRanges []ExtensionRange              // extension ranges supported by this protobuf struct
+	isMessageSet    bool                          // true if this is a message set
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index
+	// in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
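The unknown-tag path that follows re-encodes the tag and appends the skipped bytes to XXX_unrecognized (or to an extension slot), which is what lets unknown fields survive a decode/encode round trip. A simplified stand-in for the skipField logic it relies on (no groups, no bounds checking):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // skipValue drops one value of the given wire type from b.
    func skipValue(b []byte, wire int) []byte {
        switch wire {
        case 0: // varint
            _, n := binary.Uvarint(b)
            return b[n:]
        case 1: // fixed64
            return b[8:]
        case 2: // length-delimited
            l, n := binary.Uvarint(b)
            return b[n+int(l):]
        case 5: // fixed32
            return b[4:]
        }
        return nil
    }

    func main() {
        // Unknown field 9 (varint 300) followed by field 1 (varint 1).
        b := []byte{0x48, 0xac, 0x02, 0x08, 0x01}
        tag, n := binary.Uvarint(b)
        rest := skipValue(b[n:], int(tag&7))
        // The skipped span b[:len(b)-len(rest)] is what would be copied
        // into XXX_unrecognized, tag included.
        fmt.Printf("field=%d wire=%d rest=% x\n", tag>>3, tag&7, rest)
    }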
+ z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." 
+				f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+	// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
+	if fn.IsValid() && len(oneofFields) > 0 {
+		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+		for i := res.Len() - 1; i >= 0; i-- {
+			v := res.Index(i)                             // interface{}
+			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+			typ := tptr.Elem()                            // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tags := strings.Split(f.Tag.Get("protobuf"), ",")
+			fieldNum, err := strconv.Atoi(tags[1])
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tags[1])
+			}
+			var name string
+			for _, tag := range tags {
+				if strings.HasPrefix(tag, "name=") {
+					name = strings.TrimPrefix(tag, "name=")
+					break
+				}
+			}
+
+			// Find the oneof field that this struct implements.
+			// Might take O(n^2) to process all of the oneofs, but who cares.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
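The dense/sparse split that setTag continues below is a dispatch-table trade-off: common small field numbers get a slice index in the hot decode loop, while large or pathological field numbers fall back to a map. In miniature:

    package main

    import "fmt"

    type dispatch struct {
        dense  []string
        sparse map[uint64]string
    }

    func (d *dispatch) set(tag uint64, v string) {
        if tag < 16 { // cheap array slot for common tags
            for uint64(len(d.dense)) <= tag {
                d.dense = append(d.dense, "")
            }
            d.dense[tag] = v
            return
        }
        if d.sparse == nil {
            d.sparse = map[uint64]string{}
        }
        d.sparse[tag] = v
    }

    func (d *dispatch) get(tag uint64) string {
        if tag < uint64(len(d.dense)) {
            return d.dense[tag]
        }
        return d.sparse[tag] // map fallback for rare tags
    }

    func main() {
        var d dispatch
        d.set(1, "name")
        d.set(1000, "extension")
        fmt.Println(d.get(1), d.get(1000)) // name extension
    }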
+ for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return 
makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. 
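+ // A basic (non-struct) field is either a pointer (optional) or a slice (repeated), never both; + // only message fields may combine them (a repeated message is typically generated as []*Msg).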
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
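+// Each unmarshaler has the signature func(b []byte, f pointer, w int) ([]byte, error): it decodes +// one field of wire type w from the front of b into the field at f and returns the remaining +// bytes, or an error if the data is malformed or the wire type does not match.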
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
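// Reassemble the four little-endian bytes into the IEEE 754 single-precision bit pattern. +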
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 
0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
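+// For length-delimited fields it consumes the length prefix together with the payload; +// for groups it scans ahead to the matching end-group tag via findEndGroup.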
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
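+// For example, decodeVarint([]byte{0xAC, 0x02}) returns (300, 2): +// 0xAC contributes the low seven bits 0x2C (44), and 0x02<<7 adds 256.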
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 00000000..00d6c7ad --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. 
Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 {
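+ // A zero byte count from decodeVarint means the length prefix itself was truncated.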
+ return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) +
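// reflect.Append returns a fresh slice value, so the result must be stored back explicitly. +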
slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 00000000..0407ba85 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,928 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. 
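+// Indentation is written lazily: a '\n' marks the current line complete, and the next +// write on a fresh line emits w.ind levels of indentation before any content.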
+type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
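+// The expanded form renders as, e.g., [type.googleapis.com/pkg.Msg]: < ... >; returning +// (false, nil) tells the caller to fall back to printing the raw TypeUrl and Value fields.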
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
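+ // Keys are sorted first so that the text output is deterministic.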
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeAny writes an arbitrary field. 
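+// Sketch of the rules implemented below: non-finite floats print as
+// inf/-inf/nan, []byte and string values are quoted via writeString, nested
+// messages are wrapped in < > (or { } for groups), and other scalars fall
+// through to fmt.Fprint.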
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	if props != nil {
+		if len(props.CustomType) > 0 {
+			custom, ok := v.Interface().(Marshaler)
+			if ok {
+				data, err := custom.Marshal()
+				if err != nil {
+					return err
+				}
+				if err := writeString(w, string(data)); err != nil {
+					return err
+				}
+				return nil
+			}
+		} else if len(props.CastType) > 0 {
+			if _, ok := v.Interface().(interface {
+				String() string
+			}); ok {
+				switch v.Kind() {
+				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+					reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+					_, err := fmt.Fprintf(w, "%d", v.Interface())
+					return err
+				}
+			}
+		} else if props.StdTime {
+			t, ok := v.Interface().(time.Time)
+			if !ok {
+				return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
+			}
+			tproto, err := timestampProto(t)
+			if err != nil {
+				return err
+			}
+			propsCopy := *props // Make a copy so that this is goroutine-safe
+			propsCopy.StdTime = false
+			err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
+			return err
+		} else if props.StdDuration {
+			d, ok := v.Interface().(time.Duration)
+			if !ok {
+				return fmt.Errorf("stdduration is not time.Duration, but %T", v.Interface())
+			}
+			dproto := durationProto(d)
+			propsCopy := *props // Make a copy so that this is goroutine-safe
+			propsCopy.StdDuration = false
+			err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
+			return err
+		}
+	}
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if v.CanAddr() {
+			// Calling v.Interface on a struct causes the reflect package to
+			// copy the entire struct. This is racy with the new Marshaler
+			// since we atomically update the XXX_sizecache.
+			//
+			// Thus, we retrieve a pointer to the struct if possible to avoid
+			// a race since v.Interface on the pointer doesn't copy the struct.
+			//
+			// If v is not addressable, then we are not worried about a race
+			// since it implies that the binary Marshaler cannot possibly be
+			// mutating this value.
+ v = v.Addr() + } + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } 
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
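+	// e.g. a top-level *MyMessage (hypothetical type) marshals as bare
+	// `field: value` lines rather than being wrapped in < and >.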
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 00000000..1d6c6aa0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+	m, ok := enumStringMaps[props.Enum]
+	if !ok {
+		// No string map registered for this enum: fall back to the numeric
+		// form and stop, so the value is not written a second time below.
+		if err := tm.writeAny(w, v, props); err != nil {
+			return err
+		}
+		return nil
+	}
+	key := int32(0)
+	if v.Kind() == reflect.Ptr {
+		key = int32(v.Elem().Int())
+	} else {
+		key = int32(v.Int())
+	}
+	s, ok := m[key]
+	if !ok {
+		// Unknown enum value: fall back to the numeric form.
+		if err := tm.writeAny(w, v, props); err != nil {
+			return err
+		}
+		return nil
+	}
+	_, err := fmt.Fprint(w, s)
+	return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 00000000..1ce0be2f
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,1018 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
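+//
+// Illustrative input accepted by the parser below (my.pkg.ext is a
+// hypothetical extension):
+//
+//	count: 42
+//	name: "con" "catenated"  # adjacent strings are joined
+//	inner: < flag: true >
+//	[my.pkg.ext]: 1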
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
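+		// e.g. `"foo" 'bar'` becomes a single token whose unquoted value
+		// is "foobar", mirroring C-style string literal concatenation.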
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
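+				// e.g. [type.googleapis.com/my.pkg.MyMessage] (hypothetical);
+				// only the segment after the last '/' names the message type.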
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
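+			// (A nil map is the zero value of an unset map field, so the
+			// first parsed entry triggers this allocation.)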
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
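+//
+// For example, `foo: 1; bar: 2`, `foo: 1, bar: 2`, and `foo: 1 bar: 2` all
+// parse identically.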
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+	if len(props.CustomType) > 0 {
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				tc := reflect.TypeOf(new(Marshaler))
+				ok := t.Elem().Implements(tc.Elem())
+				if ok {
+					fv := v
+					flen := fv.Len()
+					if flen == fv.Cap() {
+						nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+						reflect.Copy(nav, fv)
+						fv.Set(nav)
+					}
+					fv.SetLen(flen + 1)
+
+					// Read one.
+					p.back()
+					return p.readAny(fv.Index(flen), props)
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.ValueOf(custom))
+		} else {
+			custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+		}
+		return nil
+	}
+	if props.StdTime {
+		fv := v
+		p.back()
+		props.StdTime = false
+		tproto := &timestamp{}
+		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+		props.StdTime = true
+		if err != nil {
+			return err
+		}
+		tim, err := timestampFromProto(tproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ts := fv.Interface().([]*time.Time)
+					ts = append(ts, &tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				} else {
+					ts := fv.Interface().([]time.Time)
+					ts = append(ts, tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&tim))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+		}
+		return nil
+	}
+	if props.StdDuration {
+		fv := v
+		p.back()
+		props.StdDuration = false
+		dproto := &duration{}
+		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+		props.StdDuration = true
+		if err != nil {
+			return err
+		}
+		dur, err := durationFromProto(dproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ds := fv.Interface().([]*time.Duration)
+					ds = append(ds, &dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				} else {
+					ds := fv.Interface().([]time.Duration)
+					ds = append(ds, dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&dur))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+		}
+		return nil
+	}
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
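+				// e.g. the unquoted input `data: abc` reports
+				// "invalid string: abc" here.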
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 00000000..9324f654 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
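+//
+// For example, time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix() equals
+// minValidSeconds, the smallest Seconds value that validates.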
+func validateTimestamp(ts *timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if timestampFromProto returns an error, the first
+// return value is not the zero time.Time. Instead, it is the value obtained from
+// the time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because the zero value corresponds
+	// to a valid timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// timestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 00000000..38439fa9
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+
+type timestamp struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *timestamp) Reset()       { *m = timestamp{} }
+func (*timestamp) ProtoMessage()  {}
+func (*timestamp) String() string { return "timestamp" }
+
+func init() {
+	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go
new file mode 100644
index 00000000..b175d1b6
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go
@@ -0,0 +1,1888 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"io"
+	"reflect"
+)
+
+func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+		t := ptr.asPointerTo(u.typ).Interface().(*float64)
+		v := &float64Value{*t}
+		siz := Size(v)
+		return tagsize + SizeVarint(uint64(siz)) + siz
+	}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+		t := ptr.asPointerTo(u.typ).Interface().(*float64)
+		v := &float64Value{*t}
+		buf, err := Marshal(v)
+		if err != nil {
+			return nil, err
+		}
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(buf)))
+		b = append(b, buf...)
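+		// Wire layout built above: varint(wiretag) | varint(len(buf)) | buf,
+		// i.e. the wrapper message is embedded as an ordinary
+		// length-delimited field; the sibling makeStd*Value helpers below
+		// repeat this pattern for their element types.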
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 00000000..c1cf7bf8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore new file mode 100644 index 00000000..42cc4105 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/.gitignore @@ -0,0 +1 @@ +.idea* \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE new file mode 100644 index 00000000..abaf1e45 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 HashiCorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md new file mode 100644 index 00000000..5d56f4b5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -0,0 +1,148 @@ +# go-hclog + +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[godocs]: https://godoc.org/github.com/hashicorp/go-hclog + +`go-hclog` is a package for Go that provides a simple key/value logging +interface for use in development and production environments. + +It provides logging levels that provide decreased output based upon the +desired amount of output, unlike the standard library `log` package. + +It provides `Printf` style logging of values via `hclog.Fmt()`. + +It provides a human readable output mode for use in development as well as +JSON output mode for production. + +## Stability Note + +While this library is fully open source and HashiCorp will be maintaining it +(since we are and will be making extensive use of it), the API and output +format is subject to minor changes as we fully bake and vet it in our projects. +This notice will be removed once it's fully integrated into our major projects +and no further changes are anticipated. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-hclog`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-hclog + +## Usage + +### Use the global logger + +```go +hclog.Default().Info("hello world") +``` + +```text +2017-07-05T16:15:55.167-0700 [INFO ] hello world +``` + +(Note timestamps are removed in future examples for brevity.) 
+
+### Create a new logger
+
+```go
+appLogger := hclog.New(&hclog.LoggerOptions{
+	Name:  "my-app",
+	Level: hclog.LevelFromString("DEBUG"),
+})
+```
+
+### Emit an Info level message with 2 key/value pairs
+
+```go
+input := "5.5"
+_, err := strconv.ParseInt(input, 10, 32)
+if err != nil {
+	appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
+}
+```
+
+```text
+... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
+```
+
+### Create a new Logger for a major subsystem
+
+```go
+subsystemLogger := appLogger.Named("transport")
+subsystemLogger.Info("we are transporting something")
+```
+
+```text
+... [INFO ] my-app.transport: we are transporting something
+```
+
+Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
+reflecting both the application and subsystem names.
+
+### Create a new Logger with fixed key/value pairs
+
+Using `With()` will include a specific key-value pair in all messages emitted
+by that logger.
+
+```go
+requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
+requestLogger := subsystemLogger.With("request", requestID)
+requestLogger.Info("we are transporting a request")
+```
+
+```text
+... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
+```
+
+This allows sub-loggers to be context-specific without having to thread that
+through all the callers.
+
+### Using `hclog.Fmt()`
+
+```go
+totalBandwidth := 200
+appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth))
+```
+
+```text
+... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s"
+```
+
+### Use this with code that uses the standard library logger
+
+If you want to use the standard library's `log.Logger` interface, you can wrap
+`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
+it with the familiar `Println()`, `Printf()`, etc. For example:
+
+```go
+stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
+	InferLevels: true,
+})
+// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
+stdLogger.Printf("[DEBUG] %+v", stdLogger)
+```
+
+```text
+... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
+```
+
+Alternatively, you may configure the system-wide logger:
+
+```go
+// log the standard logger from 'import "log"'
+log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}))
+log.SetPrefix("")
+log.SetFlags(0)
+
+log.Printf("[DEBUG] %d", 42)
+```
+
+```text
+... [DEBUG] my-app: 42
+```
+
+Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
+specify `InferLevels: true`, you will not see any output here. You must change
+`appLogger` to `DEBUG` to see output. See the docs for more information.
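The README above mentions a JSON output mode for production but does not show it. A minimal sketch of enabling it, assuming the `JSONFormat` field of `hclog.LoggerOptions` (the options struct itself is not shown in this hunk):

```go
package main

import hclog "github.com/hashicorp/go-hclog"

func main() {
	// JSONFormat switches output from the human-readable console format
	// to one JSON object per line, suitable for log shippers.
	jsonLogger := hclog.New(&hclog.LoggerOptions{
		Name:       "my-app",
		JSONFormat: true,
	})
	jsonLogger.Info("request served", "status", 200)
}
```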
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go
new file mode 100644
index 00000000..44aa9bf2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go
@@ -0,0 +1,27 @@
+// +build !windows
+
+package hclog
+
+import (
+	"github.com/mattn/go-isatty"
+)
+
+// setColorization will mutate the values of this logger
+// to appropriately configure colorization options. On
+// non-Windows systems it only disables colorization when
+// the output stream is not a terminal.
+func (l *intLogger) setColorization(opts *LoggerOptions) {
+	switch opts.Color {
+	case ColorOff:
+		fallthrough
+	case ForceColor:
+		return
+	case AutoColor:
+		fi := l.checkWriterIsFile()
+		isUnixTerm := isatty.IsTerminal(fi.Fd())
+		isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd())
+		isTerm := isUnixTerm || isCygwinTerm
+		if !isTerm {
+			l.writer.color = ColorOff
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go
new file mode 100644
index 00000000..23486b6d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package hclog
+
+import (
+	"os"
+
+	colorable "github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+)
+
+// setColorization will mutate the values of this logger
+// to appropriately configure colorization options. It provides
+// a wrapper to the output stream on Windows systems.
+func (l *intLogger) setColorization(opts *LoggerOptions) {
+	switch opts.Color {
+	case ColorOff:
+		return
+	case ForceColor:
+		fi := l.checkWriterIsFile()
+		l.writer.w = colorable.NewColorable(fi)
+	case AutoColor:
+		fi := l.checkWriterIsFile()
+		isUnixTerm := isatty.IsTerminal(os.Stdout.Fd())
+		isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd())
+		isTerm := isUnixTerm || isCygwinTerm
+		if !isTerm {
+			l.writer.color = ColorOff
+			return
+		}
+		l.writer.w = colorable.NewColorable(fi)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go
new file mode 100644
index 00000000..7815f501
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/context.go
@@ -0,0 +1,38 @@
+package hclog
+
+import (
+	"context"
+)
+
+// WithContext inserts a logger into the context and is retrievable
+// with FromContext. The optional args can be set with the same syntax as
+// Logger.With to set fields on the inserted logger. This will not modify
+// the logger argument in-place.
+func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context {
+	// While we could call logger.With even with zero args, we have this
+	// check to avoid unnecessary allocations around creating a copy of a
+	// logger.
+	if len(args) > 0 {
+		logger = logger.With(args...)
+	}
+
+	return context.WithValue(ctx, contextKey, logger)
+}
+
+// FromContext returns a logger from the context. This will return L()
+// (the default logger) if no logger is found in the context. Therefore,
+// this will never return a nil value.
+func FromContext(ctx context.Context) Logger {
+	logger, _ := ctx.Value(contextKey).(Logger)
+	if logger == nil {
+		return L()
+	}
+
+	return logger
+}
+
+// Unexported new type so that our context key never collides with another.
+type contextKeyType struct{}
+
+// contextKey is the key used for the context to store the logger.
+var contextKey = contextKeyType{}
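The `WithContext`/`FromContext` pair above is how this package threads a logger through request-scoped code. A small usage sketch; the `handle` function and the `request_id` key are illustrative, not part of the library:

```go
package main

import (
	"context"

	hclog "github.com/hashicorp/go-hclog"
)

func handle(ctx context.Context) {
	// FromContext never returns nil: it falls back to the default
	// logger when nothing was stored in the context.
	hclog.FromContext(ctx).Info("handling request")
}

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "api"})
	// Extra args are applied via logger.With, so every message logged
	// from this context carries the request_id field.
	ctx := hclog.WithContext(context.Background(), logger, "request_id", "42")
	handle(ctx)
}
```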
diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go
new file mode 100644
index 00000000..22ebc57d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/global.go
@@ -0,0 +1,62 @@
+package hclog
+
+import (
+	"sync"
+)
+
+var (
+	protect sync.Once
+	def     Logger
+
+	// DefaultOptions is used to create the Default logger. These are read
+	// only when the Default logger is created, so set them as soon as the
+	// process starts.
+	DefaultOptions = &LoggerOptions{
+		Level:  DefaultLevel,
+		Output: DefaultOutput,
+	}
+)
+
+// Default returns a globally held logger. This can be a good starting
+// place, and then you can use .With() and .Named() to create sub-loggers
+// to be used in more specific contexts.
+// The value of the Default logger can be set via SetDefault() or by
+// changing the options in DefaultOptions.
+//
+// This method is goroutine-safe, returning a global from memory, but
+// care should be used if SetDefault() is called at random times
+// in the program, as that may result in race conditions and an unexpected
+// Logger being returned.
+func Default() Logger {
+	protect.Do(func() {
+		// If SetDefault was used before Default() was called, we need to
+		// detect that here.
+		if def == nil {
+			def = New(DefaultOptions)
+		}
+	})
+
+	return def
+}
+
+// L is a short alias for Default().
+func L() Logger {
+	return Default()
+}
+
+// SetDefault changes the logger to be returned by Default() and L()
+// to the one given. This allows packages to use the default logger
+// and have higher level packages change it to match the execution
+// environment. It returns any old default if there is one.
+//
+// NOTE: This is expected to be called early in the program to set up
+// a default logger. As such, it does not attempt to make itself
+// not racy with regard to the value of the default logger. Ergo
+// if it is called in goroutines, you may experience race conditions
+// with other goroutines retrieving the default logger. Basically,
+// don't do that.
+func SetDefault(log Logger) Logger {
+	old := def
+	def = log
+	return old
+}
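Given the race caveat in the NOTE above, the intended pattern is to install the default logger once, early in `main`, and read it through `hclog.L()` everywhere else. A brief sketch (`hclog.Debug` is the package's DEBUG level constant):

```go
package main

import hclog "github.com/hashicorp/go-hclog"

func main() {
	// Install the process-wide default before any goroutines start,
	// so later hclog.L() / hclog.Default() calls are race-free.
	hclog.SetDefault(hclog.New(&hclog.LoggerOptions{
		Name:  "my-app",
		Level: hclog.Debug,
	}))

	hclog.L().Debug("default logger configured")
}
```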
h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go new file mode 100644 index 00000000..7e86dc87 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -0,0 +1,230 @@ +package hclog + +import ( + "io" + "log" + "sync" + "sync/atomic" +) + +var _ Logger = &interceptLogger{} + +type interceptLogger struct { + Logger + + mu *sync.Mutex + sinkCount *int32 + Sinks map[SinkAdapter]struct{} +} + +func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { + intercept := &interceptLogger{ + Logger: New(opts), + mu: new(sync.Mutex), + sinkCount: new(int32), + Sinks: make(map[SinkAdapter]struct{}), + } + + atomic.StoreInt32(intercept.sinkCount, 0) + + return intercept +} + +func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) { + i.Logger.Log(level, msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at TRACE level to log and sinks +func (i *interceptLogger) Trace(msg string, args ...interface{}) { + i.Logger.Trace(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Trace, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at DEBUG level to log and sinks +func (i *interceptLogger) Debug(msg string, args ...interface{}) { + i.Logger.Debug(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Debug, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at INFO level to log and sinks +func (i *interceptLogger) Info(msg string, args ...interface{}) { + i.Logger.Info(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Info, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at WARN level to log and sinks +func (i *interceptLogger) Warn(msg string, args ...interface{}) { + i.Logger.Warn(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Warn, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at ERROR level to log and sinks +func (i *interceptLogger) Error(msg string, args ...interface{}) { + i.Logger.Error(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Error, msg, i.retrieveImplied(args...)...) + } +} + +func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} { + top := i.Logger.ImpliedArgs() + + cp := make([]interface{}, len(top)+len(args)) + copy(cp, top) + copy(cp[len(top):], args) + + return cp +} + +// Create a new sub-Logger that a name decending from the current name. 
+// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) Named(name string) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.Named(name) + + return &sub +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamed(name string) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.ResetNamed(name) + + return &sub +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) NamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.Named(name) + + return &sub +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.ResetNamed(name) + + return &sub +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (i *interceptLogger) With(args ...interface{}) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.With(args...) + + return &sub +} + +// RegisterSink attaches a SinkAdapter to interceptLoggers sinks. +func (i *interceptLogger) RegisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + i.Sinks[sink] = struct{}{} + + atomic.AddInt32(i.sinkCount, 1) +} + +// DeregisterSink removes a SinkAdapter from interceptLoggers sinks. +func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + delete(i.Sinks, sink) + + atomic.AddInt32(i.sinkCount, -1) +} + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library to log to +// actually use this logger, which will also send to any registered sinks. +func (l *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(l.StandardWriterIntercept(opts), "", 0) +} + +func (l *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: l, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go new file mode 100644 index 00000000..5d76ee3f --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -0,0 +1,648 @@ +package hclog + +import ( + "bytes" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/fatih/color" +) + +// TimeFormat to use for logging. 
This is a version of RFC3339 that
+// contains millisecond precision
+const TimeFormat = "2006-01-02T15:04:05.000Z0700"
+
+// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json
+const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json"
+
+var (
+	_levelToBracket = map[Level]string{
+		Debug: "[DEBUG]",
+		Trace: "[TRACE]",
+		Info:  "[INFO] ",
+		Warn:  "[WARN] ",
+		Error: "[ERROR]",
+	}
+
+	_levelToColor = map[Level]*color.Color{
+		Debug: color.New(color.FgHiWhite),
+		Trace: color.New(color.FgHiGreen),
+		Info:  color.New(color.FgHiBlue),
+		Warn:  color.New(color.FgHiYellow),
+		Error: color.New(color.FgHiRed),
+	}
+)
+
+// Make sure that intLogger is a Logger
+var _ Logger = &intLogger{}
+
+// intLogger is an internal logger implementation. Internal in that it is
+// defined entirely by this package.
+type intLogger struct {
+	json       bool
+	caller     bool
+	name       string
+	timeFormat string
+
+	// This is a pointer so that it's shared by any derived loggers, since
+	// those derived loggers share the bufio.Writer as well.
+	mutex  *sync.Mutex
+	writer *writer
+	level  *int32
+
+	implied []interface{}
+}
+
+// New returns a configured logger.
+func New(opts *LoggerOptions) Logger {
+	return newLogger(opts)
+}
+
+// NewSinkAdapter returns a SinkAdapter with configured settings
+// defined by LoggerOptions
+func NewSinkAdapter(opts *LoggerOptions) SinkAdapter {
+	return newLogger(opts)
+}
+
+func newLogger(opts *LoggerOptions) *intLogger {
+	if opts == nil {
+		opts = &LoggerOptions{}
+	}
+
+	output := opts.Output
+	if output == nil {
+		output = DefaultOutput
+	}
+
+	level := opts.Level
+	if level == NoLevel {
+		level = DefaultLevel
+	}
+
+	mutex := opts.Mutex
+	if mutex == nil {
+		mutex = new(sync.Mutex)
+	}
+
+	l := &intLogger{
+		json:       opts.JSONFormat,
+		caller:     opts.IncludeLocation,
+		name:       opts.Name,
+		timeFormat: TimeFormat,
+		mutex:      mutex,
+		writer:     newWriter(output, opts.Color),
+		level:      new(int32),
+	}
+
+	l.setColorization(opts)
+
+	if opts.TimeFormat != "" {
+		l.timeFormat = opts.TimeFormat
+	}
+
+	atomic.StoreInt32(l.level, int32(level))
+
+	return l
+}
+
+// Log a message and a set of key/value pairs if the given level is at
+// or more severe than the threshold configured in the Logger.
+func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) {
+	if level < Level(atomic.LoadInt32(l.level)) {
+		return
+	}
+
+	t := time.Now()
+
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+
+	if l.json {
+		l.logJSON(t, name, level, msg, args...)
+	} else {
+		l.logPlain(t, name, level, msg, args...)
+	}
+
+	l.writer.Flush(level)
+}
+
+// Cleanup a path by returning the last 2 segments of the path only.
+func trimCallerPath(path string) string {
+	// lovely borrowed from zap
+	// nb. To make sure we trim the path correctly on Windows too, we
+	// counter-intuitively need to use '/' and *not* os.PathSeparator here,
+	// because the path given originates from Go stdlib, specifically
+	// runtime.Caller() which (as of Mar/17) returns forward slashes even on
+	// Windows.
+	//
+	// See https://github.com/golang/go/issues/3335
+	// and https://github.com/golang/go/issues/18151
+	//
+	// for discussion on the issue on Go side.
+
+	// Find the last separator.
+	idx := strings.LastIndexByte(path, '/')
+	if idx == -1 {
+		return path
+	}
+
+	// Find the penultimate separator.
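+	// For example (an illustrative input), "/a/b/c/logger.go" ends up
+	// trimmed to "c/logger.go" by the second search below.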
+ idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +var logImplFile = regexp.MustCompile(`github.com/hashicorp/go-hclog/.+logger.go$`) + +// Non-JSON logging format function +func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + + s, ok := _levelToBracket[level] + if ok { + l.writer.WriteString(s) + } else { + l.writer.WriteString("[?????]") + } + + offset := 3 + if l.caller { + // Check if the caller is inside our package and inside + // a logger implementation file + if _, file, _, ok := runtime.Caller(3); ok { + match := logImplFile.MatchString(file) + if match { + offset = 4 + } + } + + if _, file, line, ok := runtime.Caller(offset); ok { + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') + } + } + + l.writer.WriteByte(' ') + + if name != "" { + l.writer.WriteString(name) + l.writer.WriteString(": ") + } + + l.writer.WriteString(msg) + + args = append(l.implied, args...) + + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) + } + } + + l.writer.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + val string + raw bool + ) + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) 
+ default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + l.writer.WriteByte(' ') + switch st := args[i].(type) { + case string: + l.writer.WriteString(st) + default: + l.writer.WriteString(fmt.Sprintf("%s", st)) + } + l.writer.WriteByte('=') + + if !raw && strings.ContainsAny(val, " \t\n\r") { + l.writer.WriteByte('"') + l.writer.WriteString(val) + l.writer.WriteByte('"') + } else { + l.writer.WriteString(val) + } + } + } + + l.writer.WriteString("\n") + + if stacktrace != "" { + l.writer.WriteString(string(stacktrace)) + } +} + +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) + } + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, name, level, msg) + args = append(l.implied, args...) + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) + } + } + + for i := 0; i < len(args); i = i + 2 { + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) 
+ } + + var key string + + switch st := args[i].(type) { + case string: + key = st + default: + key = fmt.Sprintf("%s", st) + } + vals[key] = val + } + } + + err := json.NewEncoder(l.writer).Encode(vals) + if err != nil { + if _, ok := err.(*json.UnsupportedTypeError); ok { + plainVal := l.jsonMapEntry(t, name, level, msg) + plainVal["@warn"] = errJsonUnsupportedTypeMsg + + json.NewEncoder(l.writer).Encode(plainVal) + } + } +} + +func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if name != "" { + vals["@module"] = name + } + + if l.caller { + if _, file, line, ok := runtime.Caller(4); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + return vals +} + +// Emit the message and args at the provided level +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + l.log(l.Name(), level, msg, args...) +} + +// Emit the message and args at DEBUG level +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.log(l.Name(), Debug, msg, args...) +} + +// Emit the message and args at TRACE level +func (l *intLogger) Trace(msg string, args ...interface{}) { + l.log(l.Name(), Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (l *intLogger) Info(msg string, args ...interface{}) { + l.log(l.Name(), Info, msg, args...) +} + +// Emit the message and args at WARN level +func (l *intLogger) Warn(msg string, args ...interface{}) { + l.log(l.Name(), Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (l *intLogger) Error(msg string, args ...interface{}) { + l.log(l.Name(), Error, msg, args...) +} + +// Indicate that the logger would emit TRACE level logs +func (l *intLogger) IsTrace() bool { + return Level(atomic.LoadInt32(l.level)) == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (l *intLogger) IsDebug() bool { + return Level(atomic.LoadInt32(l.level)) <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (l *intLogger) IsInfo() bool { + return Level(atomic.LoadInt32(l.level)) <= Info +} + +// Indicate that the logger would emit WARN level logs +func (l *intLogger) IsWarn() bool { + return Level(atomic.LoadInt32(l.level)) <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (l *intLogger) IsError() bool { + return Level(atomic.LoadInt32(l.level)) <= Error +} + +const MissingKey = "EXTRA_VALUE_AT_END" + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. 
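+//
+// For example (an illustrative key/value pair):
+//
+//	sub := logger.With("request-id", "abc123")
+//	sub.Info("handled") // emitted line includes request-id=abc123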
+func (l *intLogger) With(args ...interface{}) Logger { + var extra interface{} + + if len(args)%2 != 0 { + extra = args[len(args)-1] + args = args[:len(args)-1] + } + + sl := *l + + result := make(map[string]interface{}, len(l.implied)+len(args)) + keys := make([]string, 0, len(l.implied)+len(args)) + + // Read existing args, store map and key for consistent sorting + for i := 0; i < len(l.implied); i += 2 { + key := l.implied[i].(string) + keys = append(keys, key) + result[key] = l.implied[i+1] + } + // Read new args, store map and key for consistent sorting + for i := 0; i < len(args); i += 2 { + key := args[i].(string) + _, exists := result[key] + if !exists { + keys = append(keys, key) + } + result[key] = args[i+1] + } + + // Sort keys to be consistent + sort.Strings(keys) + + sl.implied = make([]interface{}, 0, len(l.implied)+len(args)) + for _, k := range keys { + sl.implied = append(sl.implied, k) + sl.implied = append(sl.implied, result[k]) + } + + if extra != nil { + sl.implied = append(sl.implied, MissingKey, extra) + } + + return &sl +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +func (l *intLogger) Named(name string) Logger { + sl := *l + + if sl.name != "" { + sl.name = sl.name + "." + name + } else { + sl.name = name + } + + return &sl +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. +func (l *intLogger) ResetNamed(name string) Logger { + sl := *l + + sl.name = name + + return &sl +} + +func (l *intLogger) ResetOutput(opts *LoggerOptions) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + return l.resetOutput(opts) +} + +func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + if flushable == nil { + return errors.New("flushable is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + if err := flushable.Flush(); err != nil { + return err + } + + return l.resetOutput(opts) +} + +func (l *intLogger) resetOutput(opts *LoggerOptions) error { + l.writer = newWriter(opts.Output, opts.Color) + l.setColorization(opts) + return nil +} + +// Update the logging level on-the-fly. This will affect all subloggers as +// well. +func (l *intLogger) SetLevel(level Level) { + atomic.StoreInt32(l.level, int32(level)) +} + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library log to actually +// use this logger. +func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(l.StandardWriter(opts), "", 0) +} + +func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: l, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} + +// checks if the underlying io.Writer is a file, and +// panics if not. For use by colorization. 
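+//
+// For example (an illustrative sketch): AutoColor is safe when Output is a
+// real file such as os.Stderr, while enabling color on a non-file writer
+// trips this panic:
+//
+//	logger := New(&LoggerOptions{Output: os.Stderr, Color: AutoColor})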
+func (l *intLogger) checkWriterIsFile() *os.File {
+	fi, ok := l.writer.w.(*os.File)
+	if !ok {
+		panic("Cannot enable coloring of non-file Writers")
+	}
+	return fi
+}
+
+// Accept implements the SinkAdapter interface
+func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) {
+	i.log(name, level, msg, args...)
+}
+
+// ImpliedArgs returns the logger's implied args
+func (i *intLogger) ImpliedArgs() []interface{} {
+	return i.implied
+}
+
+// Name returns the logger's name
+func (i *intLogger) Name() string {
+	return i.name
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go
new file mode 100644
index 00000000..147bd2d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/logger.go
@@ -0,0 +1,262 @@
+package hclog
+
+import (
+	"io"
+	"log"
+	"os"
+	"strings"
+	"sync"
+)
+
+var (
+	// DefaultOutput is used as the default log output.
+	DefaultOutput io.Writer = os.Stderr
+
+	// DefaultLevel is used as the default log level.
+	DefaultLevel = Info
+)
+
+// Level represents a log level.
+type Level int32
+
+const (
+	// NoLevel is a special level used to indicate that no level has been
+	// set and allow for a default to be used.
+	NoLevel Level = 0
+
+	// Trace is the most verbose level. Intended to be used for the tracing
+	// of actions in code, such as function enters/exits, etc.
+	Trace Level = 1
+
+	// Debug information for programmer lowlevel analysis.
+	Debug Level = 2
+
+	// Info information about steady state operations.
+	Info Level = 3
+
+	// Warn information about rare but handled events.
+	Warn Level = 4
+
+	// Error information about unrecoverable events.
+	Error Level = 5
+)
+
+// Format is a simple convenience type for when formatting is required. When
+// processing a value of this type, the logger automatically treats the first
+// argument as a Printf formatting string and passes the rest as the values
+// to be formatted. For example: L.Info(Fmt("%d beans/day", beans)).
+type Format []interface{}
+
+// Fmt returns a Format type. This is a convenience function for creating a
+// Format type.
+func Fmt(str string, args ...interface{}) Format {
+	return append(Format{str}, args...)
+}
+
+// ColorOption expresses how the output should be colored, if at all.
+type ColorOption uint8
+
+const (
+	// ColorOff is the default coloration, and does not
+	// inject color codes into the io.Writer.
+	ColorOff ColorOption = iota
+	// AutoColor checks if the io.Writer is a tty,
+	// and if so enables coloring.
+	AutoColor
+	// ForceColor will enable coloring, regardless of whether
+	// the io.Writer is a tty or not.
+	ForceColor
+)
+
+// LevelFromString returns a Level type for the named log level, or "NoLevel" if
+// the level string is invalid. This facilitates setting the log level via
+// config or environment variable by name in a predictable way.
+func LevelFromString(levelStr string) Level {
+	// We don't care about case. Accept both "INFO" and "info".
+	levelStr = strings.ToLower(strings.TrimSpace(levelStr))
+	switch levelStr {
+	case "trace":
+		return Trace
+	case "debug":
+		return Debug
+	case "info":
+		return Info
+	case "warn":
+		return Warn
+	case "error":
+		return Error
+	default:
+		return NoLevel
+	}
+}
+
+// Logger describes the interface that must be implemented by all loggers.
+type Logger interface {
+	// Args are alternating key, val pairs
+	// keys must be strings
+	// vals can be any type, but display is implementation specific
+	// Emit a message and key/value pairs at a provided log level
+	Log(level Level, msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the TRACE level
+	Trace(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the DEBUG level
+	Debug(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the INFO level
+	Info(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the WARN level
+	Warn(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the ERROR level
+	Error(msg string, args ...interface{})
+
+	// Indicate if TRACE logs would be emitted. This and the other Is* guards
+	// are used to elide expensive logging code based on the current level.
+	IsTrace() bool
+
+	// Indicate if DEBUG logs would be emitted. This and the other Is* guards
+	IsDebug() bool
+
+	// Indicate if INFO logs would be emitted. This and the other Is* guards
+	IsInfo() bool
+
+	// Indicate if WARN logs would be emitted. This and the other Is* guards
+	IsWarn() bool
+
+	// Indicate if ERROR logs would be emitted. This and the other Is* guards
+	IsError() bool
+
+	// ImpliedArgs returns With key/value pairs
+	ImpliedArgs() []interface{}
+
+	// Creates a sublogger that will always have the given key/value pairs
+	With(args ...interface{}) Logger
+
+	// Returns the Name of the logger
+	Name() string
+
+	// Create a logger that will prepend the name string on the front of all messages.
+	// If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+	// without losing context.
+	Named(name string) Logger
+
+	// Create a logger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named which honors
+	// the current name as well.
+	ResetNamed(name string) Logger
+
+	// Updates the level. This should affect all sub-loggers as well. If an
+	// implementation cannot update the level on the fly, it should no-op.
+	SetLevel(level Level)
+
+	// Return a value that conforms to the stdlib log.Logger interface
+	StandardLogger(opts *StandardLoggerOptions) *log.Logger
+
+	// Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
+	StandardWriter(opts *StandardLoggerOptions) io.Writer
+}
+
+// StandardLoggerOptions can be used to configure a new standard logger.
+type StandardLoggerOptions struct {
+	// Indicate that some minimal parsing should be done on strings to try
+	// and detect their level and re-emit them.
+	// This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO],
+	// [DEBUG] and strips the prefix off before reapplying it.
+	InferLevels bool
+
+	// ForceLevel is used to force all output from the standard logger to be at
+	// the specified level. Similar to InferLevels, this will strip any level
+	// prefix contained in the logged string before applying the forced level.
+	// If set, this overrides InferLevels.
+	ForceLevel Level
+}
+
+// LoggerOptions can be used to configure a new logger.
+type LoggerOptions struct {
+	// Name of the subsystem to prefix logs with
+	Name string
+
+	// The threshold for the logger. Anything less severe is suppressed
+	Level Level
+
+	// Where to write the logs to. Defaults to os.Stderr if nil
+	Output io.Writer
+
+	// An optional mutex pointer in case Output is shared
+	Mutex *sync.Mutex
+
+	// Control if the output should be in JSON.
+	JSONFormat bool
+
+	// Include file and line information in each log line
+	IncludeLocation bool
+
+	// The time format to use instead of the default
+	TimeFormat string
+
+	// Color the output. On Windows, colored logs are only available for io.Writers that
+	// are concretely instances of *os.File.
+	Color ColorOption
+}
+
+// InterceptLogger describes the interface for using a logger
+// that can register different output sinks.
+// This is useful for sending lower level log messages
+// to a different output while keeping the root logger
+// at a higher one.
+type InterceptLogger interface {
+	// Logger is the root logger for an InterceptLogger
+	Logger
+
+	// RegisterSink adds a SinkAdapter to the InterceptLogger
+	RegisterSink(sink SinkAdapter)
+
+	// DeregisterSink removes a SinkAdapter from the InterceptLogger
+	DeregisterSink(sink SinkAdapter)
+
+	// Create an InterceptLogger that will prepend the name string on the front of all messages.
+	// If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+	// without losing context.
+	NamedIntercept(name string) InterceptLogger
+
+	// Create an InterceptLogger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named which honors
+	// the current name as well.
+	ResetNamedIntercept(name string) InterceptLogger
+
+	// Return a value that conforms to the stdlib log.Logger interface
+	StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger
+
+	// Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
+	StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer
+}
+
+// SinkAdapter describes the interface that must be implemented
+// in order to Register a new sink to an InterceptLogger
+type SinkAdapter interface {
+	Accept(name string, level Level, msg string, args ...interface{})
+}
+
+// Flushable represents a method for flushing an output buffer. It can be used
+// when resetting the log to use a new output, in order to flush the writes to
+// the existing output beforehand.
+type Flushable interface {
+	Flush() error
+}
+
+// OutputResettable provides ways to swap the output in use at runtime
+type OutputResettable interface {
+	// ResetOutput swaps the current output writer with the one given in the
+	// opts. Color options given in opts will be used for the new output.
+	ResetOutput(opts *LoggerOptions) error
+
+	// ResetOutputWithFlush swaps the current output writer with the one given
+	// in the opts, first calling Flush on the given Flushable. Color options
+	// given in opts will be used for the new output.
+	ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
new file mode 100644
index 00000000..bc14f770
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
@@ -0,0 +1,58 @@
+package hclog
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+)
+
+// NewNullLogger instantiates a Logger for which all calls
+// will succeed without doing anything.
+// Useful for testing purposes.
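+//
+// For example (an illustrative sketch; newService is a hypothetical
+// constructor that accepts a Logger):
+//
+//	svc := newService(NewNullLogger())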
+func NewNullLogger() Logger { + return &nullLogger{} +} + +type nullLogger struct{} + +func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {} + +func (l *nullLogger) Trace(msg string, args ...interface{}) {} + +func (l *nullLogger) Debug(msg string, args ...interface{}) {} + +func (l *nullLogger) Info(msg string, args ...interface{}) {} + +func (l *nullLogger) Warn(msg string, args ...interface{}) {} + +func (l *nullLogger) Error(msg string, args ...interface{}) {} + +func (l *nullLogger) IsTrace() bool { return false } + +func (l *nullLogger) IsDebug() bool { return false } + +func (l *nullLogger) IsInfo() bool { return false } + +func (l *nullLogger) IsWarn() bool { return false } + +func (l *nullLogger) IsError() bool { return false } + +func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} } + +func (l *nullLogger) With(args ...interface{}) Logger { return l } + +func (l *nullLogger) Name() string { return "" } + +func (l *nullLogger) Named(name string) Logger { return l } + +func (l *nullLogger) ResetNamed(name string) Logger { return l } + +func (l *nullLogger) SetLevel(level Level) {} + +func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + return log.New(l.StandardWriter(opts), "", log.LstdFlags) +} + +func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return ioutil.Discard +} diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go new file mode 100644 index 00000000..9b27bd3d --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package hclog + +import ( + "bytes" + "runtime" + "strconv" + "strings" + "sync" +) + +var ( + _stacktraceIgnorePrefixes = []string{ + "runtime.goexit", + "runtime.main", + } + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +// CapturedStacktrace represents a stacktrace captured by a previous call +// to log.Stacktrace. If passed to a logging function, the stacktrace +// will be appended. +type CapturedStacktrace string + +// Stacktrace captures a stacktrace of the current goroutine and returns +// it to be passed to a logging function. 
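+//
+// For example (illustrative):
+//
+//	logger.Error("request failed", "stacktrace", Stacktrace())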
+func Stacktrace() CapturedStacktrace { + return CapturedStacktrace(takeStacktrace()) +} + +func takeStacktrace() string { + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var buffer bytes.Buffer + + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + n := runtime.Callers(2, programCounters.pcs) + if n < cap(programCounters.pcs) { + programCounters.pcs = programCounters.pcs[:n] + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.WriteByte('\n') + } + i++ + buffer.WriteString(frame.Function) + buffer.WriteByte('\n') + buffer.WriteByte('\t') + buffer.WriteString(frame.File) + buffer.WriteByte(':') + buffer.WriteString(strconv.Itoa(int(frame.Line))) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go new file mode 100644 index 00000000..2cf0456a --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -0,0 +1,74 @@ +package hclog + +import ( + "bytes" + "strings" +) + +// Provides a io.Writer to shim the data out of *log.Logger +// and back into our Logger. This is basically the only way to +// build upon *log.Logger. +type stdlogAdapter struct { + log Logger + inferLevels bool + forceLevel Level +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger. +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.forceLevel != NoLevel { + // Use pickLevel to strip log levels included in the line since we are + // forcing the level + _, str := s.pickLevel(str) + + // Log at the forced level + s.dispatch(str, s.forceLevel) + } else if s.inferLevels { + level, str := s.pickLevel(str) + s.dispatch(str, level) + } else { + s.log.Info(str) + } + + return len(data), nil +} + +func (s *stdlogAdapter) dispatch(str string, level Level) { + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } +} + +// Detect, based on conventions, what log level this is. 
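+// For example, "[WARN] disk almost full" yields (Warn, "disk almost full"),
+// while a line with no recognized prefix falls through to Info.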
+func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 00000000..421a1f06 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,82 @@ +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer + color ColorOption +} + +func newWriter(w io.Writer, color ColorOption) *writer { + return &writer{w: w, color: color} +} + +func (w *writer) Flush(level Level) (err error) { + var unwritten = w.b.Bytes() + + if w.color != ColorOff { + color := _levelToColor[level] + unwritten = []byte(color.Sprintf("%s", unwritten)) + } + + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, unwritten) + } else { + _, err = w.w.Write(unwritten) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. +func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. 
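+//
+// For example (an illustrative sketch), errors can be routed to stderr
+// while all other levels share stdout:
+//
+//	w := NewLeveledWriter(os.Stdout, map[Level]io.Writer{
+//		Error: os.Stderr,
+//	})
+//	logger := New(&LoggerOptions{Output: w})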
+func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml new file mode 100644 index 00000000..1a0bbea6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - tip diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md new file mode 100644 index 00000000..dd7c0efd --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md @@ -0,0 +1,9 @@ +# 1.1.0 (May 22nd, 2019) + +FEATURES + +* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)] + +# 1.0.0 (August 30th, 2018) + +* go mod adopted diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. 
any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+  This Source Code Form is "Incompatible
+  With Secondary Licenses", as defined by
+  the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md
new file mode 100644
index 00000000..4b6338b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md
@@ -0,0 +1,66 @@
+go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix)
+=========
+
+Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
+The package only provides a single `Tree` implementation, optimized for sparse nodes.
+
+As a radix tree, it provides the following:
+ * O(k) operations. In many cases, this can be faster than a hash table since
+   the hash function is an O(k) operation, and hash tables have very poor cache locality.
+ * Minimum / Maximum value lookups
+ * Ordered iteration
+
+A tree supports using a transaction to batch multiple updates (insert, delete)
+in a more efficient manner than performing each operation one at a time.
+
+For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
+
+Example
+=======
+
+Below is a simple example of usage:
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("foo"), 1)
+r, _, _ = r.Insert([]byte("bar"), 2)
+r, _, _ = r.Insert([]byte("foobar"), 2)
+
+// Find the longest prefix match
+m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
+if string(m) != "foo" {
+    panic("should be foo")
+}
+```
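+
+Several updates can also be batched in a single transaction, so only one new
+root is published. A minimal sketch of the `Txn` API defined in `iradix.go`
+below:
+
+```go
+// Batch updates in one transaction; readers of the old root are unaffected.
+txn := r.Txn()
+txn.Insert([]byte("zip"), 3)
+txn.Delete([]byte("bar"))
+r = txn.Commit()
+```
+
+Here is an example of performing a range scan of the keys.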
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+    // key is a []byte, so convert before comparing or printing
+    if string(key) >= "050" {
+        break
+    }
+    fmt.Println(string(key))
+}
+// Output:
+//  005
+//  010
+```
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
new file mode 100644
index 00000000..a6367477
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
@@ -0,0 +1,21 @@
+package iradix
+
+import "sort"
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/go.mod b/vendor/github.com/hashicorp/go-immutable-radix/go.mod
new file mode 100644
index 00000000..27e7b7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/go.mod
@@ -0,0 +1,6 @@
+module github.com/hashicorp/go-immutable-radix
+
+require (
+	github.com/hashicorp/go-uuid v1.0.0
+	github.com/hashicorp/golang-lru v0.5.0
+)
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/go.sum b/vendor/github.com/hashicorp/go-immutable-radix/go.sum
new file mode 100644
index 00000000..7de5dfc5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/go.sum
@@ -0,0 +1,4 @@
+github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
new file mode 100644
index 00000000..e5e6e57f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -0,0 +1,662 @@
+package iradix
+
+import (
+	"bytes"
+	"strings"
+
+	"github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+	// defaultModifiedCache is the default size of the modified node
+	// cache used per transaction. This is used to cache the updates
+	// to the nodes near the root, while the leaves do not need to be
+	// cached. This is important for very large transactions to prevent
+	// the modified cache from growing to be enormous. This is also used
+	// to set the max size of the mutation notify maps since those should
+	// also be bounded in a similar way.
+	defaultModifiedCache = 8192
+)
+
+// Tree implements an immutable radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over a standard
+// hash map is prefix-based lookups and ordered iteration. The immutability
+// means that it is safe to concurrently read from a Tree without any
+// coordination.
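+// Writes are performed through a Txn, which copies only the nodes along
+// the modified paths, so existing readers keep a consistent snapshot.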
+type Tree struct { + root *Node + size int +} + +// New returns an empty Tree +func New() *Tree { + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn struct { + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree) Txn() *Txn { + txn := &Txn{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. + t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified, if the current node has already been +// modified during the course of the transaction, it is used in-place. Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. 
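+// Copies made here are cached in the transaction's writable set, so later
+// writes to the same node reuse the copy instead of copying it again.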
+func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { + // Ensure the writable set exists. + if t.writable == nil { + lru, err := simplelru.NewLRU(defaultModifiedCache, nil) + if err != nil { + panic(err) + } + t.writable = lru + } + + // If this node has already been modified, we can continue to use it + // during this transaction. We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. 
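+	// The parent absorbs the child's prefix, leaf, and edges, collapsing
+	// one level of the tree.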
+	n.prefix = concat(n.prefix, child.prefix)
+	n.leaf = child.leaf
+	if len(child.edges) != 0 {
+		n.edges = make([]edge, len(child.edges))
+		copy(n.edges, child.edges)
+	} else {
+		n.edges = nil
+	}
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+	// Handle key exhaustion
+	if len(search) == 0 {
+		var oldVal interface{}
+		didUpdate := false
+		if n.isLeaf() {
+			oldVal = n.leaf.val
+			didUpdate = true
+		}
+
+		nc := t.writeNode(n, true)
+		nc.leaf = &leafNode{
+			mutateCh: make(chan struct{}),
+			key:      k,
+			val:      v,
+		}
+		return nc, oldVal, didUpdate
+	}
+
+	// Look for the edge
+	idx, child := n.getEdge(search[0])
+
+	// No edge, create one
+	if child == nil {
+		e := edge{
+			label: search[0],
+			node: &Node{
+				mutateCh: make(chan struct{}),
+				leaf: &leafNode{
+					mutateCh: make(chan struct{}),
+					key:      k,
+					val:      v,
+				},
+				prefix: search,
+			},
+		}
+		nc := t.writeNode(n, false)
+		nc.addEdge(e)
+		return nc, nil, false
+	}
+
+	// Determine longest prefix of the search key on match
+	commonPrefix := longestPrefix(search, child.prefix)
+	if commonPrefix == len(child.prefix) {
+		search = search[commonPrefix:]
+		newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+		if newChild != nil {
+			nc := t.writeNode(n, false)
+			nc.edges[idx].node = newChild
+			return nc, oldVal, didUpdate
+		}
+		return nil, oldVal, didUpdate
+	}
+
+	// Split the node
+	nc := t.writeNode(n, false)
+	splitNode := &Node{
+		mutateCh: make(chan struct{}),
+		prefix:   search[:commonPrefix],
+	}
+	nc.replaceEdge(edge{
+		label: search[0],
+		node:  splitNode,
+	})
+
+	// Restore the existing child node
+	modChild := t.writeNode(child, false)
+	splitNode.addEdge(edge{
+		label: modChild.prefix[commonPrefix],
+		node:  modChild,
+	})
+	modChild.prefix = modChild.prefix[commonPrefix:]
+
+	// Create a new leaf node
+	leaf := &leafNode{
+		mutateCh: make(chan struct{}),
+		key:      k,
+		val:      v,
+	}
+
+	// If the new key is a subset, add it to this node
+	search = search[commonPrefix:]
+	if len(search) == 0 {
+		splitNode.leaf = leaf
+		return nc, nil, false
+	}
+
+	// Create a new edge for the node
+	splitNode.addEdge(edge{
+		label: search[0],
+		node: &Node{
+			mutateCh: make(chan struct{}),
+			leaf:     leaf,
+			prefix:   search,
+		},
+	})
+	return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		if !n.isLeaf() {
+			return nil, nil
+		}
+		// Copy the pointer in case we are in a transaction that already
+		// modified this node since the node will be reused. Any changes
+		// made to the node will not affect returning the original leaf
+		// value.
+		oldLeaf := n.leaf
+
+		// Remove the leaf node
+		nc := t.writeNode(n, true)
+		nc.leaf = nil
+
+		// Check if this node should be merged
+		if n != t.root && len(nc.edges) == 1 {
+			t.mergeChild(nc)
+		}
+		return nc, oldLeaf
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	if child == nil || !bytes.HasPrefix(search, child.prefix) {
+		return nil, nil
+	}
+
+	// Consume the search prefix
+	search = search[len(child.prefix):]
+	newChild, leaf := t.delete(n, child, search)
+	if newChild == nil {
+		return nil, nil
+	}
+
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, leaf
+}
+
+// deletePrefix does a recursive deletion of all keys under a prefix
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		nc := t.writeNode(n, true)
+		if n.isLeaf() {
+			nc.leaf = nil
+		}
+		nc.edges = nil
+		return nc, t.trackChannelsAndCount(n)
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
+	// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
+	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+		return nil, 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(search) {
+		search = []byte("")
+	} else {
+		search = search[len(child.prefix):]
+	}
+	newChild, numDeletions := t.deletePrefix(n, child, search)
+	if newChild == nil {
+		return nil, 0
+	}
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
+	newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if !didUpdate {
+		t.size++
+	}
+	return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
+func (t *Txn) Delete(k []byte) (interface{}, bool) {
+	newRoot, leaf := t.delete(nil, t.root, k)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if leaf != nil {
+		t.size--
+		return leaf.val, true
+	}
+	return nil, false
+}
+
+// DeletePrefix is used to delete an entire subtree that matches the prefix.
+// This will delete all nodes under that prefix.
+func (t *Txn) DeletePrefix(prefix []byte) bool {
+	newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
+	if newRoot != nil {
+		t.root = newRoot
+		t.size = t.size - numDeletions
+		return true
+	}
+	return false
+}
+
+// Root returns the current root of the radix tree within this
+// transaction. The root is not safe across insert and delete operations,
+// but can be used to read the current state during a transaction.
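+// Nodes reachable from the returned root may still be mutated in place by
+// later writes in the same transaction, which is why it must not be held
+// across modifications.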
+func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. 
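+// Internally this runs a single-operation transaction and commits it.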
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
+	txn := t.Txn()
+	old, ok := txn.Insert(k, v)
+	return txn.Commit(), old, ok
+}
+
+// Delete is used to delete a given key. Returns the new tree,
+// old value if any, and a bool indicating if the key was set.
+func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
+	txn := t.Txn()
+	old, ok := txn.Delete(k)
+	return txn.Commit(), old, ok
+}
+
+// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
+// and a bool indicating if the prefix matched any nodes
+func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
+	txn := t.Txn()
+	ok := txn.DeletePrefix(k)
+	return txn.Commit(), ok
+}
+
+// Root returns the root node of the tree which can be used for richer
+// query operations.
+func (t *Tree) Root() *Node {
+	return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(k []byte) (interface{}, bool) {
+	return t.root.Get(k)
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 []byte) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// concat two byte slices, returning a third new copy
+func concat(a, b []byte) []byte {
+	c := make([]byte, len(a)+len(b))
+	copy(c, a)
+	copy(c[len(a):], b)
+	return c
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
new file mode 100644
index 00000000..1ecaf831
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -0,0 +1,188 @@
+package iradix
+
+import (
+	"bytes"
+)
+
+// Iterator is used to iterate over a set of nodes
+// in pre-order
+type Iterator struct {
+	node  *Node
+	stack []edges
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+	// Wipe the stack
+	i.stack = nil
+	n := i.node
+	watch = n.mutateCh
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			i.node = n
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			i.node = nil
+			return
+		}
+
+		// Update to the finest granularity as the search makes progress
+		watch = n.mutateCh
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			i.node = n
+			return
+		} else {
+			i.node = nil
+			return
+		}
+	}
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (i *Iterator) SeekPrefix(prefix []byte) {
+	i.SeekPrefixWatch(prefix)
+}
+
+func (i *Iterator) recurseMin(n *Node) *Node {
+	// Traverse to the minimum child
+	if n.leaf != nil {
+		return n
+	}
+	if len(n.edges) > 0 {
+		// Add all the other edges to the stack (the min node will be added as
+		// we recurse)
+		i.stack = append(i.stack, n.edges[1:])
+		return i.recurseMin(n.edges[0].node)
+	}
+	// Shouldn't be possible
+	return nil
+}
+
+// SeekLowerBound is used to seek the iterator to the smallest key that is
+// greater or equal to the given key. There is no watch variant as it's hard to
+// predict based on the radix structure which node(s) changes might affect the
+// result.
+func (i *Iterator) SeekLowerBound(key []byte) {
+	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+	// go because we need only a subset of edges of many nodes in the path to the
+	// leaf with the lower bound.
+	i.stack = []edges{}
+	n := i.node
+	search := key
+
+	found := func(n *Node) {
+		i.node = n
+		i.stack = append(i.stack, edges{edge{node: n}})
+	}
+
+	for {
+		// Compare current prefix with the search key's same-length prefix.
+		var prefixCmp int
+		if len(n.prefix) < len(search) {
+			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+		} else {
+			prefixCmp = bytes.Compare(n.prefix, search)
+		}
+
+		if prefixCmp > 0 {
+			// Prefix is larger, that means the lower bound is greater than the search
+			// and from now on we need to follow the minimum path to the smallest
+			// leaf under this subtree.
+			n = i.recurseMin(n)
+			if n != nil {
+				found(n)
+			}
+			return
+		}
+
+		if prefixCmp < 0 {
+			// Prefix is smaller than search prefix, that means there is no lower
+			// bound
+			i.node = nil
+			return
+		}
+
+		// Prefix is equal, we are still heading for an exact match. If this is a
+		// leaf we're done.
+		if n.leaf != nil {
+			if bytes.Compare(n.leaf.key, key) < 0 {
+				i.node = nil
+				return
+			}
+			found(n)
+			return
+		}
+
+		// Consume the search prefix
+		if len(n.prefix) > len(search) {
+			search = []byte{}
+		} else {
+			search = search[len(n.prefix):]
+		}
+
+		// Otherwise, take the lower bound next edge.
+		idx, lbNode := n.getLowerBoundEdge(search[0])
+		if lbNode == nil {
+			i.node = nil
+			return
+		}
+
+		// Create stack edges for all the strictly higher edges in this node.
+		if idx+1 < len(n.edges) {
+			i.stack = append(i.stack, n.edges[idx+1:])
+		}
+
+		i.node = lbNode
+		// Recurse
+		n = lbNode
+	}
+}
+
+// Next returns the next node in order
+func (i *Iterator) Next() ([]byte, interface{}, bool) {
+	// Initialize our stack if needed
+	if i.stack == nil && i.node != nil {
+		i.stack = []edges{
+			edges{
+				edge{node: i.node},
+			},
+		}
+	}
+
+	for len(i.stack) > 0 {
+		// Inspect the last element of the stack
+		n := len(i.stack)
+		last := i.stack[n-1]
+		elem := last[0].node
+
+		// Update the stack
+		if len(last) > 1 {
+			i.stack[n-1] = last[1:]
+		} else {
+			i.stack = i.stack[:n-1]
+		}
+
+		// Push the edges onto the frontier
+		if len(elem.edges) > 0 {
+			i.stack = append(i.stack, elem.edges)
+		}
+
+		// Return the leaf values if any
+		if elem.leaf != nil {
+			return elem.leaf.key, elem.leaf.val, true
+		}
+	}
+	return nil, nil, false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
new file mode 100644
index 00000000..3ab904ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -0,0 +1,304 @@
+package iradix
+
+import (
+	"bytes"
+	"sort"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning if iteration should
+// be terminated.
+type WalkFn func(k []byte, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+	mutateCh chan struct{}
+	key      []byte
+	val      interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+	label byte
+	node  *Node
+}
+
+// Node is an immutable node in the radix tree
+type Node struct {
+	// mutateCh is closed if this node is modified
+	mutateCh chan struct{}
+
+	// leaf is used to store possible leaf
+	leaf *leafNode
+
+	// prefix is the common prefix we ignore
+	prefix []byte
+
+	// Edges should be stored in-order for iteration.
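+	// They are kept sorted by edge label so getEdge can binary search.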
+ // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) getLowerBoundEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + // we want lower bound behavior so return even if it's not an exact match + if idx < num { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
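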
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+	var last *leafNode
+	search := k
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+	return &Iterator{node: n}
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+	iter := &rawIterator{node: n}
+	iter.Next()
+	return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+	recursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *Node, fn WalkFn) bool {
+	// Visit the leaf values if any
+	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+		return true
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		if recursiveWalk(e.node, fn) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
new file mode 100644
index 00000000..04814c13
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
@@ -0,0 +1,78 @@
+package iradix
+
+// rawIterator visits each of the nodes in the tree, even the ones that are not
+// leaves. It keeps track of the effective path (what a leaf at a given node
+// would be called), which is useful for comparing trees.
+type rawIterator struct {
+	// node is the starting node in the tree for the iterator.
+	node *Node
+
+	// stack keeps track of edges in the frontier.
+	stack []rawStackEntry
+
+	// pos is the current position of the iterator.
+	pos *Node
+
+	// path is the effective path of the current iterator position,
+	// regardless of whether the current node is a leaf.
+	path string
+}
+
+// rawStackEntry is used to keep track of the cumulative common path as well as
+// its associated edges in the frontier.
+type rawStackEntry struct {
+	path  string
+	edges edges
+}
+
+// Front returns the current node that has been iterated to.
+func (i *rawIterator) Front() *Node {
+	return i.pos
+}
+
+// Path returns the effective path of the current node, even if it's not actually
+// a leaf.
+func (i *rawIterator) Path() string {
+	return i.path
+}
+
+// Next advances the iterator to the next node.
+func (i *rawIterator) Next() {
+	// Initialize our stack if needed.
+	if i.stack == nil && i.node != nil {
+		i.stack = []rawStackEntry{
+			rawStackEntry{
+				edges: edges{
+					edge{node: i.node},
+				},
+			},
+		}
+	}
+
+	for len(i.stack) > 0 {
+		// Inspect the last element of the stack.
+		n := len(i.stack)
+		last := i.stack[n-1]
+		elem := last.edges[0].node
+
+		// Update the stack.
+		if len(last.edges) > 1 {
+			i.stack[n-1].edges = last.edges[1:]
+		} else {
+			i.stack = i.stack[:n-1]
+		}
+
+		// Push the edges onto the frontier.
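+		// The accumulated path is extended with this node's prefix, so
+		// Path() always names what a leaf at this position would be called.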
+ if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/hashicorp/go-memdb/.gitignore b/vendor/github.com/hashicorp/go-memdb/.gitignore new file mode 100644 index 00000000..11b90db8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.idea diff --git a/vendor/github.com/hashicorp/go-memdb/.travis.yml b/vendor/github.com/hashicorp/go-memdb/.travis.yml new file mode 100644 index 00000000..9e770fa2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - "1.10" + +script: + - go test diff --git a/vendor/github.com/hashicorp/go-memdb/LICENSE b/vendor/github.com/hashicorp/go-memdb/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-memdb/README.md b/vendor/github.com/hashicorp/go-memdb/README.md new file mode 100644 index 00000000..65e1eaef --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/README.md @@ -0,0 +1,98 @@ +# go-memdb + +Provides the `memdb` package that implements a simple in-memory database +built on immutable radix trees. The database provides Atomicity, Consistency +and Isolation from ACID. Being that it is in-memory, it does not provide durability. +The database is instantiated with a schema that specifies the tables and indices +that exist and allows transactions to be executed. 
+
+The database provides the following:
+
+* Multi-Version Concurrency Control (MVCC) - By leveraging immutable radix trees
+  the database is able to support any number of concurrent readers without locking,
+  and allows a writer to make progress.
+
+* Transaction Support - The database allows for rich transactions, in which multiple
+  objects are inserted, updated or deleted. The transactions can span multiple tables,
+  and are applied atomically. The database provides atomicity and isolation in ACID
+  terminology, such that until commit the updates are not visible.
+
+* Rich Indexing - Tables can support any number of indexes, which can be simple like
+  a single field index, or more advanced compound field indexes. Certain types like
+  UUID can be efficiently compressed from strings into byte indexes for reduced
+  storage requirements.
+
+* Watches - Callers can populate a watch set as part of a query, which can be used to
+  detect when a modification has been made to the database which affects the query
+  results. This lets callers easily watch for changes in the database in a very general
+  way.
+
+For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-memdb).
+
+Example
+=======
+
+Below is a simple example of usage:
+
+```go
+// Create a sample struct
+type Person struct {
+	Email string
+	Name  string
+	Age   int
+}
+
+// Create the DB schema
+schema := &memdb.DBSchema{
+	Tables: map[string]*memdb.TableSchema{
+		"person": &memdb.TableSchema{
+			Name: "person",
+			Indexes: map[string]*memdb.IndexSchema{
+				"id": &memdb.IndexSchema{
+					Name:    "id",
+					Unique:  true,
+					Indexer: &memdb.StringFieldIndex{Field: "Email"},
+				},
+			},
+		},
+	},
+}
+
+// Create a new database
+db, err := memdb.NewMemDB(schema)
+if err != nil {
+	panic(err)
+}
+
+// Create a write transaction
+txn := db.Txn(true)
+
+// Insert a new person
+p := &Person{"joe@aol.com", "Joe", 30}
+if err := txn.Insert("person", p); err != nil {
+	panic(err)
+}
+
+// Commit the transaction
+txn.Commit()
+
+// Create read-only transaction
+txn = db.Txn(false)
+defer txn.Abort()
+
+// Lookup by email
+raw, err := txn.First("person", "id", "joe@aol.com")
+if err != nil {
+	panic(err)
+}
+
+// Say hi!
+fmt.Printf("Hello %s!", raw.(*Person).Name)
+```
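+
+Watch channels can be collected while querying and then waited on as a
+group. A minimal sketch, assuming the `FirstWatch` and `WatchSet` helpers
+that this package provides in `txn.go` and `watch.go` (not shown in this
+hunk):
+
+```go
+// Re-run the lookup, but also collect its watch channel.
+ws := memdb.NewWatchSet()
+watchCh, _, err := txn.FirstWatch("person", "id", "joe@aol.com")
+if err != nil {
+	panic(err)
+}
+ws.Add(watchCh)
+
+// Build a timeout channel compatible with WatchSet.Watch.
+timeoutCh := make(chan struct{})
+time.AfterFunc(10*time.Second, func() { close(timeoutCh) })
+
+// Block until a write affects the result or the timeout fires; the
+// return value is true if we timed out.
+timedOut := ws.Watch(timeoutCh)
+fmt.Println(timedOut)
+```
+
diff --git a/vendor/github.com/hashicorp/go-memdb/filter.go b/vendor/github.com/hashicorp/go-memdb/filter.go
new file mode 100644
index 00000000..2e3a9b3f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/filter.go
@@ -0,0 +1,33 @@
+package memdb
+
+// FilterFunc is a function that takes the results of an iterator and returns
+// whether the result should be filtered out.
+type FilterFunc func(interface{}) bool
+
+// FilterIterator is used to wrap a ResultIterator and apply a filter over it.
+type FilterIterator struct {
+	// filter is the filter function applied over the base iterator.
+	filter FilterFunc
+
+	// iter is the iterator that is being wrapped.
+	iter ResultIterator
+}
+
+func NewFilterIterator(wrap ResultIterator, filter FilterFunc) *FilterIterator {
+	return &FilterIterator{
+		filter: filter,
+		iter:   wrap,
+	}
+}
+
+// WatchCh returns the watch channel of the wrapped iterator.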
+func (f *FilterIterator) WatchCh() <-chan struct{} { return f.iter.WatchCh() }
+
+// Next returns the next non-filtered result from the wrapped iterator.
+func (f *FilterIterator) Next() interface{} {
+	for {
+		if value := f.iter.Next(); value == nil || !f.filter(value) {
+			return value
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/go.mod b/vendor/github.com/hashicorp/go-memdb/go.mod
new file mode 100644
index 00000000..36330863
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/go.mod
@@ -0,0 +1,5 @@
+module github.com/hashicorp/go-memdb
+
+go 1.12
+
+require github.com/hashicorp/go-immutable-radix v1.0.0
diff --git a/vendor/github.com/hashicorp/go-memdb/go.sum b/vendor/github.com/hashicorp/go-memdb/go.sum
new file mode 100644
index 00000000..d0643ee7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/go.sum
@@ -0,0 +1,6 @@
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
diff --git a/vendor/github.com/hashicorp/go-memdb/index.go b/vendor/github.com/hashicorp/go-memdb/index.go
new file mode 100644
index 00000000..e368319e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/index.go
@@ -0,0 +1,848 @@
+package memdb
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// Indexer is an interface used for defining indexes. Indexes are used
+// for efficient lookup of objects in a MemDB table. An Indexer must also
+// implement one of SingleIndexer or MultiIndexer.
+//
+// Indexers are primarily responsible for returning the lookup key as
+// a byte slice. The byte slice is the key data in the underlying data storage.
+type Indexer interface {
+	// FromArgs is called to build the exact index key from a list of arguments.
+	FromArgs(args ...interface{}) ([]byte, error)
+}
+
+// SingleIndexer is an interface used for defining indexes that generate a
+// single value per object.
+type SingleIndexer interface {
+	// FromObject extracts the index value from an object. The return values
+	// are whether the index value was found, the index value, and any error
+	// while extracting the index value, respectively.
+	FromObject(raw interface{}) (bool, []byte, error)
+}
+
+// MultiIndexer is an interface used for defining indexes that generate
+// multiple values per object. Each value is stored as a separate index
+// pointing to the same object.
+//
+// For example, an index that extracts the first and last name of a person
+// and allows lookup based on either would be a MultiIndexer. The FromObject
+// of this example would split the first and last name and return both as
+// values.
+type MultiIndexer interface {
+	// FromObject extracts index values from an object. The return values
+	// are the same as a SingleIndexer except there can be multiple index
+	// values.
+	FromObject(raw interface{}) (bool, [][]byte, error)
+}
+
+// PrefixIndexer is an optional interface on top of an Indexer that allows
+// indexes to support prefix-based iteration.
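+//
+// Callers reach prefix iteration through the "_prefix" index-name convention
+// understood by Txn.Get and related lookups. A hedged sketch against the
+// README's "person" table (the argument value is illustrative):
+//
+//	it, err := txn.Get("person", "id_prefix", "joe@")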
+type PrefixIndexer interface { + // PrefixFromArgs is the same as FromArgs for an Indexer except that + // the index value returned should return all prefix-matched values. + PrefixFromArgs(args ...interface{}) ([]byte, error) +} + +// StringFieldIndex is used to extract a field from an object +// using reflection and builds an index on that field. +type StringFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + isPtr := fv.Kind() == reflect.Ptr + fv = reflect.Indirect(fv) + if !isPtr && !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid %v ", s.Field, obj, isPtr) + } + + if isPtr && !fv.IsValid() { + val := "" + return true, []byte(val), nil + } + + val := fv.String() + if val == "" { + return false, nil, nil + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + return true, []byte(val), nil +} + +func (s *StringFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringSliceFieldIndex builds an index from a field on an object that is a +// string slice ([]string). Each value within the string slice can be used for +// lookup. +type StringSliceFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringSliceFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != reflect.Slice || fv.Type().Elem().Kind() != reflect.String { + return false, nil, fmt.Errorf("field '%s' is not a string slice", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for i := 0; i < fv.Len(); i++ { + val := fv.Index(i).String() + if val == "" { + continue + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + vals = append(vals, []byte(val)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +func (s *StringSliceFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringSliceFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) 
+ if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringMapFieldIndex is used to extract a field of type map[string]string +// from an object using reflection and builds an index on that field. +// +// Note that although FromArgs in theory supports using either one or +// two arguments, there is a bug: FromObject only creates an index +// using key/value, and does not also create an index using key. This +// means a lookup using one argument will never actually work. +// +// It is currently left as-is to prevent backwards compatibility +// issues. +// +// TODO: Fix this in the next major bump. +type StringMapFieldIndex struct { + Field string + Lowercase bool +} + +var MapType = reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf("")).Kind() + +func (s *StringMapFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != MapType { + return false, nil, fmt.Errorf("field '%s' is not a map[string]string", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for _, key := range fv.MapKeys() { + k := key.String() + if k == "" { + continue + } + val := fv.MapIndex(key).String() + + if s.Lowercase { + k = strings.ToLower(k) + val = strings.ToLower(val) + } + + // Add the null character as a terminator + k += "\x00" + val + "\x00" + + vals = append(vals, []byte(k)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +// WARNING: Because of a bug in FromObject, this function will never return +// a value when using the single-argument version. +func (s *StringMapFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) > 2 || len(args) == 0 { + return nil, fmt.Errorf("must provide one or two arguments") + } + key, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + key = strings.ToLower(key) + } + // Add the null character as a terminator + key += "\x00" + + if len(args) == 2 { + val, ok := args[1].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[1]) + } + if s.Lowercase { + val = strings.ToLower(val) + } + // Add the null character as a terminator + key += val + "\x00" + } + + return []byte(key), nil +} + +// IntFieldIndex is used to extract an int field from an object using +// reflection and builds an index on that field. 
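+//
+// A hedged schema sketch (the "Age" field name is an assumption for
+// illustration):
+//
+//	"age": &memdb.IndexSchema{
+//		Name:    "age",
+//		Indexer: &memdb.IntFieldIndex{Field: "Age"},
+//	}
+//
+// Lookups then take a single int argument, e.g. txn.First("person", "age", 30).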
+type IntFieldIndex struct { + Field string +} + +func (i *IntFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(i.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj) + } + + // Check the type + k := fv.Kind() + size, ok := IsIntType(k) + if !ok { + return false, nil, fmt.Errorf("field %q is of type %v; want an int", i.Field, k) + } + + // Get the value and encode it + val := fv.Int() + buf := make([]byte, size) + binary.PutVarint(buf, val) + + return true, buf, nil +} + +func (i *IntFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + v := reflect.ValueOf(args[0]) + if !v.IsValid() { + return nil, fmt.Errorf("%#v is invalid", args[0]) + } + + k := v.Kind() + size, ok := IsIntType(k) + if !ok { + return nil, fmt.Errorf("arg is of type %v; want a int", k) + } + + val := v.Int() + buf := make([]byte, size) + binary.PutVarint(buf, val) + + return buf, nil +} + +// IsIntType returns whether the passed type is a type of int and the number +// of bytes needed to encode the type. +func IsIntType(k reflect.Kind) (size int, okay bool) { + switch k { + case reflect.Int: + return binary.MaxVarintLen64, true + case reflect.Int8: + return 2, true + case reflect.Int16: + return binary.MaxVarintLen16, true + case reflect.Int32: + return binary.MaxVarintLen32, true + case reflect.Int64: + return binary.MaxVarintLen64, true + default: + return 0, false + } +} + +// UintFieldIndex is used to extract a uint field from an object using +// reflection and builds an index on that field. +type UintFieldIndex struct { + Field string +} + +func (u *UintFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(u.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj) + } + + // Check the type + k := fv.Kind() + size, ok := IsUintType(k) + if !ok { + return false, nil, fmt.Errorf("field %q is of type %v; want a uint", u.Field, k) + } + + // Get the value and encode it + val := fv.Uint() + buf := make([]byte, size) + binary.PutUvarint(buf, val) + + return true, buf, nil +} + +func (u *UintFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + v := reflect.ValueOf(args[0]) + if !v.IsValid() { + return nil, fmt.Errorf("%#v is invalid", args[0]) + } + + k := v.Kind() + size, ok := IsUintType(k) + if !ok { + return nil, fmt.Errorf("arg is of type %v; want a uint", k) + } + + val := v.Uint() + buf := make([]byte, size) + binary.PutUvarint(buf, val) + + return buf, nil +} + +// IsUintType returns whether the passed type is a type of uint and the number +// of bytes needed to encode the type. 
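+//
+// For example, IsUintType(reflect.Uint32) returns (binary.MaxVarintLen32, true),
+// while IsUintType(reflect.String) returns (0, false).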
+func IsUintType(k reflect.Kind) (size int, okay bool) { + switch k { + case reflect.Uint: + return binary.MaxVarintLen64, true + case reflect.Uint8: + return 2, true + case reflect.Uint16: + return binary.MaxVarintLen16, true + case reflect.Uint32: + return binary.MaxVarintLen32, true + case reflect.Uint64: + return binary.MaxVarintLen64, true + default: + return 0, false + } +} + +// UUIDFieldIndex is used to extract a field from an object +// using reflection and builds an index on that field by treating +// it as a UUID. This is an optimization to using a StringFieldIndex +// as the UUID can be more compactly represented in byte form. +type UUIDFieldIndex struct { + Field string +} + +func (u *UUIDFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(u.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj) + } + + val := fv.String() + if val == "" { + return false, nil, nil + } + + buf, err := u.parseString(val, true) + return true, buf, err +} + +func (u *UUIDFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + switch arg := args[0].(type) { + case string: + return u.parseString(arg, true) + case []byte: + if len(arg) != 16 { + return nil, fmt.Errorf("byte slice must be 16 characters") + } + return arg, nil + default: + return nil, + fmt.Errorf("argument must be a string or byte slice: %#v", args[0]) + } +} + +func (u *UUIDFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + switch arg := args[0].(type) { + case string: + return u.parseString(arg, false) + case []byte: + return arg, nil + default: + return nil, + fmt.Errorf("argument must be a string or byte slice: %#v", args[0]) + } +} + +// parseString parses a UUID from the string. If enforceLength is false, it will +// parse a partial UUID. An error is returned if the input, stripped of hyphens, +// is not even length. +func (u *UUIDFieldIndex) parseString(s string, enforceLength bool) ([]byte, error) { + // Verify the length + l := len(s) + if enforceLength && l != 36 { + return nil, fmt.Errorf("UUID must be 36 characters") + } else if l > 36 { + return nil, fmt.Errorf("Invalid UUID length. UUID have 36 characters; got %d", l) + } + + hyphens := strings.Count(s, "-") + if hyphens > 4 { + return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens) + } + + // The sanitized length is the length of the original string without the "-". + sanitized := strings.Replace(s, "-", "", -1) + sanitizedLength := len(sanitized) + if sanitizedLength%2 != 0 { + return nil, fmt.Errorf("Input (without hyphens) must be even length") + } + + dec, err := hex.DecodeString(sanitized) + if err != nil { + return nil, fmt.Errorf("Invalid UUID: %v", err) + } + + return dec, nil +} + +// FieldSetIndex is used to extract a field from an object using reflection and +// builds an index on whether the field is set by comparing it against its +// type's nil value. 
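+//
+// FromObject emits []byte{1} when the field differs from its type's zero
+// value and []byte{0} otherwise, so lookups take a single bool argument.
+// A hedged sketch (the "Address" field and "has_address" index name are
+// assumptions for illustration):
+//
+//	"has_address": &memdb.IndexSchema{
+//		Name:    "has_address",
+//		Indexer: &memdb.FieldSetIndex{Field: "Address"},
+//	}
+//
+// followed by a lookup such as txn.Get("person", "has_address", true).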
+type FieldSetIndex struct { + Field string +} + +func (f *FieldSetIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(f.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", f.Field, obj) + } + + if fv.Interface() == reflect.Zero(fv.Type()).Interface() { + return true, []byte{0}, nil + } + + return true, []byte{1}, nil +} + +func (f *FieldSetIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// ConditionalIndex builds an index based on a condition specified by a passed +// user function. This function may examine the passed object and return a +// boolean to encapsulate an arbitrarily complex conditional. +type ConditionalIndex struct { + Conditional ConditionalIndexFunc +} + +// ConditionalIndexFunc is the required function interface for a +// ConditionalIndex. +type ConditionalIndexFunc func(obj interface{}) (bool, error) + +func (c *ConditionalIndex) FromObject(obj interface{}) (bool, []byte, error) { + // Call the user's function + res, err := c.Conditional(obj) + if err != nil { + return false, nil, fmt.Errorf("ConditionalIndexFunc(%#v) failed: %v", obj, err) + } + + if res { + return true, []byte{1}, nil + } + + return true, []byte{0}, nil +} + +func (c *ConditionalIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// fromBoolArgs is a helper that expects only a single boolean argument and +// returns a single length byte array containing either a one or zero depending +// on whether the passed input is true or false respectively. +func fromBoolArgs(args []interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + if val, ok := args[0].(bool); !ok { + return nil, fmt.Errorf("argument must be a boolean type: %#v", args[0]) + } else if val { + return []byte{1}, nil + } + + return []byte{0}, nil +} + +// CompoundIndex is used to build an index using multiple sub-indexes +// Prefix based iteration is supported as long as the appropriate prefix +// of indexers support it. All sub-indexers are only assumed to expect +// a single argument. +type CompoundIndex struct { + Indexes []Indexer + + // AllowMissing results in an index based on only the indexers + // that return data. If true, you may end up with 2/3 columns + // indexed which might be useful for an index scan. Otherwise, + // the CompoundIndex requires all indexers to be satisfied. + AllowMissing bool +} + +func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) { + var out []byte + for i, idxRaw := range c.Indexes { + idx, ok := idxRaw.(SingleIndexer) + if !ok { + return false, nil, fmt.Errorf("sub-index %d error: %s", i, "sub-index must be a SingleIndexer") + } + ok, val, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break + } else { + return false, nil, nil + } + } + out = append(out, val...) + } + return true, out, nil +} + +func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != len(c.Indexes) { + return nil, fmt.Errorf("non-equivalent argument count and index fields") + } + var out []byte + for i, arg := range args { + val, err := c.Indexes[i].FromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) 
+ } + return out, nil +} + +func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + if len(args) > len(c.Indexes) { + return nil, fmt.Errorf("more arguments than index fields") + } + var out []byte + for i, arg := range args { + if i+1 < len(args) { + val, err := c.Indexes[i].FromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } else { + prefixIndexer, ok := c.Indexes[i].(PrefixIndexer) + if !ok { + return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i) + } + val, err := prefixIndexer.PrefixFromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } + } + return out, nil +} + +// CompoundMultiIndex is used to build an index using multiple +// sub-indexes. +// +// Unlike CompoundIndex, CompoundMultiIndex can have both +// SingleIndexer and MultiIndexer sub-indexers. However, each +// MultiIndexer adds considerable overhead/complexity in terms of +// the number of indexes created under-the-hood. It is not suggested +// to use more than one or two, if possible. +// +// Another change from CompoundIndexer is that if AllowMissing is +// set, not only is it valid to have empty index fields, but it will +// still create index values up to the first empty index. This means +// that if you have a value with an empty field, rather than using a +// prefix for lookup, you can simply pass in less arguments. As an +// example, if {Foo, Bar} is indexed but Bar is missing for a value +// and AllowMissing is set, an index will still be created for {Foo} +// and it is valid to do a lookup passing in only Foo as an argument. +// Note that the ordering isn't guaranteed -- it's last-insert wins, +// but this is true if you have two objects that have the same +// indexes not using AllowMissing anyways. +// +// Because StringMapFieldIndexers can take a varying number of args, +// it is currently a requirement that whenever it is used, two +// arguments must _always_ be provided for it. In theory we only +// need one, except a bug in that indexer means the single-argument +// version will never work. You can leave the second argument nil, +// but it will never produce a value. We support this for whenever +// that bug is fixed, likely in a next major version bump. +// +// Prefix-based indexing is not currently supported. +type CompoundMultiIndex struct { + Indexes []Indexer + + // AllowMissing results in an index based on only the indexers + // that return data. If true, you may end up with 2/3 columns + // indexed which might be useful for an index scan. Otherwise, + // CompoundMultiIndex requires all indexers to be satisfied. + AllowMissing bool +} + +func (c *CompoundMultiIndex) FromObject(raw interface{}) (bool, [][]byte, error) { + // At each entry, builder is storing the results from the next index + builder := make([][][]byte, 0, len(c.Indexes)) + // Start with something higher to avoid resizing if possible + out := make([][]byte, 0, len(c.Indexes)^3) + +forloop: + // This loop goes through each indexer and adds the value(s) provided to the next + // entry in the slice. We can then later walk it like a tree to construct the indices. 
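+	// For example (sketch): with sub-indexers {A, B}, where A yields "a1"
+	// and B yields {"b1", "b2"}, builder becomes [["a1"], ["b1", "b2"]] and
+	// the tree walk below emits the compound keys "a1b1" and "a1b2".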
+ for i, idxRaw := range c.Indexes { + switch idx := idxRaw.(type) { + case SingleIndexer: + ok, val, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("single sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break forloop + } else { + return false, nil, nil + } + } + builder = append(builder, [][]byte{val}) + + case MultiIndexer: + ok, vals, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("multi sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break forloop + } else { + return false, nil, nil + } + } + + // Add each of the new values to each of the old values + builder = append(builder, vals) + + default: + return false, nil, fmt.Errorf("sub-index %d does not satisfy either SingleIndexer or MultiIndexer", i) + } + } + + // We are walking through the builder slice essentially in a depth-first fashion, + // building the prefix and leaves as we go. If AllowMissing is false, we only insert + // these full paths to leaves. Otherwise, we also insert each prefix along the way. + // This allows for lookup in FromArgs when AllowMissing is true that does not contain + // the full set of arguments. e.g. for {Foo, Bar} where an object has only the Foo + // field specified as "abc", it is valid to call FromArgs with just "abc". + var walkVals func([]byte, int) + walkVals = func(currPrefix []byte, depth int) { + if depth == len(builder)-1 { + // These are the "leaves", so append directly + for _, v := range builder[depth] { + out = append(out, append(currPrefix, v...)) + } + return + } + for _, v := range builder[depth] { + nextPrefix := append(currPrefix, v...) + if c.AllowMissing { + out = append(out, nextPrefix) + } + walkVals(nextPrefix, depth+1) + } + } + + walkVals(nil, 0) + + return true, out, nil +} + +func (c *CompoundMultiIndex) FromArgs(args ...interface{}) ([]byte, error) { + var stringMapCount int + var argCount int + for _, index := range c.Indexes { + if argCount >= len(args) { + break + } + if _, ok := index.(*StringMapFieldIndex); ok { + // We require pairs for StringMapFieldIndex, but only got one + if argCount+1 >= len(args) { + return nil, errors.New("invalid number of arguments") + } + stringMapCount++ + argCount += 2 + } else { + argCount++ + } + } + argCount = 0 + + switch c.AllowMissing { + case true: + if len(args) > len(c.Indexes)+stringMapCount { + return nil, errors.New("too many arguments") + } + + default: + if len(args) != len(c.Indexes)+stringMapCount { + return nil, errors.New("number of arguments does not equal number of indexers") + } + } + + var out []byte + var val []byte + var err error + for i, idx := range c.Indexes { + if argCount >= len(args) { + // We're done; should only hit this if AllowMissing + break + } + if _, ok := idx.(*StringMapFieldIndex); ok { + if args[argCount+1] == nil { + val, err = idx.FromArgs(args[argCount]) + } else { + val, err = idx.FromArgs(args[argCount : argCount+2]...) + } + argCount += 2 + } else { + val, err = idx.FromArgs(args[argCount]) + argCount++ + } + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-memdb/memdb.go b/vendor/github.com/hashicorp/go-memdb/memdb.go new file mode 100644 index 00000000..65c92073 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/memdb.go @@ -0,0 +1,97 @@ +// Package memdb provides an in-memory database that supports transactions +// and MVCC. 
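+//
+// A hedged write-path sketch (the table name and obj are assumptions for
+// illustration):
+//
+//	txn := db.Txn(true) // takes the single writer lock
+//	defer txn.Abort()   // no-op once Commit has run
+//	if err := txn.Insert("person", obj); err != nil {
+//		return err
+//	}
+//	txn.Commit()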
+package memdb + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/hashicorp/go-immutable-radix" +) + +// MemDB is an in-memory database. +// +// MemDB provides a table abstraction to store objects (rows) with multiple +// indexes based on inserted values. The database makes use of immutable radix +// trees to provide transactions and MVCC. +type MemDB struct { + schema *DBSchema + root unsafe.Pointer // *iradix.Tree underneath + primary bool + + // There can only be a single writer at once + writer sync.Mutex +} + +// NewMemDB creates a new MemDB with the given schema +func NewMemDB(schema *DBSchema) (*MemDB, error) { + // Validate the schema + if err := schema.Validate(); err != nil { + return nil, err + } + + // Create the MemDB + db := &MemDB{ + schema: schema, + root: unsafe.Pointer(iradix.New()), + primary: true, + } + if err := db.initialize(); err != nil { + return nil, err + } + + return db, nil +} + +// getRoot is used to do an atomic load of the root pointer +func (db *MemDB) getRoot() *iradix.Tree { + root := (*iradix.Tree)(atomic.LoadPointer(&db.root)) + return root +} + +// Txn is used to start a new transaction, in either read or write mode. +// There can only be a single concurrent writer, but any number of readers. +func (db *MemDB) Txn(write bool) *Txn { + if write { + db.writer.Lock() + } + txn := &Txn{ + db: db, + write: write, + rootTxn: db.getRoot().Txn(), + } + return txn +} + +// Snapshot is used to capture a point-in-time snapshot +// of the database that will not be affected by any write +// operations to the existing DB. +func (db *MemDB) Snapshot() *MemDB { + clone := &MemDB{ + schema: db.schema, + root: unsafe.Pointer(db.getRoot()), + primary: false, + } + return clone +} + +// initialize is used to setup the DB for use after creation. This should +// be called only once after allocating a MemDB. +func (db *MemDB) initialize() error { + root := db.getRoot() + for tName, tableSchema := range db.schema.Tables { + for iName := range tableSchema.Indexes { + index := iradix.New() + path := indexPath(tName, iName) + root, _, _ = root.Insert(path, index) + } + } + db.root = unsafe.Pointer(root) + return nil +} + +// indexPath returns the path from the root to the given table index +func indexPath(table, index string) []byte { + return []byte(table + "." + index) +} diff --git a/vendor/github.com/hashicorp/go-memdb/schema.go b/vendor/github.com/hashicorp/go-memdb/schema.go new file mode 100644 index 00000000..e6a9b526 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/schema.go @@ -0,0 +1,114 @@ +package memdb + +import "fmt" + +// DBSchema is the schema to use for the full database with a MemDB instance. +// +// MemDB will require a valid schema. Schema validation can be tested using +// the Validate function. Calling this function is recommended in unit tests. +type DBSchema struct { + // Tables is the set of tables within this database. The key is the + // table name and must match the Name in TableSchema. + Tables map[string]*TableSchema +} + +// Validate validates the schema. +func (s *DBSchema) Validate() error { + if s == nil { + return fmt.Errorf("schema is nil") + } + + if len(s.Tables) == 0 { + return fmt.Errorf("schema has no tables defined") + } + + for name, table := range s.Tables { + if name != table.Name { + return fmt.Errorf("table name mis-match for '%s'", name) + } + + if err := table.Validate(); err != nil { + return fmt.Errorf("table %q: %s", name, err) + } + } + + return nil +} + +// TableSchema is the schema for a single table. 
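+//
+// Note that Validate (below) requires every table to define an "id" index
+// that is Unique and backed by a SingleIndexer.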
+type TableSchema struct { + // Name of the table. This must match the key in the Tables map in DBSchema. + Name string + + // Indexes is the set of indexes for querying this table. The key + // is a unique name for the index and must match the Name in the + // IndexSchema. + Indexes map[string]*IndexSchema +} + +// Validate is used to validate the table schema +func (s *TableSchema) Validate() error { + if s.Name == "" { + return fmt.Errorf("missing table name") + } + + if len(s.Indexes) == 0 { + return fmt.Errorf("missing table indexes for '%s'", s.Name) + } + + if _, ok := s.Indexes["id"]; !ok { + return fmt.Errorf("must have id index") + } + + if !s.Indexes["id"].Unique { + return fmt.Errorf("id index must be unique") + } + + if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok { + return fmt.Errorf("id index must be a SingleIndexer") + } + + for name, index := range s.Indexes { + if name != index.Name { + return fmt.Errorf("index name mis-match for '%s'", name) + } + + if err := index.Validate(); err != nil { + return fmt.Errorf("index %q: %s", name, err) + } + } + + return nil +} + +// IndexSchema is the schema for an index. An index defines how a table is +// queried. +type IndexSchema struct { + // Name of the index. This must be unique among a tables set of indexes. + // This must match the key in the map of Indexes for a TableSchema. + Name string + + // AllowMissing if true ignores this index if it doesn't produce a + // value. For example, an index that extracts a field that doesn't + // exist from a structure. + AllowMissing bool + + Unique bool + Indexer Indexer +} + +func (s *IndexSchema) Validate() error { + if s.Name == "" { + return fmt.Errorf("missing index name") + } + if s.Indexer == nil { + return fmt.Errorf("missing index function for '%s'", s.Name) + } + switch s.Indexer.(type) { + case SingleIndexer: + case MultiIndexer: + default: + return fmt.Errorf("indexer for '%s' must be a SingleIndexer or MultiIndexer", s.Name) + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-memdb/txn.go b/vendor/github.com/hashicorp/go-memdb/txn.go new file mode 100644 index 00000000..2b85087e --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/txn.go @@ -0,0 +1,644 @@ +package memdb + +import ( + "bytes" + "fmt" + "strings" + "sync/atomic" + "unsafe" + + "github.com/hashicorp/go-immutable-radix" +) + +const ( + id = "id" +) + +var ( + // ErrNotFound is returned when the requested item is not found + ErrNotFound = fmt.Errorf("not found") +) + +// tableIndex is a tuple of (Table, Index) used for lookups +type tableIndex struct { + Table string + Index string +} + +// Txn is a transaction against a MemDB. +// This can be a read or write transaction. +type Txn struct { + db *MemDB + write bool + rootTxn *iradix.Txn + after []func() + + modified map[tableIndex]*iradix.Txn +} + +// readableIndex returns a transaction usable for reading the given +// index in a table. If a write transaction is in progress, we may need +// to use an existing modified txn. +func (txn *Txn) readableIndex(table, index string) *iradix.Txn { + // Look for existing transaction + if txn.write && txn.modified != nil { + key := tableIndex{table, index} + exist, ok := txn.modified[key] + if ok { + return exist + } + } + + // Create a read transaction + path := indexPath(table, index) + raw, _ := txn.rootTxn.Get(path) + indexTxn := raw.(*iradix.Tree).Txn() + return indexTxn +} + +// writableIndex returns a transaction usable for modifying the +// given index in a table. 
+func (txn *Txn) writableIndex(table, index string) *iradix.Txn { + if txn.modified == nil { + txn.modified = make(map[tableIndex]*iradix.Txn) + } + + // Look for existing transaction + key := tableIndex{table, index} + exist, ok := txn.modified[key] + if ok { + return exist + } + + // Start a new transaction + path := indexPath(table, index) + raw, _ := txn.rootTxn.Get(path) + indexTxn := raw.(*iradix.Tree).Txn() + + // If we are the primary DB, enable mutation tracking. Snapshots should + // not notify, otherwise we will trigger watches on the primary DB when + // the writes will not be visible. + indexTxn.TrackMutate(txn.db.primary) + + // Keep this open for the duration of the txn + txn.modified[key] = indexTxn + return indexTxn +} + +// Abort is used to cancel this transaction. +// This is a noop for read transactions. +func (txn *Txn) Abort() { + // Noop for a read transaction + if !txn.write { + return + } + + // Check if already aborted or committed + if txn.rootTxn == nil { + return + } + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() +} + +// Commit is used to finalize this transaction. +// This is a noop for read transactions. +func (txn *Txn) Commit() { + // Noop for a read transaction + if !txn.write { + return + } + + // Check if already aborted or committed + if txn.rootTxn == nil { + return + } + + // Commit each sub-transaction scoped to (table, index) + for key, subTxn := range txn.modified { + path := indexPath(key.Table, key.Index) + final := subTxn.CommitOnly() + txn.rootTxn.Insert(path, final) + } + + // Update the root of the DB + newRoot := txn.rootTxn.CommitOnly() + atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot)) + + // Now issue all of the mutation updates (this is safe to call + // even if mutation tracking isn't enabled); we do this after + // the root pointer is swapped so that waking responders will + // see the new state. + for _, subTxn := range txn.modified { + subTxn.Notify() + } + txn.rootTxn.Notify() + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() + + // Run the deferred functions, if any + for i := len(txn.after); i > 0; i-- { + fn := txn.after[i-1] + fn() + } +} + +// Insert is used to add or update an object into the given table +func (txn *Txn) Insert(table string, obj interface{}) error { + if !txn.write { + return fmt.Errorf("cannot insert in read-only transaction") + } + + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return fmt.Errorf("invalid table '%s'", table) + } + + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(obj) + if err != nil { + return fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return fmt.Errorf("object missing primary index") + } + + // Lookup the object by ID first, to see if this is an update + idTxn := txn.writableIndex(table, id) + existing, update := idTxn.Get(idVal) + + // On an update, there is an existing object with the given + // primary ID. We do the update by deleting the current object + // and inserting the new object. 
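+	// Walk every index defined on the table; writableIndex hands back a
+	// cached radix sub-transaction scoped to this (table, index) pair.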
+ for name, indexSchema := range tableSchema.Indexes { + indexTxn := txn.writableIndex(table, name) + + // Determine the new index value + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(obj) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(obj) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if ok && !indexSchema.Unique { + for i := range vals { + vals[i] = append(vals[i], idVal...) + } + } + + // Handle the update by deleting from the index first + if update { + var ( + okExist bool + valsExist [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var valExist []byte + okExist, valExist, err = indexer.FromObject(existing) + valsExist = [][]byte{valExist} + case MultiIndexer: + okExist, valsExist, err = indexer.FromObject(existing) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + if okExist { + for i, valExist := range valsExist { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if !indexSchema.Unique { + valExist = append(valExist, idVal...) + } + + // If we are writing to the same index with the same value, + // we can avoid the delete as the insert will overwrite the + // value anyways. + if i >= len(vals) || !bytes.Equal(valExist, vals[i]) { + indexTxn.Delete(valExist) + } + } + } + } + + // If there is no index value, either this is an error or an expected + // case and we can skip updating + if !ok { + if indexSchema.AllowMissing { + continue + } else { + return fmt.Errorf("missing value for index '%s'", name) + } + } + + // Update the value of the index + for _, val := range vals { + indexTxn.Insert(val, obj) + } + } + return nil +} + +// Delete is used to delete a single object from the given table +// This object must already exist in the table +func (txn *Txn) Delete(table string, obj interface{}) error { + if !txn.write { + return fmt.Errorf("cannot delete in read-only transaction") + } + + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return fmt.Errorf("invalid table '%s'", table) + } + + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(obj) + if err != nil { + return fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return fmt.Errorf("object missing primary index") + } + + // Lookup the object by ID first, check fi we should continue + idTxn := txn.writableIndex(table, id) + existing, ok := idTxn.Get(idVal) + if !ok { + return ErrNotFound + } + + // Remove the object from all the indexes + for name, indexSchema := range tableSchema.Indexes { + indexTxn := txn.writableIndex(table, name) + + // Handle the update by deleting from the index first + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(existing) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(existing) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", 
name, err) + } + if ok { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + for _, val := range vals { + if !indexSchema.Unique { + val = append(val, idVal...) + } + indexTxn.Delete(val) + } + } + } + return nil +} + +// DeletePrefix is used to delete an entire subtree based on a prefix. +// The given index must be a prefix index, and will be used to perform a scan and enumerate the set of objects to delete. +// These will be removed from all other indexes, and then a special prefix operation will delete the objects from the given index in an efficient subtree delete operation. +// This is useful when you have a very large number of objects indexed by the given index, along with a much smaller number of entries in the other indexes for those objects. +func (txn *Txn) DeletePrefix(table string, prefix_index string, prefix string) (bool, error) { + if !txn.write { + return false, fmt.Errorf("cannot delete in read-only transaction") + } + + if !strings.HasSuffix(prefix_index, "_prefix") { + return false, fmt.Errorf("Index name for DeletePrefix must be a prefix index, Got %v ", prefix_index) + } + + deletePrefixIndex := strings.TrimSuffix(prefix_index, "_prefix") + + // Get an iterator over all of the keys with the given prefix. + entries, err := txn.Get(table, prefix_index, prefix) + if err != nil { + return false, fmt.Errorf("failed kvs lookup: %s", err) + } + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return false, fmt.Errorf("invalid table '%s'", table) + } + + foundAny := false + for entry := entries.Next(); entry != nil; entry = entries.Next() { + if !foundAny { + foundAny = true + } + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(entry) + if err != nil { + return false, fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return false, fmt.Errorf("object missing primary index") + } + // Remove the object from all the indexes except the given prefix index + for name, indexSchema := range tableSchema.Indexes { + if name == deletePrefixIndex { + continue + } + indexTxn := txn.writableIndex(table, name) + + // Handle the update by deleting from the index first + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(entry) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(entry) + } + if err != nil { + return false, fmt.Errorf("failed to build index '%s': %v", name, err) + } + + if ok { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + for _, val := range vals { + if !indexSchema.Unique { + val = append(val, idVal...) 
+ } + indexTxn.Delete(val) + } + } + } + } + if foundAny { + indexTxn := txn.writableIndex(table, deletePrefixIndex) + ok = indexTxn.DeletePrefix([]byte(prefix)) + if !ok { + panic(fmt.Errorf("prefix %v matched some entries but DeletePrefix did not delete any ", prefix)) + } + return true, nil + } + return false, nil +} + +// DeleteAll is used to delete all the objects in a given table +// matching the constraints on the index +func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) { + if !txn.write { + return 0, fmt.Errorf("cannot delete in read-only transaction") + } + + // Get all the objects + iter, err := txn.Get(table, index, args...) + if err != nil { + return 0, err + } + + // Put them into a slice so there are no safety concerns while actually + // performing the deletes + var objs []interface{} + for { + obj := iter.Next() + if obj == nil { + break + } + + objs = append(objs, obj) + } + + // Do the deletes + num := 0 + for _, obj := range objs { + if err := txn.Delete(table, obj); err != nil { + return num, err + } + num++ + } + return num, nil +} + +// FirstWatch is used to return the first matching object for +// the given constraints on the index along with the watch channel +func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) { + // Get the index value + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, nil, err + } + + // Get the index itself + indexTxn := txn.readableIndex(table, indexSchema.Name) + + // Do an exact lookup + if indexSchema.Unique && val != nil && indexSchema.Name == index { + watch, obj, ok := indexTxn.GetWatch(val) + if !ok { + return watch, nil, nil + } + return watch, obj, nil + } + + // Handle non-unique index by using an iterator and getting the first value + iter := indexTxn.Root().Iterator() + watch := iter.SeekPrefixWatch(val) + _, value, _ := iter.Next() + return watch, value, nil +} + +// First is used to return the first matching object for +// the given constraints on the index +func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) { + _, val, err := txn.FirstWatch(table, index, args...) + return val, err +} + +// LongestPrefix is used to fetch the longest prefix match for the given +// constraints on the index. Note that this will not work with the memdb +// StringFieldIndex because it adds null terminators which prevent the +// algorithm from correctly finding a match (it will get to right before the +// null and fail to find a leaf node). This should only be used where the prefix +// given is capable of matching indexed entries directly, which typically only +// applies to a custom indexer. See the unit test for an example. +func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) { + // Enforce that this only works on prefix indexes. + if !strings.HasSuffix(index, "_prefix") { + return nil, fmt.Errorf("must use '%s_prefix' on index", index) + } + + // Get the index value. + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, err + } + + // This algorithm only makes sense against a unique index, otherwise the + // index keys will have the IDs appended to them. + if !indexSchema.Unique { + return nil, fmt.Errorf("index '%s' is not unique", index) + } + + // Find the longest prefix match with the given index. 
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	if _, value, ok := indexTxn.Root().LongestPrefix(val); ok {
+		return value, nil
+	}
+	return nil, nil
+}
+
+// getIndexValue is used to get the IndexSchema and the value
+// used to scan the index given the parameters. This handles prefix-based
+// scans when the index has the "_prefix" suffix. The index must support
+// prefix iteration.
+func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexSchema, []byte, error) {
+	// Get the table schema
+	tableSchema, ok := txn.db.schema.Tables[table]
+	if !ok {
+		return nil, nil, fmt.Errorf("invalid table '%s'", table)
+	}
+
+	// Check for a prefix scan
+	prefixScan := false
+	if strings.HasSuffix(index, "_prefix") {
+		index = strings.TrimSuffix(index, "_prefix")
+		prefixScan = true
+	}
+
+	// Get the index schema
+	indexSchema, ok := tableSchema.Indexes[index]
+	if !ok {
+		return nil, nil, fmt.Errorf("invalid index '%s'", index)
+	}
+
+	// Hot-path for when there are no arguments
+	if len(args) == 0 {
+		return indexSchema, nil, nil
+	}
+
+	// Special case the prefix scanning
+	if prefixScan {
+		prefixIndexer, ok := indexSchema.Indexer.(PrefixIndexer)
+		if !ok {
+			return indexSchema, nil,
+				fmt.Errorf("index '%s' does not support prefix scanning", index)
+		}
+
+		val, err := prefixIndexer.PrefixFromArgs(args...)
+		if err != nil {
+			return indexSchema, nil, fmt.Errorf("index error: %v", err)
+		}
+		return indexSchema, val, err
+	}
+
+	// Get the exact match index
+	val, err := indexSchema.Indexer.FromArgs(args...)
+	if err != nil {
+		return indexSchema, nil, fmt.Errorf("index error: %v", err)
+	}
+	return indexSchema, val, err
+}
+
+// ResultIterator is used to iterate over a list of results
+// from a Get query on a table.
+type ResultIterator interface {
+	WatchCh() <-chan struct{}
+	Next() interface{}
+}
+
+// Get is used to construct a ResultIterator over all the
+// rows that match the given constraints on an index.
+func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.Iterator()
+
+	// Seek the iterator to the appropriate subset
+	watchCh := indexIter.SeekPrefixWatch(val)
+
+	// Create an iterator
+	iter := &radixIterator{
+		iter:    indexIter,
+		watchCh: watchCh,
+	}
+	return iter, nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions.
+func (txn *Txn) Defer(fn func()) {
+	txn.after = append(txn.after, fn)
+}
+
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not
+// materializing the entire view.
+type radixIterator struct { + iter *iradix.Iterator + watchCh <-chan struct{} +} + +func (r *radixIterator) WatchCh() <-chan struct{} { + return r.watchCh +} + +func (r *radixIterator) Next() interface{} { + _, value, ok := r.iter.Next() + if !ok { + return nil + } + return value +} diff --git a/vendor/github.com/hashicorp/go-memdb/watch.go b/vendor/github.com/hashicorp/go-memdb/watch.go new file mode 100644 index 00000000..a6f01213 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/watch.go @@ -0,0 +1,129 @@ +package memdb + +import ( + "context" + "time" +) + +// WatchSet is a collection of watch channels. +type WatchSet map[<-chan struct{}]struct{} + +// NewWatchSet constructs a new watch set. +func NewWatchSet() WatchSet { + return make(map[<-chan struct{}]struct{}) +} + +// Add appends a watchCh to the WatchSet if non-nil. +func (w WatchSet) Add(watchCh <-chan struct{}) { + if w == nil { + return + } + + if _, ok := w[watchCh]; !ok { + w[watchCh] = struct{}{} + } +} + +// AddWithLimit appends a watchCh to the WatchSet if non-nil, and if the given +// softLimit hasn't been exceeded. Otherwise, it will watch the given alternate +// channel. It's expected that the altCh will be the same on many calls to this +// function, so you will exceed the soft limit a little bit if you hit this, but +// not by much. +// +// This is useful if you want to track individual items up to some limit, after +// which you watch a higher-level channel (usually a channel from start start of +// an iterator higher up in the radix tree) that will watch a superset of items. +func (w WatchSet) AddWithLimit(softLimit int, watchCh <-chan struct{}, altCh <-chan struct{}) { + // This is safe for a nil WatchSet so we don't need to check that here. + if len(w) < softLimit { + w.Add(watchCh) + } else { + w.Add(altCh) + } +} + +// Watch is used to wait for either the watch set to trigger or a timeout. +// Returns true on timeout. +func (w WatchSet) Watch(timeoutCh <-chan time.Time) bool { + if w == nil { + return false + } + + // Create a context that gets cancelled when the timeout is triggered + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + select { + case <-timeoutCh: + cancel() + case <-ctx.Done(): + } + }() + + return w.WatchCtx(ctx) == context.Canceled +} + +// WatchCtx is used to wait for either the watch set to trigger or for the +// context to be cancelled. Watch with a timeout channel can be mimicked by +// creating a context with a deadline. WatchCtx should be preferred over Watch. +func (w WatchSet) WatchCtx(ctx context.Context) error { + if w == nil { + return nil + } + + if n := len(w); n <= aFew { + idx := 0 + chunk := make([]<-chan struct{}, aFew) + for watchCh := range w { + chunk[idx] = watchCh + idx++ + } + return watchFew(ctx, chunk) + } + + return w.watchMany(ctx) +} + +// watchMany is used if there are many watchers. +func (w WatchSet) watchMany(ctx context.Context) error { + // Set up a goroutine for each watcher. + triggerCh := make(chan struct{}, 1) + watcher := func(chunk []<-chan struct{}) { + if err := watchFew(ctx, chunk); err == nil { + select { + case triggerCh <- struct{}{}: + default: + } + } + } + + // Apportion the watch channels into chunks we can feed into the + // watchFew helper. + idx := 0 + chunk := make([]<-chan struct{}, aFew) + for watchCh := range w { + subIdx := idx % aFew + chunk[subIdx] = watchCh + idx++ + + // Fire off this chunk and start a fresh one. 
+ if idx%aFew == 0 { + go watcher(chunk) + chunk = make([]<-chan struct{}, aFew) + } + } + + // Make sure to watch any residual channels in the last chunk. + if idx%aFew != 0 { + go watcher(chunk) + } + + // Wait for a channel to trigger or timeout. + select { + case <-triggerCh: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/github.com/hashicorp/go-memdb/watch_few.go b/vendor/github.com/hashicorp/go-memdb/watch_few.go new file mode 100644 index 00000000..880f098b --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/watch_few.go @@ -0,0 +1,117 @@ +package memdb + +//go:generate sh -c "go run watch-gen/main.go >watch_few.go" + +import( + "context" +) + +// aFew gives how many watchers this function is wired to support. You must +// always pass a full slice of this length, but unused channels can be nil. +const aFew = 32 + +// watchFew is used if there are only a few watchers as a performance +// optimization. +func watchFew(ctx context.Context, ch []<-chan struct{}) error { + select { + + case <-ch[0]: + return nil + + case <-ch[1]: + return nil + + case <-ch[2]: + return nil + + case <-ch[3]: + return nil + + case <-ch[4]: + return nil + + case <-ch[5]: + return nil + + case <-ch[6]: + return nil + + case <-ch[7]: + return nil + + case <-ch[8]: + return nil + + case <-ch[9]: + return nil + + case <-ch[10]: + return nil + + case <-ch[11]: + return nil + + case <-ch[12]: + return nil + + case <-ch[13]: + return nil + + case <-ch[14]: + return nil + + case <-ch[15]: + return nil + + case <-ch[16]: + return nil + + case <-ch[17]: + return nil + + case <-ch[18]: + return nil + + case <-ch[19]: + return nil + + case <-ch[20]: + return nil + + case <-ch[21]: + return nil + + case <-ch[22]: + return nil + + case <-ch[23]: + return nil + + case <-ch[24]: + return nil + + case <-ch[25]: + return nil + + case <-ch[26]: + return nil + + case <-ch[27]: + return nil + + case <-ch[28]: + return nil + + case <-ch[29]: + return nil + + case <-ch[30]: + return nil + + case <-ch[31]: + return nil + + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/LICENSE b/vendor/github.com/hashicorp/go-msgpack/LICENSE new file mode 100644 index 00000000..ccae99f6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2012, 2013 Ugorji Nwoke. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the author nor the names of its contributors may be used + to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go new file mode 100644 index 00000000..c14d810a --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go @@ -0,0 +1,143 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . + +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. + Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. + - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. + - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. 
These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +Representative Benchmark Results + +Run the benchmark suite using: + go test -bi -bench=. -benchmem + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext_dep_test.go + +*/ +package codec diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/README.md b/vendor/github.com/hashicorp/go-msgpack/codec/README.md new file mode 100644 index 00000000..6c95d1bf --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/README.md @@ -0,0 +1,174 @@ +# Codec + +High Performance and Feature-Rich Idiomatic Go Library providing +encode/decode support for different serialization formats. + +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +Online documentation: [http://godoc.org/github.com/ugorji/go/codec] + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. 
+ Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. + - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. + - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +## Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +## RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +## Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. 
for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +## Representative Benchmark Results + +A sample run of benchmark using "go test -bi -bench=. -benchmem": + + /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT) + + .............................................. + BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT + To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=." + Benchmark: + Struct recursive Depth: 1 + ApproxDeepSize Of benchmark Struct: 4694 bytes + Benchmark One-Pass Run: + v-msgpack: len: 1600 bytes + bson: len: 3025 bytes + msgpack: len: 1560 bytes + binc: len: 1187 bytes + gob: len: 1972 bytes + json: len: 2538 bytes + .............................................. + PASS + Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op + Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op + Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op + Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op + Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op + Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op + Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op + Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op + Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op + Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op + Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op + Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op + Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op + Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op + ok ugorji.net/codec 30.827s + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext\_dep\_test.go + diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go new file mode 100644 index 00000000..2bb5e8fe --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go @@ -0,0 +1,786 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "math" + // "reflect" + // "sync/atomic" + "time" + //"fmt" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
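For context on the bincDoPrune comment above: encodeFloat64 below writes a float's big-endian bytes and, when pruning, drops the trailing zero bytes so that round values cost fewer bytes on the wire (zero itself never reaches this path; it is special-cased to bincSpZeroFloat). A standalone sketch of just that pruning step, with pruneFloat64 as an illustrative name:

    package main

    import (
        "encoding/binary"
        "fmt"
        "math"
    )

    // pruneFloat64 returns the significant big-endian prefix of a float64's bits.
    func pruneFloat64(f float64) []byte {
        var b [8]byte
        binary.BigEndian.PutUint64(b[:], math.Float64bits(f))
        i := 7
        for ; i >= 0 && b[i] == 0; i-- {
        }
        return b[:i+1] // the decoder zero-fills the dropped suffix
    }

    func main() {
        fmt.Printf("%x\n", pruneFloat64(2.0))  // 40    (1 byte payload)
        fmt.Printf("%x\n", pruneFloat64(1.25)) // 3ff4  (2 bytes)
        fmt.Printf("%x\n", pruneFloat64(0.1))  // all 8 bytes are significant
    }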
+ +//var _ = fmt.Printf + +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +type bincEncDriver struct { + w encWriter + m map[string]uint16 // symbols + s uint32 // symbols sequencer + b [8]byte +} + +func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + bs := encodeTime(v.(time.Time)) + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) encodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) encodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) encodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + bigen.PutUint64(e.b[:], math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) encodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + switch { + case v >= 0: + e.encUint(bincVdPosInt<<4, true, uint64(v)) + case v == -1: + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + default: + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) encodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + switch { + case v == 0: + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + case pos && v >= 1 && v <= 16: + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + case v <= math.MaxUint8: + e.w.writen2(bd|0x0, byte(v)) + case v <= math.MaxUint16: + e.w.writen1(bd | 0x01) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.encIntegerPrune(bd, pos, v, 4) + default: + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) encodeArrayPreamble(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriver) encodeMapPreamble(length int) { + 
e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriver) encodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(c_UTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). + + l := len(v) + switch l { + case 0: + e.encBytesLen(c_UTF8, 0) + return + case 1: + e.encBytesLen(c_UTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + e.w.writeUint16(ui) + } + } else { + e.s++ + ui = uint16(e.s) + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + switch { + case l <= math.MaxUint8: + // lenprec = 0 + case l <= math.MaxUint16: + lenprec = 1 + case int64(l) <= math.MaxUint32: + lenprec = 2 + default: + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + e.w.writeUint16(ui) + } + switch lenprec { + case 0: + e.w.writen1(byte(l)) + case 1: + e.w.writeUint16(uint16(l)) + case 2: + e.w.writeUint32(uint32(l)) + default: + e.w.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == c_RAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + switch { + case v <= math.MaxUint8: + e.w.writen2(bd, byte(v)) + case v <= math.MaxUint16: + e.w.writen1(bd | 0x01) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.w.writen1(bd | 0x02) + e.w.writeUint32(uint32(v)) + default: + e.w.writen1(bd | 0x03) + e.w.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecDriver struct { + r decReader + bdRead bool + bdType valueType + bd byte + vd byte + vs byte + b [8]byte + m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) +} + +func (d *bincDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *bincDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + d.bdType = valueTypeNil + case bincSpFalse, bincSpTrue: + d.bdType = valueTypeBool + case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: + d.bdType = valueTypeFloat + case bincSpZero: + d.bdType = valueTypeUint + case bincSpNegOne: + d.bdType = valueTypeInt + default: + decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + d.bdType = valueTypeUint + case bincVdPosInt: + d.bdType = valueTypeUint + case bincVdNegInt: + d.bdType = 
valueTypeInt + case bincVdFloat: + d.bdType = valueTypeFloat + case bincVdString: + d.bdType = valueTypeString + case bincVdSymbol: + d.bdType = valueTypeSymbol + case bincVdByteArray: + d.bdType = valueTypeBytes + case bincVdTimestamp: + d.bdType = valueTypeTimestamp + case bincVdCustomExt: + d.bdType = valueTypeExt + case bincVdArray: + d.bdType = valueTypeArray + case bincVdMap: + d.bdType = valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) + } + } + return d.bdType +} + +func (d *bincDecDriver) tryDecodeAsNil() bool { + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + if d.vd != bincVdTimestamp { + decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) + } + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt + d.bdRead = false + } +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + decErr("At most 8 bytes used to represent float. Received: %v bytes", l) + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(d.r.readUint64()); break; } + switch vs := d.vs; vs & 0x7 { + case bincFlBin32: + d.decFloatPre(vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + case bincFlBin64: + d.decFloatPre(vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + default: + decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:]) + v = uint64(bigen.Uint16(d.b[6:])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:]) + v = uint64(bigen.Uint32(d.b[4:])) + case 3: + d.r.readb(d.b[4:]) + v = uint64(bigen.Uint32(d.b[4:])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:])) + case 7: + d.r.readb(d.b[:]) + v = uint64(bigen.Uint64(d.b[:])) + default: + decErr("unsigned integers with greater than 64 bits of precision not supported") + } + return +} + +func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.vd { + case bincVdPosInt: + ui = d.decUint() + i = int64(ui) + case bincVdNegInt: + ui = d.decUint() + i = -(int64(ui)) + neg = true + case bincVdSmallInt: + i = int64(d.vs) + 1 + ui = uint64(d.vs) + 1 + case bincVdSpecial: + switch d.vs { + case bincSpZero: + //i = 0 + case bincSpNegOne: + neg = true + ui = 1 + i = -1 + default: + decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) + } + default: + decErr("number can only be decoded from uint or int values. 
d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + } + return +} + +func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() + if neg { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + checkOverflow(ui, 0, bitsize) + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.vd { + case bincVdSpecial: + d.bdRead = false + switch d.vs { + case bincSpNan: + return math.NaN() + case bincSpPosInf: + return math.Inf(1) + case bincSpZeroFloat, bincSpZero: + return + case bincSpNegInf: + return math.Inf(-1) + default: + decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + } + case bincVdFloat: + f = d.decFloat() + default: + _, i, _ := d.decIntAny() + f = float64(i) + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) decodeBool() (b bool) { + switch d.bd { + case (bincVdSpecial | bincSpFalse): + // b = false + case (bincVdSpecial | bincSpTrue): + b = true + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) readMapLen() (length int) { + if d.vd != bincVdMap { + decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) readArrayLen() (length int) { + if d.vd != bincVdArray { + decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs <= 3 { + return int(d.decUint()) + } + return int(d.vs - 4) +} + +func (d *bincDecDriver) decodeString() (s string) { + switch d.vd { + case bincVdString, bincVdByteArray: + if length := d.decLen(); length > 0 { + s = string(d.r.readn(length)) + } + case bincVdSymbol: + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint32 + vs := d.vs + //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) + if vs&0x8 == 0 { + symbol = uint32(d.r.readn1()) + } else { + symbol = uint32(d.r.readUint16()) + } + if d.m == nil { + d.m = make(map[uint32]string, 16) + } + + if vs&0x4 == 0 { + s = d.m[symbol] + } else { + var slen int + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(d.r.readUint16()) + case 2: + slen = int(d.r.readUint32()) + case 3: + slen = int(d.r.readUint64()) + } + s = string(d.r.readn(slen)) + d.m[symbol] = s + } + default: + decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + var clen int + switch d.vd { + case bincVdString, bincVdByteArray: + clen = d.decLen() + default: + decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. 
Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.vd { + case bincVdCustomExt: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(l) + case bincVdByteArray: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + vt = valueTypeNil + case bincSpFalse: + vt = valueTypeBool + v = false + case bincSpTrue: + vt = valueTypeBool + v = true + case bincSpNan: + vt = valueTypeFloat + v = math.NaN() + case bincSpPosInf: + vt = valueTypeFloat + v = math.Inf(1) + case bincSpNegInf: + vt = valueTypeFloat + v = math.Inf(-1) + case bincSpZeroFloat: + vt = valueTypeFloat + v = float64(0) + case bincSpZero: + vt = valueTypeUint + v = int64(0) // int8(0) + case bincSpNegOne: + vt = valueTypeInt + v = int64(-1) // int8(-1) + default: + decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + vt = valueTypeUint + v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + vt = valueTypeUint + v = d.decUint() + case bincVdNegInt: + vt = valueTypeInt + v = -(int64(d.decUint())) + case bincVdFloat: + vt = valueTypeFloat + v = d.decFloat() + case bincVdSymbol: + vt = valueTypeSymbol + v = d.decodeString() + case bincVdString: + vt = valueTypeString + v = d.decodeString() + case bincVdByteArray: + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) + case bincVdTimestamp: + vt = valueTypeTimestamp + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + v = tt + case bincVdCustomExt: + vt = valueTypeExt + l := d.decLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt + case bincVdArray: + vt = valueTypeArray + decodeFurther = true + case bincVdMap: + vt = valueTypeMap + decodeFurther = true + default: + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. +// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
+type BincHandle struct { + BasicHandle +} + +func (h *BincHandle) newEncDriver(w encWriter) encDriver { + return &bincEncDriver{w: w} +} + +func (h *BincHandle) newDecDriver(r decReader) decDriver { + return &bincDecDriver{r: r} +} + +func (_ *BincHandle) writeExt() bool { + return true +} + +func (h *BincHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go new file mode 100644 index 00000000..851b54ac --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go @@ -0,0 +1,1048 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "io" + "reflect" + // "runtime/debug" +) + +// Some tagging information for error messages. +const ( + msgTagDec = "codec.decoder" + msgBadDesc = "Unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +type decReader interface { + readn(n int) []byte + readb([]byte) + readn1() uint8 + readUint16() uint16 + readUint32() uint32 + readUint64() uint64 +} + +type decDriver interface { + initReadNext() + tryDecodeAsNil() bool + currentEncodedType() valueType + isBuiltinType(rt uintptr) bool + decodeBuiltin(rt uintptr, v interface{}) + //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + decodeNaked() (v interface{}, vt valueType, decodeFurther bool) + decodeInt(bitsize uint8) (i int64) + decodeUint(bitsize uint8) (ui uint64) + decodeFloat(chkOverflow32 bool) (f float64) + decodeBool() (b bool) + // decodeString can also decode symbols + decodeString() (s string) + decodeBytes(bs []byte) (bsOut []byte, changed bool) + decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + readMapLen() int + readArrayLen() int +} + +type DecodeOptions struct { + // An instance of MapType is used during schema-less decoding of a map in the stream. + // If nil, we use map[interface{}]interface{} + MapType reflect.Type + // An instance of SliceType is used during schema-less decoding of an array in the stream. + // If nil, we use []interface{} + SliceType reflect.Type + // ErrorIfNoField controls whether an error is returned when decoding a map + // from a codec stream into a struct, and no matching struct field is found. 
+	ErrorIfNoField bool
+}
+
+// ------------------------------------
+
+// ioDecReader is a decReader that reads off an io.Reader
+type ioDecReader struct {
+	r  io.Reader
+	br io.ByteReader
+	x  [8]byte //temp byte array re-used internally for efficiency
+}
+
+func (z *ioDecReader) readn(n int) (bs []byte) {
+	if n <= 0 {
+		return
+	}
+	bs = make([]byte, n)
+	if _, err := io.ReadAtLeast(z.r, bs, n); err != nil {
+		panic(err)
+	}
+	return
+}
+
+func (z *ioDecReader) readb(bs []byte) {
+	if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil {
+		panic(err)
+	}
+}
+
+func (z *ioDecReader) readn1() uint8 {
+	if z.br != nil {
+		b, err := z.br.ReadByte()
+		if err != nil {
+			panic(err)
+		}
+		return b
+	}
+	z.readb(z.x[:1])
+	return z.x[0]
+}
+
+func (z *ioDecReader) readUint16() uint16 {
+	z.readb(z.x[:2])
+	return bigen.Uint16(z.x[:2])
+}
+
+func (z *ioDecReader) readUint32() uint32 {
+	z.readb(z.x[:4])
+	return bigen.Uint32(z.x[:4])
+}
+
+func (z *ioDecReader) readUint64() uint64 {
+	z.readb(z.x[:8])
+	return bigen.Uint64(z.x[:8])
+}
+
+// ------------------------------------
+
+// bytesDecReader is a decReader that reads off a byte slice with zero copying
+type bytesDecReader struct {
+	b []byte // data
+	c int    // cursor
+	a int    // available
+}
+
+func (z *bytesDecReader) consume(n int) (oldcursor int) {
+	if z.a == 0 {
+		panic(io.EOF)
+	}
+	if n > z.a {
+		decErr("Trying to read %v bytes. Only %v available", n, z.a)
+	}
+	// z.checkAvailable(n)
+	oldcursor = z.c
+	z.c = oldcursor + n
+	z.a = z.a - n
+	return
+}
+
+func (z *bytesDecReader) readn(n int) (bs []byte) {
+	if n <= 0 {
+		return
+	}
+	c0 := z.consume(n)
+	bs = z.b[c0:z.c]
+	return
+}
+
+func (z *bytesDecReader) readb(bs []byte) {
+	copy(bs, z.readn(len(bs)))
+}
+
+func (z *bytesDecReader) readn1() uint8 {
+	c0 := z.consume(1)
+	return z.b[c0]
+}
+
+// Use the binaryEncoding helper for the 4-byte and 8-byte reads, but inline
+// the 2-byte read: creating a temp slice variable and copying it to a helper
+// function is expensive for just 2 bytes.
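readUint16 below hand-inlines the big-endian decode that the 4- and 8-byte reads delegate to the bigen helper, purely to skip a slice round trip on the 2-byte path. A quick illustrative equivalence check:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        b := []byte{0x12, 0x34}
        inlined := uint16(b[1]) | uint16(b[0])<<8 // as in readUint16 below
        helper := binary.BigEndian.Uint16(b)      // what bigen does for 4/8 bytes
        fmt.Println(inlined == helper, inlined)   // true 4660
    }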
+ +func (z *bytesDecReader) readUint16() uint16 { + c0 := z.consume(2) + return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 +} + +func (z *bytesDecReader) readUint32() uint32 { + c0 := z.consume(4) + return bigen.Uint32(z.b[c0:z.c]) +} + +func (z *bytesDecReader) readUint64() uint64 { + c0 := z.consume(8) + return bigen.Uint64(z.b[c0:z.c]) +} + +// ------------------------------------ + +// decFnInfo has methods for registering handling decoding of a specific type +// based on some characteristics (builtin, extension, reflect Kind, etc) +type decFnInfo struct { + ti *typeInfo + d *Decoder + dd decDriver + xfFn func(reflect.Value, []byte) error + xfTag byte + array bool +} + +func (f *decFnInfo) builtin(rv reflect.Value) { + f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +} + +func (f *decFnInfo) rawExt(rv reflect.Value) { + xtag, xbs := f.dd.decodeExt(false, 0) + rv.Field(0).SetUint(uint64(xtag)) + rv.Field(1).SetBytes(xbs) +} + +func (f *decFnInfo) ext(rv reflect.Value) { + _, xbs := f.dd.decodeExt(true, f.xfTag) + if fnerr := f.xfFn(rv, xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryUnmarshaler + if f.ti.unmIndir == -1 { + bm = rv.Addr().Interface().(binaryUnmarshaler) + } else if f.ti.unmIndir == 0 { + bm = rv.Interface().(binaryUnmarshaler) + } else { + for j, k := int8(0), f.ti.unmIndir; j < k; j++ { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryUnmarshaler) + } + xbs, _ := f.dd.decodeBytes(nil) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) kErr(rv reflect.Value) { + decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) +} + +func (f *decFnInfo) kString(rv reflect.Value) { + rv.SetString(f.dd.decodeString()) +} + +func (f *decFnInfo) kBool(rv reflect.Value) { + rv.SetBool(f.dd.decodeBool()) +} + +func (f *decFnInfo) kInt(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(intBitsize)) +} + +func (f *decFnInfo) kInt64(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(64)) +} + +func (f *decFnInfo) kInt32(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(32)) +} + +func (f *decFnInfo) kInt8(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(8)) +} + +func (f *decFnInfo) kInt16(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(16)) +} + +func (f *decFnInfo) kFloat32(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(true)) +} + +func (f *decFnInfo) kFloat64(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(false)) +} + +func (f *decFnInfo) kUint8(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(8)) +} + +func (f *decFnInfo) kUint64(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(64)) +} + +func (f *decFnInfo) kUint(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(uintBitsize)) +} + +func (f *decFnInfo) kUint32(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(32)) +} + +func (f *decFnInfo) kUint16(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(16)) +} + +// func (f *decFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
decode kPtr called - shouldn't get called")
+// 	if rv.IsNil() {
+// 		rv.Set(reflect.New(rv.Type().Elem()))
+// 	}
+// 	f.d.decodeValue(rv.Elem())
+// }
+
+func (f *decFnInfo) kInterface(rv reflect.Value) {
+	// debugf("\t===> kInterface")
+	if !rv.IsNil() {
+		f.d.decodeValue(rv.Elem())
+		return
+	}
+	// nil interface:
+	// use some heuristics to set the nil interface to an
+	// appropriate value based on the first byte read (byte descriptor bd)
+	v, vt, decodeFurther := f.dd.decodeNaked()
+	if vt == valueTypeNil {
+		return
+	}
+	// Cannot decode into nil interface with methods (e.g. error, io.Reader, etc)
+	// if non-nil value in stream.
+	if num := f.ti.rt.NumMethod(); num > 0 {
+		decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)",
+			f.ti.rt, num)
+	}
+	var rvn reflect.Value
+	var useRvn bool
+	switch vt {
+	case valueTypeMap:
+		if f.d.h.MapType == nil {
+			var m2 map[interface{}]interface{}
+			v = &m2
+		} else {
+			rvn = reflect.New(f.d.h.MapType).Elem()
+			useRvn = true
+		}
+	case valueTypeArray:
+		if f.d.h.SliceType == nil {
+			var m2 []interface{}
+			v = &m2
+		} else {
+			rvn = reflect.New(f.d.h.SliceType).Elem()
+			useRvn = true
+		}
+	case valueTypeExt:
+		re := v.(*RawExt)
+		var bfn func(reflect.Value, []byte) error
+		rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag)
+		if bfn == nil {
+			rvn = reflect.ValueOf(*re)
+		} else if fnerr := bfn(rvn, re.Data); fnerr != nil {
+			panic(fnerr)
+		}
+		rv.Set(rvn)
+		return
+	}
+	if decodeFurther {
+		if useRvn {
+			f.d.decodeValue(rvn)
+		} else if v != nil {
+			// this v is a pointer, so we need to dereference it when done
+			f.d.decode(v)
+			rvn = reflect.ValueOf(v).Elem()
+			useRvn = true
+		}
+	}
+	if useRvn {
+		rv.Set(rvn)
+	} else if v != nil {
+		rv.Set(reflect.ValueOf(v))
+	}
+}
+
+func (f *decFnInfo) kStruct(rv reflect.Value) {
+	fti := f.ti
+	if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap {
+		containerLen := f.dd.readMapLen()
+		if containerLen == 0 {
+			return
+		}
+		tisfi := fti.sfi
+		for j := 0; j < containerLen; j++ {
+			// var rvkencname string
+			// ddecode(&rvkencname)
+			f.dd.initReadNext()
+			rvkencname := f.dd.decodeString()
+			// rvksi := ti.getForEncName(rvkencname)
+			if k := fti.indexForEncName(rvkencname); k > -1 {
+				sfik := tisfi[k]
+				if sfik.i != -1 {
+					f.d.decodeValue(rv.Field(int(sfik.i)))
+				} else {
+					f.d.decEmbeddedField(rv, sfik.is)
+				}
+				// f.d.decodeValue(ti.field(k, rv))
+			} else {
+				if f.d.h.ErrorIfNoField {
+					decErr("No matching struct field found when decoding stream map with key: %v",
+						rvkencname)
+				} else {
+					var nilintf0 interface{}
+					f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem())
+				}
+			}
+		}
+	} else if currEncodedType == valueTypeArray {
+		containerLen := f.dd.readArrayLen()
+		if containerLen == 0 {
+			return
+		}
+		for j, si := range fti.sfip {
+			if j == containerLen {
+				break
+			}
+			if si.i != -1 {
+				f.d.decodeValue(rv.Field(int(si.i)))
+			} else {
+				f.d.decEmbeddedField(rv, si.is)
+			}
+		}
+		if containerLen > len(fti.sfip) {
+			// read remaining values and throw away
+			for j := len(fti.sfip); j < containerLen; j++ {
+				var nilintf0 interface{}
+				f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem())
+			}
+		}
+	} else {
+		decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)",
+			currEncodedType)
+	}
+}
+
+func (f *decFnInfo) kSlice(rv reflect.Value) {
+	// A slice can be set from a map or array in stream.
+	currEncodedType := f.dd.currentEncodedType()
+
+	switch currEncodedType {
+	case valueTypeBytes, valueTypeString:
+		if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 {
+			if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 {
+				rv.SetBytes(bs2)
+			}
+			return
+		}
+	}
+
+	if shortCircuitReflectToFastPath && rv.CanAddr() {
+		switch f.ti.rtid {
+		case intfSliceTypId:
+			f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array)
+			return
+		case uint64SliceTypId:
+			f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array)
+			return
+		case int64SliceTypId:
+			f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array)
+			return
+		case strSliceTypId:
+			f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array)
+			return
+		}
+	}
+
+	containerLen, containerLenS := decContLens(f.dd, currEncodedType)
+
+	// An array can never return a nil slice, so no need to check f.array here.
+
+	if rv.IsNil() {
+		rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS))
+	}
+
+	if containerLen == 0 {
+		return
+	}
+
+	if rvcap, rvlen := rv.Len(), rv.Cap(); containerLenS > rvcap {
+		if f.array { // !rv.CanSet()
+			decErr(msgDecCannotExpandArr, rvcap, containerLenS)
+		}
+		rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)
+		if rvlen > 0 {
+			reflect.Copy(rvn, rv)
+		}
+		rv.Set(rvn)
+	} else if containerLenS > rvlen {
+		rv.SetLen(containerLenS)
+	}
+
+	for j := 0; j < containerLenS; j++ {
+		f.d.decodeValue(rv.Index(j))
+	}
+}
+
+func (f *decFnInfo) kArray(rv reflect.Value) {
+	// f.d.decodeValue(rv.Slice(0, rv.Len()))
+	f.kSlice(rv.Slice(0, rv.Len()))
+}
+
+func (f *decFnInfo) kMap(rv reflect.Value) {
+	if shortCircuitReflectToFastPath && rv.CanAddr() {
+		switch f.ti.rtid {
+		case mapStrIntfTypId:
+			f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{}))
+			return
+		case mapIntfIntfTypId:
+			f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{}))
+			return
+		case mapInt64IntfTypId:
+			f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{}))
+			return
+		case mapUint64IntfTypId:
+			f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{}))
+			return
+		}
+	}
+
+	containerLen := f.dd.readMapLen()
+
+	if rv.IsNil() {
+		rv.Set(reflect.MakeMap(f.ti.rt))
+	}
+
+	if containerLen == 0 {
+		return
+	}
+
+	ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem()
+	ktypeId := reflect.ValueOf(ktype).Pointer()
+	for j := 0; j < containerLen; j++ {
+		rvk := reflect.New(ktype).Elem()
+		f.d.decodeValue(rvk)
+
+		// special case if a byte array.
+		// if ktype == intfTyp {
+		if ktypeId == intfTypId {
+			rvk = rvk.Elem()
+			if rvk.Type() == uint8SliceTyp {
+				rvk = reflect.ValueOf(string(rvk.Bytes()))
+			}
+		}
+		rvv := rv.MapIndex(rvk)
+		if !rvv.IsValid() || !rvv.CanSet() {
+			rvv = reflect.New(vtype).Elem()
+		}
+
+		f.d.decodeValue(rvv)
+		rv.SetMapIndex(rvk, rvv)
+	}
+}
+
+// ----------------------------------------
+
+type decFn struct {
+	i *decFnInfo
+	f func(*decFnInfo, reflect.Value)
+}
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+	r decReader
+	d decDriver
+	h *BasicHandle
+	f map[uintptr]decFn
+	x []uintptr
+	s []decFn
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, users are encouraged to pass in a memory buffered reader
+// (eg bufio.Reader, bytes.Buffer).
+func NewDecoder(r io.Reader, h Handle) *Decoder { + z := ioDecReader{ + r: r, + } + z.br, _ = r.(io.ByteReader) + return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. +func NewDecoderBytes(in []byte, h Handle) *Decoder { + z := bytesDecReader{ + b: in, + a: len(in), + } + return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} +} + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. v cannot be a nil pointer. v can also be +// a reflect.Value of a pointer. +// +// Note that a pointer to a nil interface is not a nil pointer. +// If you do not know what type of stream it is, pass in a pointer to a nil interface. +// We will decode and store a value in that nil interface. +// +// Sample usages: +// // Decoding into a non-nil typed value +// var f float32 +// err = codec.NewDecoder(r, handle).Decode(&f) +// +// // Decoding into nil interface +// var v interface{} +// dec := codec.NewDecoder(r, handle) +// err = dec.Decode(&v) +// +// When decoding into a nil interface{}, we will decode into an appropriate value based +// on the contents of the stream: +// - Numbers are decoded as float64, int64 or uint64. +// - Other values are decoded appropriately depending on the type: +// bool, string, []byte, time.Time, etc +// - Extensions are decoded as RawExt (if no ext function registered for the tag) +// Configurations exist on the Handle to override defaults +// (e.g. for MapType, SliceType and how to decode raw bytes). +// +// When decoding into a non-nil interface{} value, the mode of encoding is based on the +// type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error +// - Else decode it based on its reflect.Kind +// +// There are some special rules when decoding into containers (slice/array/map/struct). +// Decode will typically use the stream contents to UPDATE the container. +// - A map can be decoded from a stream map, by updating matching keys. +// - A slice can be decoded from a stream array, +// by updating the first n elements, where n is length of the stream. +// - A slice can be decoded from a stream map, by decoding as if +// it contains a sequence of key-value pairs. +// - A struct can be decoded from a stream map, by updating matching fields. +// - A struct can be decoded from a stream array, +// by updating fields as they occur in the struct (by index). +// +// When decoding a stream map or array with length of 0 into a nil map or slice, +// we reset the destination map or slice to a zero-length value. +// +// However, when decoding a stream nil, we reset the destination container +// to its "zero" value (e.g. nil for slice/map, etc). 
+// +func (d *Decoder) Decode(v interface{}) (err error) { + defer panicToErr(&err) + d.decode(v) + return +} + +func (d *Decoder) decode(iv interface{}) { + d.d.initReadNext() + + switch v := iv.(type) { + case nil: + decErr("Cannot decode into nil.") + + case reflect.Value: + d.chkPtrValue(v) + d.decodeValue(v.Elem()) + + case *string: + *v = d.d.decodeString() + case *bool: + *v = d.d.decodeBool() + case *int: + *v = int(d.d.decodeInt(intBitsize)) + case *int8: + *v = int8(d.d.decodeInt(8)) + case *int16: + *v = int16(d.d.decodeInt(16)) + case *int32: + *v = int32(d.d.decodeInt(32)) + case *int64: + *v = d.d.decodeInt(64) + case *uint: + *v = uint(d.d.decodeUint(uintBitsize)) + case *uint8: + *v = uint8(d.d.decodeUint(8)) + case *uint16: + *v = uint16(d.d.decodeUint(16)) + case *uint32: + *v = uint32(d.d.decodeUint(32)) + case *uint64: + *v = d.d.decodeUint(64) + case *float32: + *v = float32(d.d.decodeFloat(true)) + case *float64: + *v = d.d.decodeFloat(false) + case *[]byte: + *v, _ = d.d.decodeBytes(*v) + + case *[]interface{}: + d.decSliceIntf(v, valueTypeInvalid, false) + case *[]uint64: + d.decSliceUint64(v, valueTypeInvalid, false) + case *[]int64: + d.decSliceInt64(v, valueTypeInvalid, false) + case *[]string: + d.decSliceStr(v, valueTypeInvalid, false) + case *map[string]interface{}: + d.decMapStrIntf(v) + case *map[interface{}]interface{}: + d.decMapIntfIntf(v) + case *map[uint64]interface{}: + d.decMapUint64Intf(v) + case *map[int64]interface{}: + d.decMapInt64Intf(v) + + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem()) + + default: + rv := reflect.ValueOf(iv) + d.chkPtrValue(rv) + d.decodeValue(rv.Elem()) + } +} + +func (d *Decoder) decodeValue(rv reflect.Value) { + d.d.initReadNext() + + if d.d.tryDecodeAsNil() { + // If value in stream is nil, set the dereferenced value to its "zero" value (if settable) + if rv.Kind() == reflect.Ptr { + if !rv.IsNil() { + rv.Set(reflect.Zero(rv.Type())) + } + return + } + // for rv.Kind() == reflect.Ptr { + // rv = rv.Elem() + // } + if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid + rv.Set(reflect.Zero(rv.Type())) + } + return + } + + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + + // retrieve or register a focus'ed function for this type + // to eliminate need to do the retrieval multiple times + + // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } + var fn decFn + var ok bool + if useMapForCodecCache { + fn, ok = d.f[rtid] + } else { + for i, v := range d.x { + if v == rtid { + fn, ok = d.s[i], true + break + } + } + } + if !ok { + // debugf("\tCreating new dec fn for type: %v\n", rt) + fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d} + fn.i = &fi + // An extension can be registered for any type, regardless of the Kind + // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. + // + // We can't check if it's an extension byte here first, because the user may have + // registered a pointer or non-pointer type, meaning we may have to recurse first + // before matching a mapped type, even though the extension byte is already detected. + // + // NOTE: if decoding into a nil interface{}, we return a non-nil + // value except even if the container registers a length of 0. 
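The block below fills the decoder's per-type function cache: a type's identity is the pointer behind its reflect.Type (rtid), and the resolved decFn is memoized so the kind switch runs once per type. A minimal single-goroutine sketch of that memoization trick; handler and handlerFor are illustrative names:

    package main

    import (
        "fmt"
        "reflect"
    )

    type handler func(reflect.Value)

    var cache = map[uintptr]handler{} // keyed by rtid, like Decoder.f above

    func handlerFor(rt reflect.Type) handler {
        rtid := reflect.ValueOf(rt).Pointer() // same identity trick as the codec
        if fn, ok := cache[rtid]; ok {
            return fn
        }
        var fn handler
        switch rt.Kind() { // resolved once, then memoized
        case reflect.String:
            fn = func(rv reflect.Value) { rv.SetString("decoded") }
        default:
            fn = func(reflect.Value) {}
        }
        cache[rtid] = fn
        return fn
    }

    func main() {
        var s string
        rv := reflect.ValueOf(&s).Elem()
        handlerFor(rv.Type())(rv)
        handlerFor(rv.Type())(rv) // second call hits the cache
        fmt.Println(s, len(cache)) // decoded 1
    }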
+ if rtid == rawExtTypId { + fn.f = (*decFnInfo).rawExt + } else if d.d.isBuiltinType(rtid) { + fn.f = (*decFnInfo).builtin + } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*decFnInfo).ext + } else if supportBinaryMarshal && fi.ti.unm { + fn.f = (*decFnInfo).binaryMarshal + } else { + switch rk := rt.Kind(); rk { + case reflect.String: + fn.f = (*decFnInfo).kString + case reflect.Bool: + fn.f = (*decFnInfo).kBool + case reflect.Int: + fn.f = (*decFnInfo).kInt + case reflect.Int64: + fn.f = (*decFnInfo).kInt64 + case reflect.Int32: + fn.f = (*decFnInfo).kInt32 + case reflect.Int8: + fn.f = (*decFnInfo).kInt8 + case reflect.Int16: + fn.f = (*decFnInfo).kInt16 + case reflect.Float32: + fn.f = (*decFnInfo).kFloat32 + case reflect.Float64: + fn.f = (*decFnInfo).kFloat64 + case reflect.Uint8: + fn.f = (*decFnInfo).kUint8 + case reflect.Uint64: + fn.f = (*decFnInfo).kUint64 + case reflect.Uint: + fn.f = (*decFnInfo).kUint + case reflect.Uint32: + fn.f = (*decFnInfo).kUint32 + case reflect.Uint16: + fn.f = (*decFnInfo).kUint16 + // case reflect.Ptr: + // fn.f = (*decFnInfo).kPtr + case reflect.Interface: + fn.f = (*decFnInfo).kInterface + case reflect.Struct: + fn.f = (*decFnInfo).kStruct + case reflect.Slice: + fn.f = (*decFnInfo).kSlice + case reflect.Array: + fi.array = true + fn.f = (*decFnInfo).kArray + case reflect.Map: + fn.f = (*decFnInfo).kMap + default: + fn.f = (*decFnInfo).kErr + } + } + if useMapForCodecCache { + if d.f == nil { + d.f = make(map[uintptr]decFn, 16) + } + d.f[rtid] = fn + } else { + d.s = append(d.s, fn) + d.x = append(d.x, rtid) + } + } + + fn.f(fn.i, rv) + + return +} + +func (d *Decoder) chkPtrValue(rv reflect.Value) { + // We can only decode into a non-nil pointer + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + return + } + if !rv.IsValid() { + decErr("Cannot decode into a zero (ie invalid) reflect.Value") + } + if !rv.CanInterface() { + decErr("Cannot decode into a value without an interface: %v", rv) + } + rvi := rv.Interface() + decErr("Cannot decode into non-pointer or nil pointer. 
Got: %v, %T, %v", + rv.Kind(), rvi, rvi) +} + +func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) { + // d.decodeValue(rv.FieldByIndex(index)) + // nil pointers may be here; so reproduce FieldByIndex logic + enhancements + for _, j := range index { + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + // If a pointer, it must be a pointer to struct (based on typeInfo contract) + rv = rv.Elem() + } + rv = rv.Field(j) + } + d.decodeValue(rv) +} + +// -------------------------------------------------- + +// short circuit functions for common maps and slices + +func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]interface{}, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]interface{}, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + d.decode(&s[j]) + } + *v = s +} + +func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]int64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]int64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeInt(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]uint64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]uint64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeUint(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]string, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]string, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeString() + } + *v = s +} + +func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[interface{}]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + var mk interface{} + d.decode(&mk) + // special case if a byte array. 
+ if bv, bok := mk.([]byte); bok { + mk = string(bv) + } + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[int64]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeInt(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[uint64]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeUint(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[string]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeString() + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +// ---------------------------------------- + +func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { + if currEncodedType == valueTypeInvalid { + currEncodedType = dd.currentEncodedType() + } + switch currEncodedType { + case valueTypeArray: + containerLen = dd.readArrayLen() + containerLenS = containerLen + case valueTypeMap: + containerLen = dd.readMapLen() + containerLenS = containerLen * 2 + default: + decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", + currEncodedType) + } + return +} + +func decErr(format string, params ...interface{}) { + doPanic(msgTagDec, format, params...) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go new file mode 100644 index 00000000..4914be0c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go @@ -0,0 +1,1001 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "io" + "reflect" +) + +const ( + // Some tagging information for error messages. + msgTagEnc = "codec.encoder" + defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 + // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366 +) + +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 + +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota + + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracting writing to a byte array or to an io.Writer. 
+type encWriter interface {
+	writeUint16(uint16)
+	writeUint32(uint32)
+	writeUint64(uint64)
+	writeb([]byte)
+	writestr(string)
+	writen1(byte)
+	writen2(byte, byte)
+	atEndOfEncode()
+}
+
+// encDriver abstracts the actual codec (binc vs msgpack, etc)
+type encDriver interface {
+	isBuiltinType(rt uintptr) bool
+	encodeBuiltin(rt uintptr, v interface{})
+	encodeNil()
+	encodeInt(i int64)
+	encodeUint(i uint64)
+	encodeBool(b bool)
+	encodeFloat32(f float32)
+	encodeFloat64(f float64)
+	encodeExtPreamble(xtag byte, length int)
+	encodeArrayPreamble(length int)
+	encodeMapPreamble(length int)
+	encodeString(c charEncoding, v string)
+	encodeSymbol(v string)
+	encodeStringBytes(c charEncoding, v []byte)
+	//TODO
+	//encBignum(f *big.Int)
+	//encStringRunes(c charEncoding, v []rune)
+}
+
+type ioEncWriterWriter interface {
+	WriteByte(c byte) error
+	WriteString(s string) (n int, err error)
+	Write(p []byte) (n int, err error)
+}
+
+type ioEncStringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+type EncodeOptions struct {
+	// Encode a struct as an array, and not as a map.
+	StructToArray bool
+
+	// AsSymbols defines what should be encoded as symbols.
+	//
+	// Encoding as symbols can reduce the encoded size significantly.
+	//
+	// However, during encoding, each string to be encoded as a symbol must
+	// be checked to see if it has been seen before. Consequently, encoding time
+	// will increase if using symbols, because string comparisons have a clear cost.
+	//
+	// Sample values:
+	//   AsSymbolNone
+	//   AsSymbolAll
+	//   AsSymbolMapStringKeysFlag
+	//   AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+	AsSymbols AsSymbolFlag
+}
+
+// ---------------------------------------------
+
+type simpleIoEncWriterWriter struct {
+	w  io.Writer
+	bw io.ByteWriter
+	sw ioEncStringWriter
+}
+
+func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) {
+	if o.bw != nil {
+		return o.bw.WriteByte(c)
+	}
+	_, err = o.w.Write([]byte{c})
+	return
+}
+
+func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) {
+	if o.sw != nil {
+		return o.sw.WriteString(s)
+	}
+	return o.w.Write([]byte(s))
+}
+
+func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) {
+	return o.w.Write(p)
+}
+
+// ----------------------------------------
+
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+	w ioEncWriterWriter
+	x [8]byte // temp byte array re-used internally for efficiency
+}
+
+func (z *ioEncWriter) writeUint16(v uint16) {
+	bigen.PutUint16(z.x[:2], v)
+	z.writeb(z.x[:2])
+}
+
+func (z *ioEncWriter) writeUint32(v uint32) {
+	bigen.PutUint32(z.x[:4], v)
+	z.writeb(z.x[:4])
+}
+
+func (z *ioEncWriter) writeUint64(v uint64) {
+	bigen.PutUint64(z.x[:8], v)
+	z.writeb(z.x[:8])
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+	if len(bs) == 0 {
+		return
+	}
+	n, err := z.w.Write(bs)
+	if err != nil {
+		panic(err)
+	}
+	if n != len(bs) {
+		encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)
+	}
+}
+
+func (z *ioEncWriter) writestr(s string) {
+	n, err := z.w.WriteString(s)
+	if err != nil {
+		panic(err)
+	}
+	if n != len(s) {
+		encErr("write: Incorrect num bytes written.
Expecting: %v, Wrote: %v", len(s), n) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.w.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1 byte, b2 byte) { + z.writen1(b1) + z.writen1(b2) +} + +func (z *ioEncWriter) atEndOfEncode() {} + +// ---------------------------------------- + +// bytesEncWriter implements encWriter and can write to an byte slice. +// It is used by Marshal function. +type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode +} + +func (z *bytesEncWriter) writeUint16(v uint16) { + c := z.grow(2) + z.b[c] = byte(v >> 8) + z.b[c+1] = byte(v) +} + +func (z *bytesEncWriter) writeUint32(v uint32) { + c := z.grow(4) + z.b[c] = byte(v >> 24) + z.b[c+1] = byte(v >> 16) + z.b[c+2] = byte(v >> 8) + z.b[c+3] = byte(v) +} + +func (z *bytesEncWriter) writeUint64(v uint64) { + c := z.grow(8) + z.b[c] = byte(v >> 56) + z.b[c+1] = byte(v >> 48) + z.b[c+2] = byte(v >> 40) + z.b[c+3] = byte(v >> 32) + z.b[c+4] = byte(v >> 24) + z.b[c+5] = byte(v >> 16) + z.b[c+6] = byte(v >> 8) + z.b[c+7] = byte(v) +} + +func (z *bytesEncWriter) writeb(s []byte) { + if len(s) == 0 { + return + } + c := z.grow(len(s)) + copy(z.b[c:], s) +} + +func (z *bytesEncWriter) writestr(s string) { + c := z.grow(len(s)) + copy(z.b[c:], s) +} + +func (z *bytesEncWriter) writen1(b1 byte) { + c := z.grow(1) + z.b[c] = b1 +} + +func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { + c := z.grow(2) + z.b[c] = b1 + z.b[c+1] = b2 +} + +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] +} + +func (z *bytesEncWriter) grow(n int) (oldcursor int) { + oldcursor = z.c + z.c = oldcursor + n + if z.c > cap(z.b) { + // Tried using appendslice logic: (if cap < 1024, *2, else *1.25). + // However, it was too expensive, causing too many iterations of copy. 
+ // Using bytes.Buffer model was much better (2*cap + n) + bs := make([]byte, 2*cap(z.b)+n) + copy(bs, z.b[:oldcursor]) + z.b = bs + } else if z.c > len(z.b) { + z.b = z.b[:cap(z.b)] + } + return +} + +// --------------------------------------------- + +type encFnInfo struct { + ti *typeInfo + e *Encoder + ee encDriver + xfFn func(reflect.Value) ([]byte, error) + xfTag byte +} + +func (f *encFnInfo) builtin(rv reflect.Value) { + f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) +} + +func (f *encFnInfo) rawExt(rv reflect.Value) { + f.e.encRawExt(rv.Interface().(RawExt)) +} + +func (f *encFnInfo) ext(rv reflect.Value) { + bs, fnerr := f.xfFn(rv) + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + return + } + if f.e.hh.writeExt() { + f.ee.encodeExtPreamble(f.xfTag, len(bs)) + f.e.w.writeb(bs) + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } + +} + +func (f *encFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryMarshaler + if f.ti.mIndir == 0 { + bm = rv.Interface().(binaryMarshaler) + } else if f.ti.mIndir == -1 { + bm = rv.Addr().Interface().(binaryMarshaler) + } else { + for j, k := int8(0), f.ti.mIndir; j < k; j++ { + if rv.IsNil() { + f.ee.encodeNil() + return + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryMarshaler) + } + // debugf(">>>> binaryMarshaler: %T", rv.Interface()) + bs, fnerr := bm.MarshalBinary() + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } +} + +func (f *encFnInfo) kBool(rv reflect.Value) { + f.ee.encodeBool(rv.Bool()) +} + +func (f *encFnInfo) kString(rv reflect.Value) { + f.ee.encodeString(c_UTF8, rv.String()) +} + +func (f *encFnInfo) kFloat64(rv reflect.Value) { + f.ee.encodeFloat64(rv.Float()) +} + +func (f *encFnInfo) kFloat32(rv reflect.Value) { + f.ee.encodeFloat32(float32(rv.Float())) +} + +func (f *encFnInfo) kInt(rv reflect.Value) { + f.ee.encodeInt(rv.Int()) +} + +func (f *encFnInfo) kUint(rv reflect.Value) { + f.ee.encodeUint(rv.Uint()) +} + +func (f *encFnInfo) kInvalid(rv reflect.Value) { + f.ee.encodeNil() +} + +func (f *encFnInfo) kErr(rv reflect.Value) { + encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) +} + +func (f *encFnInfo) kSlice(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case intfSliceTypId: + f.e.encSliceIntf(rv.Interface().([]interface{})) + return + case strSliceTypId: + f.e.encSliceStr(rv.Interface().([]string)) + return + case uint64SliceTypId: + f.e.encSliceUint64(rv.Interface().([]uint64)) + return + case int64SliceTypId: + f.e.encSliceInt64(rv.Interface().([]int64)) + return + } + } + + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { + f.ee.encodeStringBytes(c_RAW, rv.Bytes()) + return + } + + l := rv.Len() + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } +} + +func (f *encFnInfo) kArray(rv reflect.Value) { + // We cannot share kSlice method, because the array may be non-addressable. + // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". 
+ // So we have to duplicate the functionality here. + // f.e.encodeValue(rv.Slice(0, rv.Len())) + // f.kSlice(rv.Slice(0, rv.Len())) + + l := rv.Len() + // Handle an array of bytes specially (in line with what is done for slices) + if f.ti.rt.Elem().Kind() == reflect.Uint8 { + if l == 0 { + f.ee.encodeStringBytes(c_RAW, nil) + return + } + var bs []byte + if rv.CanAddr() { + bs = rv.Slice(0, l).Bytes() + } else { + bs = make([]byte, l) + for i := 0; i < l; i++ { + bs[i] = byte(rv.Index(i).Uint()) + } + } + f.ee.encodeStringBytes(c_RAW, bs) + return + } + + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } +} + +func (f *encFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + newlen := len(fti.sfi) + rvals := make([]reflect.Value, newlen) + var encnames []string + e := f.e + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfi + encnames = make([]string, newlen) + } + newlen = 0 + for _, si := range tisfi { + if si.i != -1 { + rvals[newlen] = rv.Field(int(si.i)) + } else { + rvals[newlen] = rv.FieldByIndex(si.is) + } + if toMap { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + continue + } + encnames[newlen] = si.encName + } else { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + rvals[newlen] = reflect.Value{} //encode as nil + } + } + newlen++ + } + + // debugf(">>>> kStruct: newlen: %v", newlen) + if toMap { + ee := f.ee //don't dereference everytime + ee.encodeMapPreamble(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + for j := 0; j < newlen; j++ { + if asSymbols { + ee.encodeSymbol(encnames[j]) + } else { + ee.encodeString(c_UTF8, encnames[j]) + } + e.encodeValue(rvals[j]) + } + } else { + f.ee.encodeArrayPreamble(newlen) + for j := 0; j < newlen; j++ { + e.encodeValue(rvals[j]) + } + } +} + +// func (f *encFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
encode kPtr called - shouldn't get called") +// if rv.IsNil() { +// f.ee.encodeNil() +// return +// } +// f.e.encodeValue(rv.Elem()) +// } + +func (f *encFnInfo) kInterface(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + f.e.encodeValue(rv.Elem()) +} + +func (f *encFnInfo) kMap(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case mapIntfIntfTypId: + f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{})) + return + case mapStrIntfTypId: + f.e.encMapStrIntf(rv.Interface().(map[string]interface{})) + return + case mapStrStrTypId: + f.e.encMapStrStr(rv.Interface().(map[string]string)) + return + case mapInt64IntfTypId: + f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{})) + return + case mapUint64IntfTypId: + f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{})) + return + } + } + + l := rv.Len() + f.ee.encodeMapPreamble(l) + if l == 0 { + return + } + // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String + keyTypeIsString := f.ti.rt.Key() == stringTyp + var asSymbols bool + if keyTypeIsString { + asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + } + mks := rv.MapKeys() + // for j, lmks := 0, len(mks); j < lmks; j++ { + for j := range mks { + if keyTypeIsString { + if asSymbols { + f.ee.encodeSymbol(mks[j].String()) + } else { + f.ee.encodeString(c_UTF8, mks[j].String()) + } + } else { + f.e.encodeValue(mks[j]) + } + f.e.encodeValue(rv.MapIndex(mks[j])) + } + +} + +// -------------------------------------------------- + +// encFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type encFn struct { + i *encFnInfo + f func(*encFnInfo, reflect.Value) +} + +// -------------------------------------------------- + +// An Encoder writes an object to an output stream in the codec format. +type Encoder struct { + w encWriter + e encDriver + h *BasicHandle + hh Handle + f map[uintptr]encFn + x []uintptr + s []encFn +} + +// NewEncoder returns an Encoder for encoding into an io.Writer. +// +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Writer, bytes.Buffer). +func NewEncoder(w io.Writer, h Handle) *Encoder { + ww, ok := w.(ioEncWriterWriter) + if !ok { + sww := simpleIoEncWriterWriter{w: w} + sww.bw, _ = w.(io.ByteWriter) + sww.sw, _ = w.(ioEncStringWriter) + ww = &sww + //ww = bufio.NewWriterSize(w, defEncByteBufSize) + } + z := ioEncWriter{ + w: ww, + } + return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} +} + +// NewEncoderBytes returns an encoder for encoding directly and efficiently +// into a byte slice, using zero-copying to temporary slices. +// +// It will potentially replace the output byte slice pointed to. +// After encoding, the out parameter contains the encoded contents. +func NewEncoderBytes(out *[]byte, h Handle) *Encoder { + in := *out + if in == nil { + in = make([]byte, defEncByteBufSize) + } + z := bytesEncWriter{ + b: in, + out: out, + } + return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} +} + +// Encode writes an object into a stream in the codec format. +// +// Encoding can be configured via the "codec" struct tag for the fields. +// +// The "codec" key in struct field's tag value is the key name, +// followed by an optional comma and options. 
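+// For example, a tag of `codec:"myName,omitempty"` supplies both a key name and an option.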
+// +// To set an option on all fields (e.g. omitempty on all fields), you +// can create a field called _struct, and set flags on it. +// +// Struct values "usually" encode as maps. Each exported struct field is encoded unless: +// - the field's codec tag is "-", OR +// - the field is empty and its codec tag specifies the "omitempty" option. +// +// When encoding as a map, the first string in the tag (before the comma) +// is the map key string to use when encoding. +// +// However, struct values may encode as arrays. This happens when: +// - StructToArray Encode option is set, OR +// - the codec tag on the _struct field sets the "toarray" option +// +// Values with types that implement MapBySlice are encoded as stream maps. +// +// The empty values (for omitempty option) are false, 0, any nil pointer +// or interface value, and any array, slice, map, or string of length zero. +// +// Anonymous fields are encoded inline if no struct tag is present. +// Else they are encoded as regular fields. +// +// Examples: +// +// type MyStruct struct { +// _struct bool `codec:",omitempty"` //set omitempty for every field +// Field1 string `codec:"-"` //skip this field +// Field2 int `codec:"myName"` //Use key "myName" in encode stream +// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. +// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. +// ... +// } +// +// type MyStruct struct { +// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field +// //and encode struct as an array +// } +// +// The mode of encoding is based on the type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error) +// - Else encode it based on its reflect.Kind +// +// Note that struct field names and keys in map[string]XXX will be treated as symbols. +// Some formats support symbols (e.g. binc) and will properly encode the string +// only once in the stream, and use a tag to refer to it thereafter. 
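+//
+// A minimal usage sketch (an illustration only, not part of the upstream
+// sources): encoding into a byte slice via the MsgpackHandle defined in this
+// package.
+//
+//	var (
+//		out []byte
+//		mh  MsgpackHandle
+//	)
+//	enc := NewEncoderBytes(&out, &mh)
+//	if err := enc.Encode(map[string]interface{}{"id": int64(1)}); err != nil {
+//		// handle the encode error
+//	}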
+func (e *Encoder) Encode(v interface{}) (err error) { + defer panicToErr(&err) + e.encode(v) + e.w.atEndOfEncode() + return +} + +func (e *Encoder) encode(iv interface{}) { + switch v := iv.(type) { + case nil: + e.e.encodeNil() + + case reflect.Value: + e.encodeValue(v) + + case string: + e.e.encodeString(c_UTF8, v) + case bool: + e.e.encodeBool(v) + case int: + e.e.encodeInt(int64(v)) + case int8: + e.e.encodeInt(int64(v)) + case int16: + e.e.encodeInt(int64(v)) + case int32: + e.e.encodeInt(int64(v)) + case int64: + e.e.encodeInt(v) + case uint: + e.e.encodeUint(uint64(v)) + case uint8: + e.e.encodeUint(uint64(v)) + case uint16: + e.e.encodeUint(uint64(v)) + case uint32: + e.e.encodeUint(uint64(v)) + case uint64: + e.e.encodeUint(v) + case float32: + e.e.encodeFloat32(v) + case float64: + e.e.encodeFloat64(v) + + case []interface{}: + e.encSliceIntf(v) + case []string: + e.encSliceStr(v) + case []int64: + e.encSliceInt64(v) + case []uint64: + e.encSliceUint64(v) + case []uint8: + e.e.encodeStringBytes(c_RAW, v) + + case map[interface{}]interface{}: + e.encMapIntfIntf(v) + case map[string]interface{}: + e.encMapStrIntf(v) + case map[string]string: + e.encMapStrStr(v) + case map[int64]interface{}: + e.encMapInt64Intf(v) + case map[uint64]interface{}: + e.encMapUint64Intf(v) + + case *string: + e.e.encodeString(c_UTF8, *v) + case *bool: + e.e.encodeBool(*v) + case *int: + e.e.encodeInt(int64(*v)) + case *int8: + e.e.encodeInt(int64(*v)) + case *int16: + e.e.encodeInt(int64(*v)) + case *int32: + e.e.encodeInt(int64(*v)) + case *int64: + e.e.encodeInt(*v) + case *uint: + e.e.encodeUint(uint64(*v)) + case *uint8: + e.e.encodeUint(uint64(*v)) + case *uint16: + e.e.encodeUint(uint64(*v)) + case *uint32: + e.e.encodeUint(uint64(*v)) + case *uint64: + e.e.encodeUint(*v) + case *float32: + e.e.encodeFloat32(*v) + case *float64: + e.e.encodeFloat64(*v) + + case *[]interface{}: + e.encSliceIntf(*v) + case *[]string: + e.encSliceStr(*v) + case *[]int64: + e.encSliceInt64(*v) + case *[]uint64: + e.encSliceUint64(*v) + case *[]uint8: + e.e.encodeStringBytes(c_RAW, *v) + + case *map[interface{}]interface{}: + e.encMapIntfIntf(*v) + case *map[string]interface{}: + e.encMapStrIntf(*v) + case *map[string]string: + e.encMapStrStr(*v) + case *map[int64]interface{}: + e.encMapInt64Intf(*v) + case *map[uint64]interface{}: + e.encMapUint64Intf(*v) + + default: + e.encodeValue(reflect.ValueOf(iv)) + } +} + +func (e *Encoder) encodeValue(rv reflect.Value) { + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + e.e.encodeNil() + return + } + rv = rv.Elem() + } + + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + + // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } + var fn encFn + var ok bool + if useMapForCodecCache { + fn, ok = e.f[rtid] + } else { + for i, v := range e.x { + if v == rtid { + fn, ok = e.s[i], true + break + } + } + } + if !ok { + // debugf("\tCreating new enc fn for type: %v\n", rt) + fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} + fn.i = &fi + if rtid == rawExtTypId { + fn.f = (*encFnInfo).rawExt + } else if e.e.isBuiltinType(rtid) { + fn.f = (*encFnInfo).builtin + } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*encFnInfo).ext + } else if supportBinaryMarshal && fi.ti.m { + fn.f = (*encFnInfo).binaryMarshal + } else { + switch rk := rt.Kind(); rk { + case reflect.Bool: + fn.f = (*encFnInfo).kBool + case reflect.String: + fn.f = (*encFnInfo).kString + case reflect.Float64: + 
fn.f = (*encFnInfo).kFloat64 + case reflect.Float32: + fn.f = (*encFnInfo).kFloat32 + case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: + fn.f = (*encFnInfo).kInt + case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: + fn.f = (*encFnInfo).kUint + case reflect.Invalid: + fn.f = (*encFnInfo).kInvalid + case reflect.Slice: + fn.f = (*encFnInfo).kSlice + case reflect.Array: + fn.f = (*encFnInfo).kArray + case reflect.Struct: + fn.f = (*encFnInfo).kStruct + // case reflect.Ptr: + // fn.f = (*encFnInfo).kPtr + case reflect.Interface: + fn.f = (*encFnInfo).kInterface + case reflect.Map: + fn.f = (*encFnInfo).kMap + default: + fn.f = (*encFnInfo).kErr + } + } + if useMapForCodecCache { + if e.f == nil { + e.f = make(map[uintptr]encFn, 16) + } + e.f[rtid] = fn + } else { + e.s = append(e.s, fn) + e.x = append(e.x, rtid) + } + } + + fn.f(fn.i, rv) + +} + +func (e *Encoder) encRawExt(re RawExt) { + if re.Data == nil { + e.e.encodeNil() + return + } + if e.hh.writeExt() { + e.e.encodeExtPreamble(re.Tag, len(re.Data)) + e.w.writeb(re.Data) + } else { + e.e.encodeStringBytes(c_RAW, re.Data) + } +} + +// --------------------------------------------- +// short circuit functions for common maps and slices + +func (e *Encoder) encSliceIntf(v []interface{}) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.encode(v2) + } +} + +func (e *Encoder) encSliceStr(v []string) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeString(c_UTF8, v2) + } +} + +func (e *Encoder) encSliceInt64(v []int64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeInt(v2) + } +} + +func (e *Encoder) encSliceUint64(v []uint64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeUint(v2) + } +} + +func (e *Encoder) encMapStrStr(v map[string]string) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.e.encodeString(c_UTF8, v2) + } +} + +func (e *Encoder) encMapStrIntf(v map[string]interface{}) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.encode(v2) + } +} + +func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeInt(k2) + e.encode(v2) + } +} + +func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeUint(uint64(k2)) + e.encode(v2) + } +} + +func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.encode(k2) + e.encode(v2) + } +} + +// ---------------------------------------- + +func encErr(format string, params ...interface{}) { + doPanic(msgTagEnc, format, params...) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go new file mode 100644 index 00000000..7da3955e --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go @@ -0,0 +1,596 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// Contains code shared by both encode and decode. 
+ +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" + "unicode" + "unicode/utf8" +) + +const ( + structTagName = "codec" + + // Support + // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) + // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error + // This constant flag will enable or disable it. + supportBinaryMarshal = true + + // Each Encoder or Decoder uses a cache of functions based on conditionals, + // so that the conditionals are not run every time. + // + // Either a map or a slice is used to keep track of the functions. + // The map is more natural, but has a higher cost than a slice/array. + // This flag (useMapForCodecCache) controls which is used. + useMapForCodecCache = false + + // For some common container types, we can short-circuit an elaborate + // reflection dance and call encode/decode directly. + // The currently supported types are: + // - slices of strings, or id's (int64,uint64) or interfaces. + // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf + shortCircuitReflectToFastPath = true + + // for debugging, set this to false, to catch panic traces. + // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. + recoverPanicToErr = true + + // if checkStructForEmptyValue, check structs fields to see if an empty value. + // This could be an expensive call, so possibly disable it. + checkStructForEmptyValue = false + + // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue + derefForIsEmptyValue = false +) + +type charEncoding uint8 + +const ( + c_RAW charEncoding = iota + c_UTF8 + c_UTF16LE + c_UTF16BE + c_UTF32LE + c_UTF32BE +) + +// valueType is the stream type +type valueType uint8 + +const ( + valueTypeUnset valueType = iota + valueTypeNil + valueTypeInt + valueTypeUint + valueTypeFloat + valueTypeBool + valueTypeString + valueTypeSymbol + valueTypeBytes + valueTypeMap + valueTypeArray + valueTypeTimestamp + valueTypeExt + + valueTypeInvalid = 0xff +) + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + cachedTypeInfo = make(map[uintptr]*typeInfo, 4) + cachedTypeInfoMutex sync.RWMutex + + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + strSliceTyp = reflect.TypeOf([]string(nil)) + boolSliceTyp = reflect.TypeOf([]bool(nil)) + uintSliceTyp = reflect.TypeOf([]uint(nil)) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + uint16SliceTyp = reflect.TypeOf([]uint16(nil)) + uint32SliceTyp = reflect.TypeOf([]uint32(nil)) + uint64SliceTyp = reflect.TypeOf([]uint64(nil)) + intSliceTyp = reflect.TypeOf([]int(nil)) + int8SliceTyp = reflect.TypeOf([]int8(nil)) + int16SliceTyp = reflect.TypeOf([]int16(nil)) + int32SliceTyp = reflect.TypeOf([]int32(nil)) + int64SliceTyp = reflect.TypeOf([]int64(nil)) + float32SliceTyp = reflect.TypeOf([]float32(nil)) + float64SliceTyp = reflect.TypeOf([]float64(nil)) + + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) + + mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) + mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) + mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) + mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + + mapBySliceTyp = 
reflect.TypeOf((*MapBySlice)(nil)).Elem() + binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() + + rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() + intfTypId = reflect.ValueOf(intfTyp).Pointer() + timeTypId = reflect.ValueOf(timeTyp).Pointer() + + intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() + strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() + + boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() + uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() + uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() + uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer() + uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() + uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() + intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() + int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() + int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() + int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() + int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() + float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() + float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer() + + mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() + mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() + mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() + mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() + mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() + mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() + mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() + // Id = reflect.ValueOf().Pointer() + // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() + + binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() + binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() + + intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +) + +type binaryUnmarshaler interface { + UnmarshalBinary(data []byte) error +} + +type binaryMarshaler interface { + MarshalBinary() (data []byte, err error) +} + +// MapBySlice represents a slice which should be encoded as a map in the stream. +// The slice contains a sequence of key-value pairs. +type MapBySlice interface { + MapBySlice() +} + +// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// +// BasicHandle encapsulates the common options and extension functions. +type BasicHandle struct { + extHandle + EncodeOptions + DecodeOptions +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + writeExt() bool + getBasicHandle() *BasicHandle + newEncDriver(w encWriter) encDriver + newDecDriver(r decReader) decDriver +} + +// RawExt represents raw unprocessed extension data. +type RawExt struct { + Tag byte + Data []byte +} + +type extTypeTagFn struct { + rtid uintptr + rt reflect.Type + tag byte + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +type extHandle []*extTypeTagFn + +// AddExt registers an encode and decode function for a reflect.Type. 
+// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn. +func (o *extHandle) AddExt( + rt reflect.Type, + tag byte, + encfn func(reflect.Value) ([]byte, error), + decfn func(reflect.Value, []byte) error, +) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + // o cannot be nil, since it is always embedded in a Handle. + // if nil, let it panic. + // if o == nil { + // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") + // return + // } + + rtid := reflect.ValueOf(rt).Pointer() + for _, v := range *o { + if v.rtid == rtid { + v.tag, v.encFn, v.decFn = tag, encfn, decfn + return + } + } + + *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) + return +} + +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + for _, v := range o { + if v.rtid == rtid { + return v + } + } + return nil +} + +func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { + for _, v := range o { + if v.tag == tag { + return v + } + } + return nil +} + +func (o extHandle) getDecodeExtForTag(tag byte) ( + rv reflect.Value, fn func(reflect.Value, []byte) error) { + if x := o.getExtForTag(tag); x != nil { + // ext is only registered for base + rv = reflect.New(x.rt).Elem() + fn = x.decFn + } + return +} + +func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.decFn + } + return +} + +func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.encFn + } + return +} + +type structFieldInfo struct { + encName string // encode name + + // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. + + is []int // (recursive/embedded) field index in struct + i int16 // field index in struct + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? + + // tag string // tag + // name string // field name + // encNameBs []byte // encoded name as byte stream + // ikind int // kind of the field as an int i.e. int(reflect.Kind) +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + if fname == "" { + panic("parseStructFieldInfo: No Field Name") + } + si := structFieldInfo{ + // name: fname, + encName: fname, + // tag: stag, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + switch s { + case "omitempty": + si.omitEmpty = true + case "toarray": + si.toArray = true + } + } + } + } + // si.encNameBs = []byte(si.encName) + return &si +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +// typeInfo keeps information about each type referenced in the encode/decode sequence. 
+// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + rt reflect.Type + rtid uintptr + + // baseId gives pointer to the base reflect.Type, after deferencing + // the pointers. E.g. base type of ***time.Time is time.Time. + base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + mbs bool // base type (T or *T) is a MapBySlice + + m bool // base type (T or *T) is a binaryMarshaler + unm bool // base type (T or *T) is a binaryUnmarshaler + mIndir int8 // number of indirections to get to binaryMarshaler type + unmIndir int8 // number of indirections to get to binaryUnmarshaler type + toArray bool // whether this (struct) type should be encoded as an array +} + +func (ti *typeInfo) indexForEncName(name string) int { + //tisfi := ti.sfi + const binarySearchThreshold = 16 + if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { + // linear search. faster than binary search in my testing up to 16-field structs. + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + } else { + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + } + return -1 +} + +func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + var ok bool + cachedTypeInfoMutex.RLock() + pti, ok = cachedTypeInfo[rtid] + cachedTypeInfoMutex.RUnlock() + if ok { + return + } + + cachedTypeInfoMutex.Lock() + defer cachedTypeInfoMutex.Unlock() + if pti, ok = cachedTypeInfo[rtid]; ok { + return + } + + ti := typeInfo{rt: rt, rtid: rtid} + pti = &ti + + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.m, ti.mIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.unm, ti.unmIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } + + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + ti.base = pt + ti.baseId = reflect.ValueOf(pt).Pointer() + ti.baseIndir = ptIndir + } + + if rt.Kind() == reflect.Struct { + var siInfo *structFieldInfo + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) + ti.toArray = siInfo.toArray + } + sfip := make([]*structFieldInfo, 0, rt.NumField()) + rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) + + // // try to put all si close together + // const tryToPutAllStructFieldInfoTogether = true + // if tryToPutAllStructFieldInfoTogether { + // sfip2 := make([]structFieldInfo, len(sfip)) + // for i, si := range sfip { + // sfip2[i] = *si + // } + // for i := range sfip { + // sfip[i] = &sfip2[i] + // } + // } + + ti.sfip = make([]*structFieldInfo, len(sfip)) + ti.sfi = make([]*structFieldInfo, len(sfip)) + copy(ti.sfip, sfip) + sort.Sort(sfiSortedByEncName(sfip)) + 
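+		// ti.sfip (filled above) preserves declaration order, used when a struct
+		// is encoded as an array; the now-sorted slice populates ti.sfi, which
+		// map encoding and the binary search in indexForEncName rely on.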
copy(ti.sfi, sfip) + } + // sfi = sfip + cachedTypeInfo[rtid] = pti + return +} + +func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, + sfi *[]*structFieldInfo, siInfo *structFieldInfo, +) { + // for rt.Kind() == reflect.Ptr { + // // indexstack = append(indexstack, 0) + // rt = rt.Elem() + // } + for j := 0; j < rt.NumField(); j++ { + f := rt.Field(j) + stag := f.Tag.Get(structTagName) + if stag == "-" { + continue + } + if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { + continue + } + // if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it. + if f.Anonymous && stag == "" { + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) + continue + } + } + // do not let fields with same name in embedded structs override field at higher level. + // this must be done after anonymous check, to allow anonymous field + // still include their child fields + if _, ok := fnameToHastag[f.Name]; ok { + continue + } + si := parseStructFieldInfo(f.Name, stag) + // si.ikind = int(f.Type.Kind()) + if len(indexstack) == 0 { + si.i = int16(j) + } else { + si.i = -1 + si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + } + + if siInfo != nil { + if siInfo.omitEmpty { + si.omitEmpty = true + } + } + *sfi = append(*sfi, si) + fnameToHastag[f.Name] = stag != "" + } +} + +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + //debug.PrintStack() + panicValToErr(x, err) + } + } +} + +func doPanic(tag string, format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = tag + copy(params2[1:], params) + panic(fmt.Errorf("%s: "+format, params2...)) +} + +func checkOverflowFloat32(f float64, doCheck bool) { + if !doCheck { + return + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() + f2 := f + if f2 < 0 { + f2 = -f + } + if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { + decErr("Overflow float32 value: %v", f2) + } +} + +func checkOverflow(ui uint64, i int64, bitsize uint8) { + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize == 0 { + return + } + if i != 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) + } + } + if ui != 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) + } + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go new file mode 100644 index 00000000..93f12854 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go @@ -0,0 +1,132 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). 
+ +import ( + "errors" + "fmt" + "math" + "reflect" +) + +var ( + raisePanicAfterRecover = false + debugging = true +) + +func panicValToErr(panicVal interface{}, err *error) { + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + if raisePanicAfterRecover { + panic(panicVal) + } + return +} + +func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return hIsEmptyValue(v.Elem(), deref, checkStruct) + } else { + return v.IsNil() + } + case reflect.Struct: + if !checkStruct { + return false + } + // return true if all fields are empty. else return false. + + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. + // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !hIsEmptyValue(v.Field(i), deref, checkStruct) { + return false + } + } + return true + } + return false +} + +func isEmptyValue(v reflect.Value) bool { + return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue) +} + +func debugf(format string, args ...interface{}) { + if debugging { + if len(format) == 0 || format[len(format)-1] != '\n' { + format = format + "\n" + } + fmt.Printf(format, args...) + } +} + +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir + } + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? + if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go new file mode 100644 index 00000000..da0500d1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go @@ -0,0 +1,816 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. 
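+(For illustration of the type-free integer encoding described below: int64(5)
+and uint64(5) both encode to the single positive-fixnum byte 0x05, so the
+original Go type is not recoverable from the stream alone.)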
+We need to maintain compatibility with it and how it encodes integer values +without caring about the type. + +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed + +*/ +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. +type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + w encWriter + h *MsgpackHandle +} + +func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. 
+ return false +} + +func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} + +func (e *msgpackEncDriver) encodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) encodeInt(i int64) { + + switch { + case i >= 0: + e.encodeUint(uint64(i)) + case i >= -32: + e.w.writen1(byte(i)) + case i >= math.MinInt8: + e.w.writen2(mpInt8, byte(i)) + case i >= math.MinInt16: + e.w.writen1(mpInt16) + e.w.writeUint16(uint16(i)) + case i >= math.MinInt32: + e.w.writen1(mpInt32) + e.w.writeUint32(uint32(i)) + default: + e.w.writen1(mpInt64) + e.w.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) encodeUint(i uint64) { + switch { + case i <= math.MaxInt8: + e.w.writen1(byte(i)) + case i <= math.MaxUint8: + e.w.writen2(mpUint8, byte(i)) + case i <= math.MaxUint16: + e.w.writen1(mpUint16) + e.w.writeUint16(uint16(i)) + case i <= math.MaxUint32: + e.w.writen1(mpUint32) + e.w.writeUint32(uint32(i)) + default: + e.w.writen1(mpUint64) + e.w.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) encodeFloat32(f float32) { + e.w.writen1(mpFloat) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) encodeFloat64(f float64) { + e.w.writen1(mpDouble) + e.w.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + switch { + case l == 1: + e.w.writen2(mpFixExt1, xtag) + case l == 2: + e.w.writen2(mpFixExt2, xtag) + case l == 4: + e.w.writen2(mpFixExt4, xtag) + case l == 8: + e.w.writen2(mpFixExt8, xtag) + case l == 16: + e.w.writen2(mpFixExt16, xtag) + case l < 256: + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + case l < 65536: + e.w.writen1(mpExt16) + e.w.writeUint16(uint16(l)) + e.w.writen1(xtag) + default: + e.w.writen1(mpExt32) + e.w.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) encodeArrayPreamble(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) encodeMapPreamble(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(s)) + } else { + e.writeContainerLen(msgpackContainerStr, len(s)) + } + if len(s) > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) +} + +func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) + } else { + e.writeContainerLen(msgpackContainerStr, len(bs)) + } + if len(bs) > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + switch { + case ct.hasFixMin && l < ct.fixCutoff: + e.w.writen1(ct.bFixMin | byte(l)) + case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): + e.w.writen2(ct.b8, uint8(l)) + case l < 65536: + e.w.writen1(ct.b16) + e.w.writeUint16(uint16(l)) + default: + e.w.writen1(ct.b32) + e.w.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + r decReader + h *MsgpackHandle + bd byte + bdRead bool + bdType valueType +} + +func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. 
+ return false +} + +func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. +func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + bd := d.bd + + switch bd { + case mpNil: + vt = valueTypeNil + d.bdRead = false + case mpFalse: + vt = valueTypeBool + v = false + case mpTrue: + vt = valueTypeBool + v = true + + case mpFloat: + vt = valueTypeFloat + v = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + vt = valueTypeFloat + v = math.Float64frombits(d.r.readUint64()) + + case mpUint8: + vt = valueTypeUint + v = uint64(d.r.readn1()) + case mpUint16: + vt = valueTypeUint + v = uint64(d.r.readUint16()) + case mpUint32: + vt = valueTypeUint + v = uint64(d.r.readUint32()) + case mpUint64: + vt = valueTypeUint + v = uint64(d.r.readUint64()) + + case mpInt8: + vt = valueTypeInt + v = int64(int8(d.r.readn1())) + case mpInt16: + vt = valueTypeInt + v = int64(int16(d.r.readUint16())) + case mpInt32: + vt = valueTypeInt + v = int64(int32(d.r.readUint32())) + case mpInt64: + vt = valueTypeInt + v = int64(int64(d.r.readUint64())) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + vt = valueTypeInt + v = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + vt = valueTypeInt + v = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + var rvm string + vt = valueTypeString + v = &rvm + } else { + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + } + decodeFurther = true + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + decodeFurther = true + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + vt = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + vt = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + clen := d.readExtLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(clen) + v = &re + vt = valueTypeExt + default: + decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + if !decodeFurther { + d.bdRead = false + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(d.r.readUint16())) + case mpUint32: + i = int64(uint64(d.r.readUint32())) + case mpUint64: + i = int64(d.r.readUint64()) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(d.r.readUint16())) + case mpInt32: + i = int64(int32(d.r.readUint32())) + case mpInt64: + i = int64(d.r.readUint64()) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + decErr("Unhandled single-byte unsigned integer value: %s: %x", 
msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(d.r.readUint16()) + case mpUint32: + ui = uint64(d.r.readUint32()) + case mpUint64: + ui = d.r.readUint64() + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt16: + if i := int64(int16(d.r.readUint16())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt32: + if i := int64(int32(d.r.readUint32())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt64: + if i := int64(d.r.readUint64()); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + default: + decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case mpFloat: + f = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + f = math.Float64frombits(d.r.readUint64()) + default: + f = float64(d.decodeInt(0)) + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. +func (d *msgpackDecDriver) decodeBool() (b bool) { + switch d.bd { + case mpFalse, 0: + // b = false + case mpTrue, 1: + b = true + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) decodeString() (s string) { + clen := d.readContainerLen(msgpackContainerStr) + if clen > 0 { + s = string(d.r.readn(clen)) + } + d.bdRead = false + return +} + +// Callers must check if changed=true (to decide whether to replace the one they have) +func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + // bytes can be decoded from msgpackContainerStr or msgpackContainerBin + var clen int + switch d.bd { + case mpBin8, mpBin16, mpBin32: + clen = d.readContainerLen(msgpackContainerBin) + default: + clen = d.readContainerLen(msgpackContainerStr) + } + // if clen < 0 { + // changed = true + // panic("length cannot be zero. 
this cannot be nil.") + // } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + // Return changed=true if length of passed slice diff from length of bytes in stream + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. +func (d *msgpackDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *msgpackDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + bd := d.bd + switch bd { + case mpNil: + d.bdType = valueTypeNil + case mpFalse, mpTrue: + d.bdType = valueTypeBool + case mpFloat, mpDouble: + d.bdType = valueTypeFloat + case mpUint8, mpUint16, mpUint32, mpUint64: + d.bdType = valueTypeUint + case mpInt8, mpInt16, mpInt32, mpInt64: + d.bdType = valueTypeInt + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + d.bdType = valueTypeInt + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + d.bdType = valueTypeInt + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + d.bdType = valueTypeString + } else { + d.bdType = valueTypeBytes + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + d.bdType = valueTypeBytes + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + d.bdType = valueTypeArray + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + d.bdType = valueTypeMap + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + d.bdType = valueTypeExt + default: + decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + } + return d.bdType +} + +func (d *msgpackDecDriver) tryDecodeAsNil() bool { + if d.bd == mpNil { + d.bdRead = false + return true + } + return false +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + switch { + case bd == mpNil: + clen = -1 // to represent nil + case bd == ct.b8: + clen = int(d.r.readn1()) + case bd == ct.b16: + clen = int(d.r.readUint16()) + case bd == ct.b32: + clen = int(d.r.readUint32()) + case (ct.bFixMin & bd) == ct.bFixMin: + clen = int(ct.bFixMin ^ bd) + default: + decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) readMapLen() int { + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) readArrayLen() int { + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(d.r.readUint16()) + case mpExt32: + clen = int(d.r.readUint32()) + default: + decErr("decoding ext bytes: found unexpected byte: %x", d.bd) + } + return +} + +func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + xbd := d.bd + switch { + case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: + xbs, _ = d.decodeBytes(nil) + case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, + xbd >= mpFixStrMin && xbd <= mpFixStrMax: + 
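+		// str-family descriptor: the ext payload was written in string format
+		// (e.g. when WriteExt was false at encode time), so decode it as a
+		// string and convert.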
xbs = []byte(d.decodeString()) + default: + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. + // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool +} + +func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { + return &msgpackEncDriver{w: w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { + return &msgpackDecDriver{r: r, h: h} +} + +func (h *MsgpackHandle) writeExt() bool { + return h.WriteExt +} + +func (h *MsgpackHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. + // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + + if c.cls { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, bs1) + // return + // } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return + } + if b != fia { + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py new file mode 100644 index 00000000..e933838c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python + +# This will create golden files in a directory passed to it. +# A Test calls this internally to create the golden files +# So it can process them (so we don't have to checkin the files). 
+ +import msgpack, msgpackrpc, sys, os, threading + +def get_test_data_list(): + # get list with all primitive types, and a combo type + l0 = [ + -8, + -1616, + -32323232, + -6464646464646464, + 192, + 1616, + 32323232, + 6464646464646464, + 192, + -3232.0, + -6464646464.0, + 3232.0, + 6464646464.0, + False, + True, + None, + "someday", + "", + "bytestring", + 1328176922000002000, + -2206187877999998000, + 0, + -6795364578871345152 + ] + l1 = [ + { "true": True, + "false": False }, + { "true": "True", + "false": False, + "uint16(1616)": 1616 }, + { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], + "int32":32323232, "bool": True, + "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", + "SHORT STRING": "1234567890" }, + { True: "true", 8: False, "false": 0 } + ] + + l = [] + l.extend(l0) + l.append(l0) + l.extend(l1) + return l + +def build_test_data(destdir): + l = get_test_data_list() + for i in range(len(l)): + packer = msgpack.Packer() + serialized = packer.pack(l[i]) + f = open(os.path.join(destdir, str(i) + '.golden'), 'wb') + f.write(serialized) + f.close() + +def doRpcServer(port, stopTimeSec): + class EchoHandler(object): + def Echo123(self, msg1, msg2, msg3): + return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) + def EchoStruct(self, msg): + return ("%s" % msg) + + addr = msgpackrpc.Address('localhost', port) + server = msgpackrpc.Server(EchoHandler()) + server.listen(addr) + # run thread to stop it after stopTimeSec seconds if > 0 + if stopTimeSec > 0: + def myStopRpcServer(): + server.stop() + t = threading.Timer(stopTimeSec, myStopRpcServer) + t.start() + server.start() + +def doRpcClientToPythonSvc(port): + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("Echo123", "A1", "B2", "C3") + print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doRpcClientToGoSvc(port): + # print ">>>> port: ", port, " <<<<<" + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) + print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doMain(args): + if len(args) == 2 and args[0] == "testdata": + build_test_data(args[1]) + elif len(args) == 3 and args[0] == "rpc-server": + doRpcServer(int(args[1]), int(args[2])) + elif len(args) == 2 and args[0] == "rpc-client-python-service": + doRpcClientToPythonSvc(int(args[1])) + elif len(args) == 2 and args[0] == "rpc-client-go-service": + doRpcClientToGoSvc(int(args[1])) + else: + print("Usage: msgpack_test.py " + + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") + +if __name__ == "__main__": + doMain(sys.argv[1:]) + diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go new file mode 100644 index 00000000..d014dbdc --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go @@ -0,0 +1,152 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "bufio" + "io" + "net/rpc" + "sync" +) + +// Rpc provides a rpc Server or Client Codec for rpc communication. 
+type Rpc interface {
+	ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
+	ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
+}
+
+// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
+// used by the rpc connection. It accommodates use-cases where the connection
+// should be used by rpc and non-rpc functions, e.g. streaming a file after
+// sending an rpc response.
+type RpcCodecBuffered interface {
+	BufferedReader() *bufio.Reader
+	BufferedWriter() *bufio.Writer
+}
+
+// -------------------------------------
+
+// rpcCodec defines the struct members and common methods.
+type rpcCodec struct {
+	rwc io.ReadWriteCloser
+	dec *Decoder
+	enc *Encoder
+	bw  *bufio.Writer
+	br  *bufio.Reader
+	mu  sync.Mutex
+	cls bool
+}
+
+func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
+	bw := bufio.NewWriter(conn)
+	br := bufio.NewReader(conn)
+	return rpcCodec{
+		rwc: conn,
+		bw:  bw,
+		br:  br,
+		enc: NewEncoder(bw, h),
+		dec: NewDecoder(br, h),
+	}
+}
+
+func (c *rpcCodec) BufferedReader() *bufio.Reader {
+	return c.br
+}
+
+func (c *rpcCodec) BufferedWriter() *bufio.Writer {
+	return c.bw
+}
+
+func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
+	if c.cls {
+		return io.EOF
+	}
+	if err = c.enc.Encode(obj1); err != nil {
+		return
+	}
+	if writeObj2 {
+		if err = c.enc.Encode(obj2); err != nil {
+			return
+		}
+	}
+	if doFlush && c.bw != nil {
+		return c.bw.Flush()
+	}
+	return
+}
+
+func (c *rpcCodec) read(obj interface{}) (err error) {
+	if c.cls {
+		return io.EOF
+	}
+	// If nil is passed in, we should still attempt to read content to nowhere.
+	if obj == nil {
+		var obj2 interface{}
+		return c.dec.Decode(&obj2)
+	}
+	return c.dec.Decode(obj)
+}
+
+func (c *rpcCodec) Close() error {
+	if c.cls {
+		return io.EOF
+	}
+	c.cls = true
+	return c.rwc.Close()
+}
+
+func (c *rpcCodec) ReadResponseBody(body interface{}) error {
+	return c.read(body)
+}
+
+// -------------------------------------
+
+type goRpcCodec struct {
+	rpcCodec
+}
+
+func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+	// Must protect for concurrent access as per API
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+	return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+	return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
+	return c.read(body)
+}
+
+// -------------------------------------
+
+// goRpc is the implementation of Rpc that uses the communication protocol
+// as defined in net/rpc package.
+type goRpc struct{}
+
+// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
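+//
+// Illustrative usage (a sketch added for clarity, not part of the original
+// source), assuming an established net.Conn `conn` and a configured Handle `h`:
+//
+//	cl := rpc.NewClientWithCodec(GoRpc.ClientCodec(conn, h))
+//	var reply string
+//	err := cl.Call("Service.Method", "arg", &reply)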
+var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go new file mode 100644 index 00000000..9e4d148a --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go @@ -0,0 +1,461 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import "math" + +const ( + _ uint8 = iota + simpleVdNil = 1 + simpleVdFalse = 2 + simpleVdTrue = 3 + simpleVdFloat32 = 4 + simpleVdFloat64 = 5 + + // each lasts for 4 (ie n, n+1, n+2, n+3) + simpleVdPosInt = 8 + simpleVdNegInt = 12 + + // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) + simpleVdString = 216 + simpleVdByteArray = 224 + simpleVdArray = 232 + simpleVdMap = 240 + simpleVdExt = 248 +) + +type simpleEncDriver struct { + h *SimpleHandle + w encWriter + //b [8]byte +} + +func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { +} + +func (e *simpleEncDriver) encodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriver) encodeFloat32(f float32) { + e.w.writen1(simpleVdFloat32) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *simpleEncDriver) encodeFloat64(f float64) { + e.w.writen1(simpleVdFloat64) + e.w.writeUint64(math.Float64bits(f)) +} + +func (e *simpleEncDriver) encodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriver) encodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriver) encUint(v uint64, bd uint8) { + switch { + case v <= math.MaxUint8: + e.w.writen2(bd, uint8(v)) + case v <= math.MaxUint16: + e.w.writen1(bd + 1) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.w.writen1(bd + 2) + e.w.writeUint32(uint32(v)) + case v <= math.MaxUint64: + e.w.writen1(bd + 3) + e.w.writeUint64(v) + } +} + +func (e *simpleEncDriver) encLen(bd byte, length int) { + switch { + case length == 0: + e.w.writen1(bd) + case length <= math.MaxUint8: + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) + case length <= math.MaxUint16: + e.w.writen1(bd + 2) + e.w.writeUint16(uint16(length)) + case int64(length) <= math.MaxUint32: + e.w.writen1(bd + 3) + e.w.writeUint32(uint32(length)) + default: + e.w.writen1(bd + 4) + e.w.writeUint64(uint64(length)) + } +} + +func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + e.w.writen1(xtag) +} + +func (e *simpleEncDriver) encodeArrayPreamble(length int) { + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriver) encodeMapPreamble(length int) { + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriver) encodeString(c charEncoding, v string) { + e.encLen(simpleVdString, len(v)) + e.w.writestr(v) +} + +func (e *simpleEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) +} + +func (e *simpleEncDriver) 
encodeStringBytes(c charEncoding, v []byte) { + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +//------------------------------------ + +type simpleDecDriver struct { + h *SimpleHandle + r decReader + bdRead bool + bdType valueType + bd byte + //b [8]byte +} + +func (d *simpleDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *simpleDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.bd { + case simpleVdNil: + d.bdType = valueTypeNil + case simpleVdTrue, simpleVdFalse: + d.bdType = valueTypeBool + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + d.bdType = valueTypeUint + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + d.bdType = valueTypeInt + case simpleVdFloat32, simpleVdFloat64: + d.bdType = valueTypeFloat + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + d.bdType = valueTypeString + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + d.bdType = valueTypeBytes + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + d.bdType = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + d.bdType = valueTypeArray + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + d.bdType = valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) + } + } + return d.bdType +} + +func (d *simpleDecDriver) tryDecodeAsNil() bool { + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false +} + +func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { +} + +func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + i = int64(ui) + case simpleVdPosInt + 1: + ui = uint64(d.r.readUint16()) + i = int64(ui) + case simpleVdPosInt + 2: + ui = uint64(d.r.readUint32()) + i = int64(ui) + case simpleVdPosInt + 3: + ui = uint64(d.r.readUint64()) + i = int64(ui) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 1: + ui = uint64(d.r.readUint16()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 2: + ui = uint64(d.r.readUint32()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 3: + ui = uint64(d.r.readUint64()) + i = -(int64(ui)) + neg = true + default: + decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + } + // don't do this check, because callers may only want the unsigned value. 
+ // if ui > math.MaxInt64 { + // decErr("decIntAny: Integer out of range for signed int64: %v", ui) + // } + return +} + +func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() + if neg { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + checkOverflow(ui, 0, bitsize) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case simpleVdFloat32: + f = float64(math.Float32frombits(d.r.readUint32())) + case simpleVdFloat64: + f = math.Float64frombits(d.r.readUint64()) + default: + if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { + _, i, _ := d.decIntAny() + f = float64(i) + } else { + decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + } + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *simpleDecDriver) decodeBool() (b bool) { + switch d.bd { + case simpleVdTrue: + b = true + case simpleVdFalse: + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) readMapLen() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) readArrayLen() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) decLen() int { + switch d.bd % 8 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(d.r.readUint16()) + case 3: + ui := uint64(d.r.readUint32()) + checkOverflow(ui, 0, intBitsize) + return int(ui) + case 4: + ui := d.r.readUint64() + checkOverflow(ui, 0, intBitsize) + return int(ui) + } + decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) decodeString() (s string) { + s = string(d.r.readn(d.decLen())) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + if clen := d.decLen(); clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd)
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
+	d.initReadNext()
+
+	switch d.bd {
+	case simpleVdNil:
+		vt = valueTypeNil
+	case simpleVdFalse:
+		vt = valueTypeBool
+		v = false
+	case simpleVdTrue:
+		vt = valueTypeBool
+		v = true
+	case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
+		vt = valueTypeUint
+		ui, _, _ := d.decIntAny()
+		v = ui
+	case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
+		vt = valueTypeInt
+		_, i, _ := d.decIntAny()
+		v = i
+	case simpleVdFloat32:
+		vt = valueTypeFloat
+		v = d.decodeFloat(true)
+	case simpleVdFloat64:
+		vt = valueTypeFloat
+		v = d.decodeFloat(false)
+	case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
+		vt = valueTypeString
+		v = d.decodeString()
+	case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+		vt = valueTypeBytes
+		v, _ = d.decodeBytes(nil)
+	case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+		vt = valueTypeExt
+		l := d.decLen()
+		var re RawExt
+		re.Tag = d.r.readn1()
+		re.Data = d.r.readn(l)
+		v = &re
+	case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
+		vt = valueTypeArray
+		decodeFurther = true
+	case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
+		vt = valueTypeMap
+		decodeFurther = true
+	default:
+		decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd)
+	}
+
+	if !decodeFurther {
+		d.bdRead = false
+	}
+	return
+}
+
+//------------------------------------
+
+// SimpleHandle is a Handle for a very simple encoding format.
+//
+// simple is a simplistic codec similar to binc, but not as compact.
+// - Encoding of a value is always preceded by the descriptor byte (bd)
+// - True, false, nil are encoded fully in 1 byte (the descriptor)
+// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
+//   There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
+// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
+// - Lengths of containers (strings, bytes, array, map, extensions)
+//   are encoded in 0, 1, 2, 4 or 8 bytes.
+//   Zero-length containers have no length encoded.
+//   For others, the number of bytes is given by pow(2, bd%8 - 1).
+// - maps are encoded as [bd] [length] [[key][value]]...
+// - arrays are encoded as [bd] [length] [value]...
+// - extensions are encoded as [bd] [length] [tag] [byte]...
+// - strings/bytearrays are encoded as [bd] [length] [byte]...
+//
+// The full spec will be published soon.
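+//
+// Worked example (illustrative, added for clarity): the string "abc" is
+// encoded with descriptor simpleVdString+1 = 217 (0xd9, one length byte),
+// then the length 0x03, then the bytes: 0xd9 0x03 0x61 0x62 0x63.
+// The booleans true and false are the single bytes 0x03 and 0x02.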
+type SimpleHandle struct {
+	BasicHandle
+}
+
+func (h *SimpleHandle) newEncDriver(w encWriter) encDriver {
+	return &simpleEncDriver{w: w, h: h}
+}
+
+func (h *SimpleHandle) newDecDriver(r decReader) decDriver {
+	return &simpleDecDriver{r: r, h: h}
+}
+
+func (_ *SimpleHandle) writeExt() bool {
+	return true
+}
+
+func (h *SimpleHandle) getBasicHandle() *BasicHandle {
+	return &h.BasicHandle
+}
+
+var _ decDriver = (*simpleDecDriver)(nil)
+var _ encDriver = (*simpleEncDriver)(nil)
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/time.go b/vendor/github.com/hashicorp/go-msgpack/codec/time.go
new file mode 100644
index 00000000..c86d6532
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/time.go
@@ -0,0 +1,193 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+	"time"
+)
+
+var (
+	timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+)
+
+// EncodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+// A timestamp is composed of 3 components:
+//
+// - secs: signed integer representing seconds since unix epoch
+// - nsecs: unsigned integer representing fractional seconds as a
+//   nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+// - tz: signed integer representing timezone offset in minutes east of UTC,
+//   and a dst (daylight savings time) flag
+//
+// When encoding a timestamp, the first byte is the descriptor, which
+// defines which components are encoded and how many bytes are used to
+// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+// is not encoded in the byte array explicitly*.
+//
+// Descriptor 8 bits are of the form `A B C DDD EE`:
+//   A:   Is secs component encoded? 1 = true
+//   B:   Is nsecs component encoded? 1 = true
+//   C:   Is tz component encoded? 1 = true
+//   DDD: Number of extra bytes for secs (range 0-7).
+//        If A = 1, secs encoded in DDD+1 bytes.
+//        If A = 0, secs is not encoded, and is assumed to be 0.
+//        If A = 1, then we need at least 1 byte to encode secs.
+//        DDD says the number of extra bytes beyond that 1.
+//        E.g. if DDD=0, then secs is represented in 1 byte.
+//             if DDD=2, then secs is represented in 3 bytes.
+//   EE:  Number of extra bytes for nsecs (range 0-3).
+//        If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+// Following the descriptor bytes, subsequent bytes are:
+//
+//   secs component encoded in `DDD + 1` bytes (if A == 1)
+//   nsecs component encoded in `EE + 1` bytes (if B == 1)
+//   tz component encoded in 2 bytes (if C == 1)
+//
+// secs and nsecs components are integers encoded in a BigEndian
+// two's-complement encoding format.
+//
+// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+// least significant bit 0 are described below:
+//
+//   Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+//   Bit 15 = have_dst: set to 1 if we set the dst flag.
+//   Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
+//   Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
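+//
+// Worked example (illustrative, added for clarity): a UTC time with
+// secs = 1500000000 and nsecs = 0 encodes only the secs component.
+// 1500000000 is 0x59682f00, which needs 4 bytes after pruning leading
+// sign-extension bytes, so DDD = 3 and the descriptor is
+// 0b1_0_0_011_00 = 0x8c; the full encoding is 0x8c 0x59 0x68 0x2f 0x00.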
+// +func encodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// DecodeTime decodes a []byte into a time.Time. +func decodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printed. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + //tzname[3] = '-' (TODO: verify. this works here) + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +func timeLocUTCName(tzint int16) string { + if tzint == 0 { + return "UTC" + } + var tzname = []byte("UTC+00:00") + //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. + //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first + var tzhr, tzmin int16 + if tzint < 0 { + tzname[3] = '-' // (TODO: verify. 
this works here) + tzhr, tzmin = -tzint/60, (-tzint)%60 + } else { + tzhr, tzmin = tzint/60, tzint%60 + } + tzname[4] = timeDigits[tzhr/10] + tzname[5] = timeDigits[tzhr%10] + tzname[7] = timeDigits[tzmin/10] + tzname[8] = timeDigits[tzmin%10] + return string(tzname) + //return time.FixedZone(string(tzname), int(tzint)*60) +} diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore new file mode 100644 index 00000000..4befed30 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/.gitignore @@ -0,0 +1,2 @@ +.DS_Store +.idea diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE new file mode 100644 index 00000000..82b4de97 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md new file mode 100644 index 00000000..fe305ad5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -0,0 +1,168 @@ +# Go Plugin System over RPC + +`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system +that has been in use by HashiCorp tooling for over 4 years. While initially +created for [Packer](https://www.packer.io), it is additionally in use by +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and +[Vault](https://www.vaultproject.io). + +While the plugin system is over RPC, it is currently only designed to work +over a local [reliable] network. Plugins over a real network are not supported +and will lead to unexpected behavior. + +This plugin system has been used on millions of machines across many different +projects and has proven to be battle hardened and ready for production use. 
+ +## Features + +The HashiCorp plugin system supports a number of features: + +**Plugins are Go interface implementations.** This makes writing and consuming +plugins feel very natural. To a plugin author: you just implement an +interface as if it were going to run in the same process. For a plugin user: +you just use and call functions on an interface as if it were in the same +process. This plugin system handles the communication in between. + +**Cross-language support.** Plugins can be written (and consumed) by +almost every major language. This library supports serving plugins via +[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written +in any language. + +**Complex arguments and return values are supported.** This library +provides APIs for handling complex arguments and return values such +as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library +(`MuxBroker`) for creating new connections between the client/server to +serve additional interfaces or transfer raw data. + +**Bidirectional communication.** Because the plugin system supports +complex arguments, the host process can send it interface implementations +and the plugin can call back into the host process. + +**Built-in Logging.** Any plugins that use the `log` standard library +will have log data automatically sent to the host process. The host +process will mirror this output prefixed with the path to the plugin +binary. This makes debugging with plugins simple. If the host system +uses [hclog](https://github.com/hashicorp/go-hclog) then the log data +will be structured. If the plugin also uses hclog, logs from the plugin +will be sent to the host hclog and be structured. + +**Protocol Versioning.** A very basic "protocol version" is supported that +can be incremented to invalidate any previous plugins. This is useful when +interface signatures are changing, protocol level changes are necessary, +etc. When a protocol version is incompatible, a human friendly error +message is shown to the end user. + +**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue +to use stdout/stderr as usual and the output will get mirrored back to +the host process. The host process can control what `io.Writer` these +streams go to to prevent this from happening. + +**TTY Preservation.** Plugin subprocesses are connected to the identical +stdin file descriptor as the host process, allowing software that requires +a TTY to work. For example, a plugin can execute `ssh` and even though there +are multiple subprocesses and RPC happening, it will look and act perfectly +to the end user. + +**Host upgrade while a plugin is running.** Plugins can be "reattached" +so that the host process can be upgraded while the plugin is still running. +This requires the host/plugin to know this is possible and daemonize +properly. `NewClient` takes a `ReattachConfig` to determine if and how to +reattach. + +**Cryptographically Secure Plugins.** Plugins can be verified with an expected +checksum and RPC communications can be configured to use TLS. The host process +must be properly secured to protect this configuration. + +## Architecture + +The HashiCorp plugin system works by launching subprocesses and communicating +over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single +connection is made between any plugin and the host process. For net/rpc-based +plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. 
For gRPC-based plugins, +the HTTP2 protocol handles multiplexing. + +This architecture has a number of benefits: + + * Plugins can't crash your host process: A panic in a plugin doesn't + panic the plugin user. + + * Plugins are very easy to write: just write a Go application and `go build`. + Or use any other language to write a gRPC server with a tiny amount of + boilerplate to support go-plugin. + + * Plugins are very easy to install: just put the binary in a location where + the host will find it (depends on the host but this library also provides + helpers), and the plugin host handles the rest. + + * Plugins can be relatively secure: The plugin only has access to the + interfaces and args given to it, not to the entire memory space of the + process. Additionally, go-plugin can communicate with the plugin over + TLS. + +## Usage + +To use the plugin system, you must take the following steps. These are +high-level steps that must be done. Examples are available in the +`examples/` directory. + + 1. Choose the interface(s) you want to expose for plugins. + + 2. For each interface, implement an implementation of that interface + that communicates over a `net/rpc` connection or over a + [gRPC](http://www.grpc.io) connection or both. You'll have to implement + both a client and server implementation. + + 3. Create a `Plugin` implementation that knows how to create the RPC + client/server for a given plugin type. + + 4. Plugin authors call `plugin.Serve` to serve a plugin from the + `main` function. + + 5. Plugin users use `plugin.Client` to launch a subprocess and request + an interface implementation over RPC. + +That's it! In practice, step 2 is the most tedious and time consuming step. +Even so, it isn't very difficult and you can see examples in the `examples/` +directory as well as throughout our various open source projects. + +For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). + +## Roadmap + +Our plugin system is constantly evolving. As we use the plugin system for +new projects or for new features in existing projects, we constantly find +improvements we can make. + +At this point in time, the roadmap for the plugin system is: + +**Semantic Versioning.** Plugins will be able to implement a semantic version. +This plugin system will give host processes a system for constraining +versions. This is in addition to the protocol versioning already present +which is more for larger underlying changes. + +**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter) +to support automatic download + install of plugins. Paired with cryptographically +secure plugins (above), we can make this a safe operation for an amazing +user experience. + +## What About Shared Libraries? + +When we started using plugins (late 2012, early 2013), plugins over RPC +were the only option since Go didn't support dynamic library loading. Today, +Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with +a number of limitations. Since 2012, our plugin system has stabilized +from tens of millions of users using it, and has many benefits we've come to +value greatly. + +For example, we use this plugin system in +[Vault](https://www.vaultproject.io) where dynamic library loading is +not acceptable for security reasons. That is an extreme +example, but we believe our library system has more upsides than downsides +over dynamic library loading and since we've had it built and tested for years, +we'll continue to use it. 
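+
+As a concrete illustration of the Usage steps above, here is a compact,
+hypothetical sketch of a net/rpc-based plugin host (all names are invented
+for illustration; see the `examples/` directory for complete, authoritative
+examples):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/rpc"
+	"os/exec"
+
+	plugin "github.com/hashicorp/go-plugin"
+)
+
+// Step 1: the interface exposed to plugins.
+type Greeter interface {
+	Greet(name string) string
+}
+
+// Step 2: client/server halves that move Greet calls over net/rpc.
+type GreeterRPCClient struct{ client *rpc.Client }
+
+func (g *GreeterRPCClient) Greet(name string) string {
+	var resp string
+	if err := g.client.Call("Plugin.Greet", name, &resp); err != nil {
+		panic(err)
+	}
+	return resp
+}
+
+type GreeterRPCServer struct{ Impl Greeter }
+
+func (s *GreeterRPCServer) Greet(name string, resp *string) error {
+	*resp = s.Impl.Greet(name)
+	return nil
+}
+
+// Step 3: a plugin.Plugin that knows how to produce each half.
+type GreeterPlugin struct{ Impl Greeter }
+
+func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
+	return &GreeterRPCServer{Impl: p.Impl}, nil
+}
+
+func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+	return &GreeterRPCClient{client: c}, nil
+}
+
+// Step 5: the host launches the plugin binary and dispenses the interface.
+// (Step 4 is the plugin binary's own main calling plugin.Serve with the
+// same handshake and plugin map.)
+func main() {
+	client := plugin.NewClient(&plugin.ClientConfig{
+		HandshakeConfig: plugin.HandshakeConfig{
+			ProtocolVersion:  1,
+			MagicCookieKey:   "BASIC_PLUGIN",
+			MagicCookieValue: "hello",
+		},
+		Plugins: map[string]plugin.Plugin{"greeter": &GreeterPlugin{}},
+		Cmd:     exec.Command("./greeter-plugin"),
+	})
+	defer client.Kill()
+
+	rpcClient, err := client.Client()
+	if err != nil {
+		panic(err)
+	}
+	raw, err := rpcClient.Dispense("greeter")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(raw.(Greeter).Greet("go-plugin"))
+}
+```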
+
+Shared libraries have one major advantage over our system: much higher
+performance. In real-world scenarios across our various tools, we've never
+needed more performance out of our plugin system, and it has seen very high
+throughput, so this isn't a concern for us at the moment.
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
new file mode 100644
index 00000000..bc56559c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -0,0 +1,1025 @@
+package plugin
+
+import (
+	"bufio"
+	"context"
+	"crypto/subtle"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	hclog "github.com/hashicorp/go-hclog"
+)
+
+// If this is 1, then we've called CleanupClients. This can be used
+// by plugin RPC implementations to change error behavior since you
+// can expect network connection errors at this point. This should be
+// read by using sync/atomic.
+var Killed uint32 = 0
+
+// This is a slice of the "managed" clients which are cleaned up when
+// calling Cleanup
+var managedClients = make([]*Client, 0, 5)
+var managedClientsLock sync.Mutex
+
+// Error types
+var (
+	// ErrProcessNotFound is returned when a client is instantiated to
+	// reattach to an existing process and it isn't found.
+	ErrProcessNotFound = errors.New("Reattachment process not found")
+
+	// ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't
+	// match the one provided in the SecureConfig.
+	ErrChecksumsDoNotMatch = errors.New("checksums did not match")
+
+	// ErrSecureConfigNoChecksum is returned when an empty checksum is provided
+	// to the SecureConfig.
+	ErrSecureConfigNoChecksum = errors.New("no checksum provided")
+
+	// ErrSecureConfigNoHash is returned when a nil Hash object is provided to
+	// the SecureConfig.
+	ErrSecureConfigNoHash = errors.New("no hash implementation provided")
+
+	// ErrSecureConfigAndReattach is returned when both Reattach and
+	// SecureConfig are set.
+	ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
+)
+
+// Client handles the lifecycle of a plugin application. It launches
+// plugins, connects to them, dispenses interface implementations, and handles
+// killing the process.
+//
+// Plugin hosts should use one Client for each plugin executable. To
+// dispense a plugin type, use the `Client.Client` function, and then
+// call `Dispense`. This awkward API is mostly historical but is used to split
+// the client that deals with subprocess management and the client that
+// does RPC management.
+//
+// See NewClient and ClientConfig for using a Client.
+type Client struct {
+	config            *ClientConfig
+	exited            bool
+	l                 sync.Mutex
+	address           net.Addr
+	process           *os.Process
+	client            ClientProtocol
+	protocol          Protocol
+	logger            hclog.Logger
+	doneCtx           context.Context
+	ctxCancel         context.CancelFunc
+	negotiatedVersion int
+
+	// clientWaitGroup is used to manage the lifecycle of the plugin management
+	// goroutines.
+	clientWaitGroup sync.WaitGroup
+
+	// stderrWaitGroup is used to prevent the command's Wait() function from
+	// being called before we've finished reading from the stderr pipe.
+	stderrWaitGroup sync.WaitGroup
+
+	// processKilled is used for testing only, to flag when the process was
+	// forcefully killed.
+	processKilled bool
+}
+
+// NegotiatedVersion returns the protocol version negotiated with the server.
+// This is only valid after Start() is called.
+func (c *Client) NegotiatedVersion() int {
+	return c.negotiatedVersion
+}
+
+// ClientConfig is the configuration used to initialize a new
+// plugin client. After being used to initialize a plugin client,
+// that configuration must not be modified again.
+type ClientConfig struct {
+	// HandshakeConfig is the configuration that must match servers.
+	HandshakeConfig
+
+	// Plugins are the plugins that can be consumed.
+	// The implied version of this PluginSet is the Handshake.ProtocolVersion.
+	Plugins PluginSet
+
+	// VersionedPlugins is a map of PluginSets for specific protocol versions.
+	// These can be used to negotiate a compatible version between client and
+	// server. If this is set, Handshake.ProtocolVersion is not required.
+	VersionedPlugins map[int]PluginSet
+
+	// One of the following must be set, but not both.
+	//
+	// Cmd is the unstarted subprocess for starting the plugin. If this is
+	// set, then the Client starts the plugin process on its own and connects
+	// to it.
+	//
+	// Reattach is configuration for reattaching to an existing plugin process
+	// that is already running. This isn't common.
+	Cmd      *exec.Cmd
+	Reattach *ReattachConfig
+
+	// SecureConfig is configuration for verifying the integrity of the
+	// executable. It cannot be used with Reattach.
+	SecureConfig *SecureConfig
+
+	// TLSConfig is used to enable TLS on the RPC client.
+	TLSConfig *tls.Config
+
+	// Managed represents if the client should be managed by the
+	// plugin package or not. If true, then by calling CleanupClients,
+	// it will automatically be cleaned up. Otherwise, the client
+	// user is fully responsible for making sure to Kill all plugin
+	// clients. By default the client is _not_ managed.
+	Managed bool
+
+	// The minimum and maximum port to use for communicating with
+	// the subprocess. If not set, this defaults to 10,000 and 25,000
+	// respectively.
+	MinPort, MaxPort uint
+
+	// StartTimeout is the timeout to wait for the plugin to say it
+	// has started successfully.
+	StartTimeout time.Duration
+
+	// If non-nil, then the stderr of the client will be written to here
+	// (as well as the log). This is the original os.Stderr of the subprocess.
+	// This isn't the output of synced stderr.
+	Stderr io.Writer
+
+	// SyncStdout, SyncStderr can be set to override the
+	// respective os.Std* values in the plugin. Care should be taken to
+	// avoid races here.
+	//
+	// If these are left at their default values (nil), then this package
+	// will not sync any of these streams and the plugin's synced output
+	// is discarded.
+	SyncStdout io.Writer
+	SyncStderr io.Writer
+
+	// AllowedProtocols is a list of allowed protocols. If this isn't set,
+	// then only netrpc is allowed. This is so that older go-plugin systems
+	// can show friendly errors if they see a plugin with an unknown
+	// protocol.
+	//
+	// By setting this, you can cause an error immediately on plugin start
+	// if an unsupported protocol is used with a good error message.
+	//
+	// If this isn't set at all (nil value), then only net/rpc is accepted.
+	// This is done for legacy reasons. You must explicitly opt-in to
+	// new protocols.
+	AllowedProtocols []Protocol
+
+	// Logger is the logger that the client will use. If none is provided,
+	// it will default to hclog's default logger.
+ Logger hclog.Logger + + // AutoMTLS has the client and server automatically negotiate mTLS for + // transport authentication. This ensures that only the original client will + // be allowed to connect to the server, and all other connections will be + // rejected. The client will also refuse to connect to any server that isn't + // the original instance started by the client. + // + // In this mode of operation, the client generates a one-time use tls + // certificate, sends the public x.509 certificate to the new server, and + // the server generates a one-time use tls certificate, and sends the public + // x.509 certificate back to the client. These are used to authenticate all + // rpc connections between the client and server. + // + // Setting AutoMTLS to true implies that the server must support the + // protocol, and correctly negotiate the tls certificates, or a connection + // failure will result. + // + // The client should not set TLSConfig, nor should the server set a + // TLSProvider, because AutoMTLS implies that a new certificate and tls + // configuration will be generated at startup. + // + // You cannot Reattach to a server with this option enabled. + AutoMTLS bool +} + +// ReattachConfig is used to configure a client to reattach to an +// already-running plugin process. You can retrieve this information by +// calling ReattachConfig on Client. +type ReattachConfig struct { + Protocol Protocol + Addr net.Addr + Pid int +} + +// SecureConfig is used to configure a client to verify the integrity of an +// executable before running. It does this by verifying the checksum is +// expected. Hash is used to specify the hashing method to use when checksumming +// the file. The configuration is verified by the client by calling the +// SecureConfig.Check() function. +// +// The host process should ensure the checksum was provided by a trusted and +// authoritative source. The binary should be installed in such a way that it +// can not be modified by an unauthorized user between the time of this check +// and the time of execution. +type SecureConfig struct { + Checksum []byte + Hash hash.Hash +} + +// Check takes the filepath to an executable and returns true if the checksum of +// the file matches the checksum provided in the SecureConfig. +func (s *SecureConfig) Check(filePath string) (bool, error) { + if len(s.Checksum) == 0 { + return false, ErrSecureConfigNoChecksum + } + + if s.Hash == nil { + return false, ErrSecureConfigNoHash + } + + file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + _, err = io.Copy(s.Hash, file) + if err != nil { + return false, err + } + + sum := s.Hash.Sum(nil) + + return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil +} + +// This makes sure all the managed subprocesses are killed and properly +// logged. This should be called before the parent process running the +// plugins exits. +// +// This must only be called _once_. +func CleanupClients() { + // Set the killed to true so that we don't get unexpected panics + atomic.StoreUint32(&Killed, 1) + + // Kill all the managed clients in parallel and use a WaitGroup + // to wait for them all to finish up. 
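`SecureConfig`, defined above, is built by the host from a checksum obtained out of band. A minimal sketch, assuming `trustedHex` comes from a trusted source such as a signed SHA256SUMS file (a hypothetical source, not part of this package):

```go
import (
	"crypto/sha256"
	"encoding/hex"

	"github.com/hashicorp/go-plugin"
)

// secureConfigFrom builds a SecureConfig from a hex-encoded SHA-256 sum
// obtained from a trusted source.
func secureConfigFrom(trustedHex string) (*plugin.SecureConfig, error) {
	sum, err := hex.DecodeString(trustedHex)
	if err != nil {
		return nil, err
	}
	return &plugin.SecureConfig{
		Checksum: sum,
		Hash:     sha256.New(), // must match how the sum was produced
	}, nil
}
```

The resulting value goes into `ClientConfig.SecureConfig`; `Start` then runs `Check` against the command path before launching the subprocess.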
+ var wg sync.WaitGroup + managedClientsLock.Lock() + for _, client := range managedClients { + wg.Add(1) + + go func(client *Client) { + client.Kill() + wg.Done() + }(client) + } + managedClientsLock.Unlock() + + wg.Wait() +} + +// Creates a new plugin client which manages the lifecycle of an external +// plugin and gets the address for the RPC connection. +// +// The client must be cleaned up at some point by calling Kill(). If +// the client is a managed client (created with NewManagedClient) you +// can just call CleanupClients at the end of your program and they will +// be properly cleaned. +func NewClient(config *ClientConfig) (c *Client) { + if config.MinPort == 0 && config.MaxPort == 0 { + config.MinPort = 10000 + config.MaxPort = 25000 + } + + if config.StartTimeout == 0 { + config.StartTimeout = 1 * time.Minute + } + + if config.Stderr == nil { + config.Stderr = ioutil.Discard + } + + if config.SyncStdout == nil { + config.SyncStdout = ioutil.Discard + } + if config.SyncStderr == nil { + config.SyncStderr = ioutil.Discard + } + + if config.AllowedProtocols == nil { + config.AllowedProtocols = []Protocol{ProtocolNetRPC} + } + + if config.Logger == nil { + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: hclog.DefaultOutput, + Level: hclog.Trace, + Name: "plugin", + }) + } + + c = &Client{ + config: config, + logger: config.Logger, + } + if config.Managed { + managedClientsLock.Lock() + managedClients = append(managedClients, c) + managedClientsLock.Unlock() + } + + return +} + +// Client returns the protocol client for this connection. +// +// Subsequent calls to this will return the same client. +func (c *Client) Client() (ClientProtocol, error) { + _, err := c.Start() + if err != nil { + return nil, err + } + + c.l.Lock() + defer c.l.Unlock() + + if c.client != nil { + return c.client, nil + } + + switch c.protocol { + case ProtocolNetRPC: + c.client, err = newRPCClient(c) + + case ProtocolGRPC: + c.client, err = newGRPCClient(c.doneCtx, c) + + default: + return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) + } + + if err != nil { + c.client = nil + return nil, err + } + + return c.client, nil +} + +// Tells whether or not the underlying process has exited. +func (c *Client) Exited() bool { + c.l.Lock() + defer c.l.Unlock() + return c.exited +} + +// killed is used in tests to check if a process failed to exit gracefully, and +// needed to be killed. +func (c *Client) killed() bool { + c.l.Lock() + defer c.l.Unlock() + return c.processKilled +} + +// End the executing subprocess (if it is running) and perform any cleanup +// tasks necessary such as capturing any remaining logs and so on. +// +// This method blocks until the process successfully exits. +// +// This method can safely be called multiple times. +func (c *Client) Kill() { + // Grab a lock to read some private fields. + c.l.Lock() + process := c.process + addr := c.address + c.l.Unlock() + + // If there is no process, there is nothing to kill. + if process == nil { + return + } + + defer func() { + // Wait for the all client goroutines to finish. + c.clientWaitGroup.Wait() + + // Make sure there is no reference to the old process after it has been + // killed. + c.l.Lock() + c.process = nil + c.l.Unlock() + }() + + // We need to check for address here. It is possible that the plugin + // started (process != nil) but has no address (addr == nil) if the + // plugin failed at startup. If we do have an address, we need to close + // the plugin net connections. 
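`NewClient` above registers the client for global cleanup when `Managed` is set. A sketch of that pattern, where `handshake`, `pluginMap`, and `paths` are hypothetical values defined elsewhere in the host:

```go
for _, path := range paths {
	plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshake,
		Plugins:         pluginMap,
		Cmd:             exec.Command(path),
		Managed:         true, // registers the client for CleanupClients
	})
}

// On shutdown, one call kills every managed subprocess in parallel.
plugin.CleanupClients()
```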
+ graceful := false + if addr != nil { + // Close the client to cleanly exit the process. + client, err := c.Client() + if err == nil { + err = client.Close() + + // If there is no error, then we attempt to wait for a graceful + // exit. If there was an error, we assume that graceful cleanup + // won't happen and just force kill. + graceful = err == nil + if err != nil { + // If there was an error just log it. We're going to force + // kill in a moment anyways. + c.logger.Warn("error closing client during Kill", "err", err) + } + } else { + c.logger.Error("client", "error", err) + } + } + + // If we're attempting a graceful exit, then we wait for a short period + // of time to allow that to happen. To wait for this we just wait on the + // doneCh which would be closed if the process exits. + if graceful { + select { + case <-c.doneCtx.Done(): + c.logger.Debug("plugin exited") + return + case <-time.After(2 * time.Second): + } + } + + // If graceful exiting failed, just kill it + c.logger.Warn("plugin failed to exit gracefully") + process.Kill() + + c.l.Lock() + c.processKilled = true + c.l.Unlock() +} + +// Starts the underlying subprocess, communicating with it to negotiate +// a port for RPC connections, and returning the address to connect via RPC. +// +// This method is safe to call multiple times. Subsequent calls have no effect. +// Once a client has been started once, it cannot be started again, even if +// it was killed. +func (c *Client) Start() (addr net.Addr, err error) { + c.l.Lock() + defer c.l.Unlock() + + if c.address != nil { + return c.address, nil + } + + // If one of cmd or reattach isn't set, then it is an error. We wrap + // this in a {} for scoping reasons, and hopeful that the escape + // analysis will pop the stack here. + { + cmdSet := c.config.Cmd != nil + attachSet := c.config.Reattach != nil + secureSet := c.config.SecureConfig != nil + if cmdSet == attachSet { + return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") + } + + if secureSet && attachSet { + return nil, ErrSecureConfigAndReattach + } + } + + if c.config.Reattach != nil { + return c.reattach() + } + + if c.config.VersionedPlugins == nil { + c.config.VersionedPlugins = make(map[int]PluginSet) + } + + // handle all plugins as versioned, using the handshake config as the default. + version := int(c.config.ProtocolVersion) + + // Make sure we're not overwriting a real version 0. If ProtocolVersion was + // non-zero, then we have to just assume the user made sure that + // VersionedPlugins doesn't conflict. + if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil { + c.config.VersionedPlugins[version] = c.config.Plugins + } + + var versionStrings []string + for v := range c.config.VersionedPlugins { + versionStrings = append(versionStrings, strconv.Itoa(v)) + } + + env := []string{ + fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), + fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), + fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), + } + + cmd := c.config.Cmd + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) 
+ cmd.Stdin = os.Stdin + + cmdStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + cmdStderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + if c.config.SecureConfig != nil { + if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { + return nil, fmt.Errorf("error verifying checksum: %s", err) + } else if !ok { + return nil, ErrChecksumsDoNotMatch + } + } + + // Setup a temporary certificate for client/server mtls, and send the public + // certificate to the plugin. + if c.config.AutoMTLS { + c.logger.Info("configuring client automatic mTLS") + certPEM, keyPEM, err := generateCert() + if err != nil { + c.logger.Error("failed to generate client certificate", "error", err) + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + c.logger.Error("failed to parse client certificate", "error", err) + return nil, err + } + + cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM)) + + c.config.TLSConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ServerName: "localhost", + } + } + + c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) + err = cmd.Start() + if err != nil { + return + } + + // Set the process + c.process = cmd.Process + c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid) + + // Make sure the command is properly cleaned up if there is an error + defer func() { + r := recover() + + if err != nil || r != nil { + cmd.Process.Kill() + } + + if r != nil { + panic(r) + } + }() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + // Start goroutine that logs the stderr + c.clientWaitGroup.Add(1) + c.stderrWaitGroup.Add(1) + // logStderr calls Done() + go c.logStderr(cmdStderr) + + c.clientWaitGroup.Add(1) + go func() { + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + defer c.clientWaitGroup.Done() + + // get the cmd info early, since the process information will be removed + // in Kill. + pid := c.process.Pid + path := cmd.Path + + // wait to finish reading from stderr since the stderr pipe reader + // will be closed by the subsequent call to cmd.Wait(). + c.stderrWaitGroup.Wait() + + // Wait for the command to end. + err := cmd.Wait() + + debugMsgArgs := []interface{}{ + "path", path, + "pid", pid, + } + if err != nil { + debugMsgArgs = append(debugMsgArgs, + []interface{}{"error", err.Error()}...) + } + + // Log and make sure to flush the logs write away + c.logger.Debug("plugin process exited", debugMsgArgs...) + os.Stderr.Sync() + + // Set that we exited, which takes a lock + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }() + + // Start a goroutine that is going to be reading the lines + // out of stdout + linesCh := make(chan string) + c.clientWaitGroup.Add(1) + go func() { + defer c.clientWaitGroup.Done() + defer close(linesCh) + + scanner := bufio.NewScanner(cmdStdout) + for scanner.Scan() { + linesCh <- scanner.Text() + } + }() + + // Make sure after we exit we read the lines from stdout forever + // so they don't block since it is a pipe. + // The scanner goroutine above will close this, but track it with a wait + // group for completeness. 
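For orientation before the stdout handling below: the plugin prints a single handshake line to stdout, which `Start` parses in the `select` that follows. A well-formed line, with hypothetical values, looks like this:

```go
// core-protocol | app-protocol | network | address | protocol [| base64 TLS cert]
const handshakeLine = "1|2|unix|/tmp/plugin-756758233|grpc"
```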
+ c.clientWaitGroup.Add(1) + defer func() { + go func() { + defer c.clientWaitGroup.Done() + for range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + c.logger.Debug("waiting for RPC address", "path", cmd.Path) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-c.doneCtx.Done(): + err = errors.New("plugin exited before we could connect") + case line := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. + line = strings.TrimSpace(line) + parts := strings.SplitN(line, "|", 6) + if len(parts) < 4 { + err = fmt.Errorf( + "Unrecognized remote plugin message: %s\n\n"+ + "This usually means that the plugin is either invalid or simply\n"+ + "needs to be recompiled to support the latest protocol.", line) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. + { + var coreProtocol int64 + coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if int(coreProtocol) != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Core version: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Test the API version + version, pluginSet, err := c.checkProtoVersion(parts[1]) + if err != nil { + return addr, err + } + + // set the Plugins value to the compatible set, so the version + // doesn't need to be passed through to the ClientProtocol + // implementation. + c.config.Plugins = pluginSet + c.negotiatedVersion = version + c.logger.Debug("using plugin", "version", version) + + switch parts[2] { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", parts[3]) + case "unix": + addr, err = net.ResolveUnixAddr("unix", parts[3]) + default: + err = fmt.Errorf("Unknown address type: %s", parts[3]) + } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. + c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", + c.protocol, c.config.AllowedProtocols) + return addr, err + } + + // See if we have a TLS certificate from the server. + // Checking if the length is > 50 rules out catching the unused "extra" + // data returned from some older implementations. + if len(parts) >= 6 && len(parts[5]) > 50 { + err := c.loadServerCert(parts[5]) + if err != nil { + return nil, fmt.Errorf("error parsing server cert: %s", err) + } + } + } + + c.address = addr + return +} + +// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the +// server, and load it as the RootCA for the client TLSConfig. +func (c *Client) loadServerCert(cert string) error { + certPool := x509.NewCertPool() + + asn1, err := base64.RawStdEncoding.DecodeString(cert) + if err != nil { + return err + } + + x509Cert, err := x509.ParseCertificate([]byte(asn1)) + if err != nil { + return err + } + + certPool.AddCert(x509Cert) + + c.config.TLSConfig.RootCAs = certPool + return nil +} + +func (c *Client) reattach() (net.Addr, error) { + // Verify the process still exists. 
If not, then it is an error + p, err := os.FindProcess(c.config.Reattach.Pid) + if err != nil { + return nil, err + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. + conn, err := net.Dial( + c.config.Reattach.Addr.Network(), + c.config.Reattach.Addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) + // Goroutine to mark exit status + go func(pid int) { + defer c.clientWaitGroup.Done() + + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + // Wait for the process to die + pidWait(pid) + + // Log so we can see it + c.logger.Debug("reattached plugin process exited") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }(p.Pid) + + // Set the address and process + c.address = c.config.Reattach.Addr + c.process = p + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } + + return c.address, nil +} + +// checkProtoVersion returns the negotiated version and PluginSet. +// This returns an error if the server returned an incompatible protocol +// version, or an invalid handshake response. +func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) { + serverVersion, err := strconv.Atoi(protoVersion) + if err != nil { + return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err) + } + + // record these for the error message + var clientVersions []int + + // all versions, including the legacy ProtocolVersion have been added to + // the versions set + for version, plugins := range c.config.VersionedPlugins { + clientVersions = append(clientVersions, version) + + if serverVersion != version { + continue + } + return version, plugins, nil + } + + return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+ + "Plugin version: %d, Client versions: %d", serverVersion, clientVersions) +} + +// ReattachConfig returns the information that must be provided to NewClient +// to reattach to the plugin process that this client started. This is +// useful for plugins that detach from their parent process. +// +// If this returns nil then the process hasn't been started yet. Please +// call Start or Client before calling this. +func (c *Client) ReattachConfig() *ReattachConfig { + c.l.Lock() + defer c.l.Unlock() + + if c.address == nil { + return nil + } + + if c.config.Cmd != nil && c.config.Cmd.Process == nil { + return nil + } + + // If we connected via reattach, just return the information as-is + if c.config.Reattach != nil { + return c.config.Reattach + } + + return &ReattachConfig{ + Protocol: c.protocol, + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +// Protocol returns the protocol of server on the remote end. This will +// start the plugin process if it isn't already started. Errors from +// starting the plugin are surpressed and ProtocolInvalid is returned. It +// is recommended you call Start explicitly before calling Protocol to ensure +// no errors occur. 
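Tying the reattach pieces above together: a host can persist the `ReattachConfig` of a running plugin and revive the connection later, even from a new host process. A sketch, with `handshake` and `pluginMap` hypothetical:

```go
rc := client.ReattachConfig() // nil until Start has succeeded

// ...later, possibly from a freshly started host process:
revived := plugin.NewClient(&plugin.ClientConfig{
	HandshakeConfig: handshake,
	Plugins:         pluginMap,
	Reattach:        rc, // mutually exclusive with Cmd, as Start enforces
})
_ = revived
```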
+func (c *Client) Protocol() Protocol { + _, err := c.Start() + if err != nil { + return ProtocolInvalid + } + + return c.protocol +} + +func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { + return func(_ string, _ time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + return conn, nil + } +} + +// dialer is compatible with grpc.WithDialer and creates the connection +// to the plugin. +func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { + conn, err := netAddrDialer(c.address)("", timeout) + if err != nil { + return nil, err + } + + // If we have a TLS config we wrap our connection. We only do this + // for net/rpc since gRPC uses its own mechanism for TLS. + if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + return conn, nil +} + +var stdErrBufferSize = 64 * 1024 + +func (c *Client) logStderr(r io.Reader) { + defer c.clientWaitGroup.Done() + defer c.stderrWaitGroup.Done() + l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + + reader := bufio.NewReaderSize(r, stdErrBufferSize) + // continuation indicates the previous line was a prefix + continuation := false + + for { + line, isPrefix, err := reader.ReadLine() + switch { + case err == io.EOF: + return + case err != nil: + l.Error("reading plugin stderr", "error", err) + return + } + + c.config.Stderr.Write(line) + + // The line was longer than our max token size, so it's likely + // incomplete and won't unmarshal. + if isPrefix || continuation { + l.Debug(string(line)) + + // if we're finishing a continued line, add the newline back in + if !isPrefix { + c.config.Stderr.Write([]byte{'\n'}) + } + + continuation = isPrefix + continue + } + + c.config.Stderr.Write([]byte{'\n'}) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + // Attempt to infer the desired log level from the commonly used + // string prefixes + switch line := string(line); { + case strings.HasPrefix(line, "[TRACE]"): + l.Trace(line) + case strings.HasPrefix(line, "[DEBUG]"): + l.Debug(line) + case strings.HasPrefix(line, "[INFO]"): + l.Info(line) + case strings.HasPrefix(line, "[WARN]"): + l.Warn(line) + case strings.HasPrefix(line, "[ERROR]"): + l.Error(line) + default: + l.Debug(line) + } + } else { + out := flattenKVPairs(entry.KVPairs) + + out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + default: + // if there was no log level, it's likely this is unexpected + // json from something other than hclog, and we should output + // it verbatim. 
+ l.Debug(string(line)) + } + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 00000000..d22c566e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. +// In the future this may be smarter about checking that a file is +// executable and so on. +// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go new file mode 100644 index 00000000..22a7baa6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -0,0 +1,24 @@ +package plugin + +// This is a type that wraps error types so that they can be messaged +// across RPC channels. Since "error" is an interface, we can't always +// gob-encode the underlying structure. This is a valid error interface +// implementer that we will push across. +type BasicError struct { + Message string +} + +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. +func NewBasicError(err error) *BasicError { + if err == nil { + return nil + } + + return &BasicError{err.Error()} +} + +func (e *BasicError) Error() string { + return e.Message +} diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod new file mode 100644 index 00000000..f3ddf44e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -0,0 +1,17 @@ +module github.com/hashicorp/go-plugin + +require ( + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/protobuf v1.2.0 + github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb + github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 + github.com/oklog/run v1.0.0 + github.com/stretchr/testify v1.3.0 // indirect + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect + golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc // indirect + golang.org/x/text v0.3.0 // indirect + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect + google.golang.org/grpc v1.14.0 +) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum new file mode 100644 index 00000000..21b14e99 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -0,0 +1,31 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go new file mode 100644 index 00000000..daf142d1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -0,0 +1,457 @@ +package plugin + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + + "github.com/oklog/run" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// streamer interface is used in the broker to send/receive connection +// information. +type streamer interface { + Send(*plugin.ConnInfo) error + Recv() (*plugin.ConnInfo, error) + Close() +} + +// sendErr is used to pass errors back during a send. +type sendErr struct { + i *plugin.ConnInfo + ch chan error +} + +// gRPCBrokerServer is used by the plugin to start a stream and to send +// connection information to/from the plugin. 
Implements GRPCBrokerServer and +// streamer interfaces. +type gRPCBrokerServer struct { + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerServer() *gRPCBrokerServer { + return &gRPCBrokerServer{ + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerServer interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the client. +func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error { + doneCh := stream.Context().Done() + defer s.Close() + + // Proccess send stream + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + // Process receive stream + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the client. +func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the client from the stream to the broker. +func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerServer) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// gRPCBrokerClientImpl is used by the client to start a stream and to send +// connection information to/from the client. Implements GRPCBrokerClient and +// streamer interfaces. +type gRPCBrokerClientImpl struct { + // client is the underlying GRPC client used to make calls to the server. + client plugin.GRPCBrokerClient + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { + return &gRPCBrokerClientImpl{ + client: plugin.NewGRPCBrokerClient(conn), + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerClient interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the plugin. 
+func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type GRPCBroker struct { + nextId uint32 + streamer streamer + streams map[uint32]*gRPCBrokerPending + tls *tls.Config + doneCh chan struct{} + o sync.Once + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *plugin.ConnInfo + doneCh chan struct{} +} + +func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + streams: make(map[uint32]*gRPCBrokerPending), + tls: tls, + doneCh: make(chan struct{}), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + listener, err := serverListener() + if err != nil { + return nil, err + } + + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Network: listener.Addr().Network(), + Address: listener.Addr().String(), + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. +// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. 
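The `AcceptAndServe` pattern described above, seen from both processes. The `AddHelper` service, its generated `proto.*` constructors, and `addHelperServer` are hypothetical stand-ins, not part of this package:

```go
// Plugin side: reserve a stream ID and serve a secondary gRPC service on it.
brokerID := broker.NextId()
go broker.AcceptAndServe(brokerID, func(opts []grpc.ServerOption) *grpc.Server {
	s := grpc.NewServer(opts...)
	proto.RegisterAddHelperServer(s, &addHelperServer{}) // hypothetical service
	return s
})
// ...hand brokerID to the peer inside a normal request or response...

// Host side: dial the reserved stream and use it like any gRPC connection.
conn, err := broker.Dial(brokerID)
if err != nil {
	return err
}
defer conn.Close()
helper := proto.NewAddHelperClient(conn) // hypothetical generated client
```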
+func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { + listener, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer listener.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := s(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(listener) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +// Dial opens a connection by ID. +func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + var c *plugin.ConnInfo + + // Open the stream + p := b.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + var addr net.Addr + switch c.Network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", c.Address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", c.Address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Address) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of calls. In practice +// we've never seen it happen. +func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *GRPCBroker) Run() { + for { + stream, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + p := m.getStream(stream.ServiceId) + select { + case p.ch <- stream: + default: + } + + go m.timeoutWait(stream.ServiceId, p) + } +} + +func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. 
+ select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 00000000..d0d0d8e2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,117 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "math" + "net" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0, 5) + + // We use a custom dialer so that we can connect over unix domain sockets. + opts = append(opts, grpc.WithDialer(dialer)) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if tls == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(tls))) + } + + opts = append(opts, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) + + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + if err != nil { + return nil, err + } + + // Start the broker. + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + go broker.Run() + go brokerGRPCClient.StartStream() + + cl := &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + controller: plugin.NewGRPCControllerClient(conn), + } + + return cl, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker + + controller plugin.GRPCControllerClient +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + c.broker.Close() + c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) +} + +// ClientProtocol impl. 
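`Dispense` above requires the registered `Plugin` to also implement `GRPCPlugin`. A sketch of such an implementation; `KV`, the generated `proto` package, and the `kvGRPC*` wrappers are hypothetical:

```go
type KVPlugin struct {
	plugin.NetRPCUnsupportedPlugin // opts out of net/rpc
	Impl                           KV
}

func (p *KVPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
	proto.RegisterKVServer(s, &kvGRPCServer{Impl: p.Impl})
	return nil
}

func (p *KVPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
	return &kvGRPCClient{client: proto.NewKVClient(c)}, nil
}
```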
+func (c *GRPCClient) Ping() error { + client := grpc_health_v1.NewHealthClient(c.Conn) + _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ + Service: GRPCServiceName, + }) + + return err +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go new file mode 100644 index 00000000..1a8a8e70 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -0,0 +1,23 @@ +package plugin + +import ( + "context" + + "github.com/hashicorp/go-plugin/internal/plugin" +) + +// GRPCControllerServer handles shutdown calls to terminate the server when the +// plugin client is closed. +type grpcControllerServer struct { + server *GRPCServer +} + +// Shutdown stops the grpc server. It first will attempt a graceful stop, then a +// full stop on the server. +func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) { + resp := &plugin.Empty{} + + // TODO: figure out why GracefullStop doesn't work. + s.server.Stop() + return resp, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go new file mode 100644 index 00000000..d3dbf1ce --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -0,0 +1,142 @@ +package plugin + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// GRPCServiceName is the name of the service that the health check should +// return as passing. +const GRPCServiceName = "plugin" + +// DefaultGRPCServer can be used with the "GRPCServer" field for Server +// as a default factory method to create a gRPC server with no extra options. +func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(opts...) +} + +// GRPCServer is a ServerType implementation that serves plugins over +// gRPC. This allows plugins to easily be written for other languages. +// +// The GRPCServer outputs a custom configuration as a base64-encoded +// JSON structure represented by the GRPCServerConfig config structure. +type GRPCServer struct { + // Plugins are the list of plugins to serve. + Plugins map[string]Plugin + + // Server is the actual server that will accept connections. This + // will be used for plugin registration as well. + Server func([]grpc.ServerOption) *grpc.Server + + // TLS should be the TLS configuration if available. If this is nil, + // the connection will not have transport security. + TLS *tls.Config + + // DoneCh is the channel that is closed when this server has exited. + DoneCh chan struct{} + + // Stdout/StderrLis are the readers for stdout/stderr that will be copied + // to the stdout/stderr connection that is output. + Stdout io.Reader + Stderr io.Reader + + config GRPCServerConfig + server *grpc.Server + broker *GRPCBroker + + logger hclog.Logger +} + +// ServerProtocol impl. 
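On the plugin side, `DefaultGRPCServer` above is the factory normally handed to `Serve` to opt into the gRPC protocol; continuing the hypothetical `KVPlugin` sketch, with `localKV` as a hypothetical in-process implementation:

```go
plugin.Serve(&plugin.ServeConfig{
	HandshakeConfig: handshake, // must match the host's
	Plugins: map[string]plugin.Plugin{
		"kv": &KVPlugin{Impl: &localKV{}},
	},
	GRPCServer: plugin.DefaultGRPCServer,
})
```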
+func (s *GRPCServer) Init() error { + // Create our server + var opts []grpc.ServerOption + if s.TLS != nil { + opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) + } + s.server = s.Server(opts) + + // Register the health service + healthCheck := health.NewServer() + healthCheck.SetServingStatus( + GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + + // Register the broker service + brokerServer := newGRPCBrokerServer() + plugin.RegisterGRPCBrokerServer(s.server, brokerServer) + s.broker = newGRPCBroker(brokerServer, s.TLS) + go s.broker.Run() + + // Register the controller + controllerServer := &grpcControllerServer{ + server: s, + } + plugin.RegisterGRPCControllerServer(s.server, controllerServer) + + // Register all our plugins onto the gRPC server. + for k, raw := range s.Plugins { + p, ok := raw.(GRPCPlugin) + if !ok { + return fmt.Errorf("%q is not a GRPC-compatible plugin", k) + } + + if err := p.GRPCServer(s.broker, s.server); err != nil { + return fmt.Errorf("error registering %q: %s", k, err) + } + } + + return nil +} + +// Stop calls Stop on the underlying grpc.Server +func (s *GRPCServer) Stop() { + s.server.Stop() +} + +// GracefulStop calls GracefulStop on the underlying grpc.Server +func (s *GRPCServer) GracefulStop() { + s.server.GracefulStop() +} + +// Config is the GRPCServerConfig encoded as JSON then base64. +func (s *GRPCServer) Config() string { + // Create a buffer that will contain our final contents + var buf bytes.Buffer + + // Wrap the base64 encoding with JSON encoding. + if err := json.NewEncoder(&buf).Encode(s.config); err != nil { + // We panic since ths shouldn't happen under any scenario. We + // carefully control the structure being encoded here and it should + // always be successful. + panic(err) + } + + return buf.String() +} + +func (s *GRPCServer) Serve(lis net.Listener) { + defer close(s.DoneCh) + err := s.server.Serve(lis) + if err != nil { + s.logger.Error("grpc server", "error", err) + } +} + +// GRPCServerConfig is the extra configuration passed along for consumers +// to facilitate using GRPC plugins. +type GRPCServerConfig struct { + StdoutAddr string `json:"stdout_addr"` + StderrAddr string `json:"stderr_addr"` +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go new file mode 100644 index 00000000..aa2fdc81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:. + +package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go new file mode 100644 index 00000000..b6850aa5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_broker.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ConnInfo struct { + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnInfo) Reset() { *m = ConnInfo{} } +func (m *ConnInfo) String() string { return proto.CompactTextString(m) } +func (*ConnInfo) ProtoMessage() {} +func (*ConnInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_802e9beed3ec3b28, []int{0} +} + +func (m *ConnInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnInfo.Unmarshal(m, b) +} +func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) +} +func (m *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(m, src) +} +func (m *ConnInfo) XXX_Size() int { + return xxx_messageInfo_ConnInfo.Size(m) +} +func (m *ConnInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConnInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnInfo proto.InternalMessageInfo + +func (m *ConnInfo) GetServiceId() uint32 { + if m != nil { + return m.ServiceId + } + return 0 +} + +func (m *ConnInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ConnInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +} + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) } + +var fileDescriptor_802e9beed3ec3b28 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCBrokerClient is the client API for GRPCBroker service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. +type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { + s.RegisterService(&_GRPCBroker_serviceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_broker.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto new file mode 100644 index 00000000..3fa79e8a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message ConnInfo { + uint32 service_id = 1; + string network = 2; + string address = 3; +} + +service GRPCBroker { + rpc StartStream(stream ConnInfo) returns (stream ConnInfo); +} + + diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go new file mode 100644 index 00000000..38b42043 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc_controller.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_23c2c7e42feab570, []int{0} +} + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "plugin.Empty") +} + +func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) } + +var fileDescriptor_23c2c7e42feab570 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. +type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { + s.RegisterService(&_GRPCController_serviceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/plugin.GRPCController/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _GRPCController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_controller.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto new file mode 100644 index 00000000..345d0a1c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message Empty { +} + +// The GRPCController is responsible for telling the plugin server to shutdown. +service GRPCController { + rpc Shutdown(Empty) returns (Empty); +} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go new file mode 100644 index 00000000..fb2ef930 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "encoding/json" + "time" +) + +// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host +type logEntry struct { + Message string `json:"@message"` + Level string `json:"@level"` + Timestamp time.Time `json:"timestamp"` + KVPairs []*logEntryKV `json:"kv_pairs"` +} + +// logEntryKV is a key value pair within the Output payload +type logEntryKV struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +// flattenKVPairs is used to flatten KVPair slice into []interface{} +// for hclog consumption. 
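+// +// For example (illustrative values only, not taken from this package), a +// slice []*logEntryKV{{Key: "port", Value: 8080}} flattens to +// []interface{}{"port", 8080}, which matches hclog's variadic key/value +// arguments.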
+func flattenKVPairs(kvs []*logEntryKV) []interface{} { + var result []interface{} + for _, kv := range kvs { + result = append(result, kv.Key) + result = append(result, kv.Value) + } + + return result +} + +// parseJSON handles parsing JSON output +func parseJSON(input []byte) (*logEntry, error) { + var raw map[string]interface{} + entry := &logEntry{} + + err := json.Unmarshal(input, &raw) + if err != nil { + return nil, err + } + + // Parse hclog-specific objects + if v, ok := raw["@message"]; ok { + entry.Message = v.(string) + delete(raw, "@message") + } + + if v, ok := raw["@level"]; ok { + entry.Level = v.(string) + delete(raw, "@level") + } + + if v, ok := raw["@timestamp"]; ok { + t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) + if err != nil { + return nil, err + } + entry.Timestamp = t + delete(raw, "@timestamp") + } + + // Parse dynamic KV args from the hclog payload. + for k, v := range raw { + entry.KVPairs = append(entry.KVPairs, &logEntryKV{ + Key: k, + Value: v, + }) + } + + return entry, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go new file mode 100644 index 00000000..88955245 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mtls.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" +) + +// generateCert generates a temporary certificate for plugin authentication. The +// certificate and private key are returned in PEM format. +func generateCert() (cert []byte, privateKey []byte, err error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + sn, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, err + } + + host := "localhost" + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + Organization: []string{"HashiCorp"}, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, err + } + + var certOut bytes.Buffer + if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { + return nil, nil, err + } + + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, err + } + + var keyOut bytes.Buffer + if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return nil, nil, err + } + + cert = certOut.Bytes() + privateKey = keyOut.Bytes() + + return cert, privateKey, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go new file mode 100644 index 00000000..01c45ad7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -0,0 +1,204 @@ +package plugin + +import ( + "encoding/binary" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/yamux" +) + +// MuxBroker is responsible
for brokering multiplexed connections by unique ID. +// +// It is used by plugins to multiplex multiple RPC connections and data +// streams on top of a single connection between the plugin process and the +// host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new multiplexed streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type MuxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. +func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + +// Close closes the connection and all sub-connections. +func (m *MuxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. 
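+// +// For context, a rough sketch of how plugin code typically uses the broker +// (ExtraRPC is a hypothetical net/rpc receiver, not part of this package; +// error handling omitted): +// +//	// one side reserves an ID and serves an implementation on it +//	id := broker.NextId() +//	go broker.AcceptAndServe(id, &ExtraRPC{}) +// +//	// the other side receives the ID out-of-band and dials it +//	conn, _ := broker.Dial(id) +//	client := rpc.NewClient(conn)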
+func (m *MuxBroker) Run() { + for { + stream, err := m.session.AcceptStream() + if err != nil { + // Once we receive an error, just exit + break + } + + // Read the stream ID from the stream + var id uint32 + if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { + stream.Close() + continue + } + + // Initialize the waiter + p := m.getStream(id) + select { + case p.ch <- stream: + default: + } + + // Wait for a timeout + go m.timeoutWait(id, p) + } +} + +func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &muxBrokerPending{ + ch: make(chan net.Conn, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + timeout := false + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + timeout = true + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) + + // If we timed out, then check if we have a channel in the buffer, + // and if so, close it. + if timeout { + select { + case s := <-p.ch: + s.Close() + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go new file mode 100644 index 00000000..79d96746 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -0,0 +1,58 @@ +// The plugin package exposes functions and helpers for communicating with +// plugins which are implemented as standalone binary applications. +// +// plugin.Client fully manages the lifecycle of executing the application, +// connecting to it, and returning the RPC client for dispensing plugins. +// +// plugin.Serve fully manages listeners to expose an RPC server from a binary +// that plugin.Client can connect to. +package plugin + +import ( + "context" + "errors" + "net/rpc" + + "google.golang.org/grpc" +) + +// Plugin is the interface that is implemented to serve/connect to an +// interface implementation. +type Plugin interface { + // Server should return the RPC server compatible struct to serve + // the methods that the Client calls over net/rpc. + Server(*MuxBroker) (interface{}, error) + + // Client returns an interface implementation for the plugin you're + // serving that communicates to the server end of the plugin. + Client(*MuxBroker, *rpc.Client) (interface{}, error) +} + +// GRPCPlugin is the interface that is implemented to serve/connect to +// a plugin over gRPC. +type GRPCPlugin interface { + // GRPCServer should register this plugin for serving with the + // given GRPCServer. Unlike Plugin.Server, this is only called once + // since gRPC plugins serve singletons. + GRPCServer(*GRPCBroker, *grpc.Server) error + + // GRPCClient should return the interface implementation for the plugin + // you're serving via gRPC. The provided context will be canceled by + // go-plugin in the event of the plugin process exiting. + GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) +} + +// NetRPCUnsupportedPlugin implements Plugin but returns errors for the +// Server and Client functions. This will effectively disable support for +// net/rpc based plugins. +// +// This struct can be embedded in your struct.
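+// +// A minimal sketch of embedding it in a gRPC-only plugin (KVGRPCPlugin is +// a hypothetical type, not part of this package): +// +//	type KVGRPCPlugin struct { +//		plugin.NetRPCUnsupportedPlugin +//		// plus GRPCServer/GRPCClient implementations of GRPCPlugin +//	}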
+type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 00000000..88c999a5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "time" +) + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. +func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go new file mode 100644 index 00000000..70ba546b --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package plugin + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. +func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go new file mode 100644 index 00000000..9f7b0180 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 00000000..0cfc19e5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. + Config() string + + // Serve is called to serve connections on the given listener. 
This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 00000000..f30a4b1d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,170 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. +func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. + control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. +// +// This will return immediately and the syncing will continue to happen +// in the background. You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. +func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { + go copyStream("stdout", stdout, c.stdout) + go copyStream("stderr", stderr, c.stderr) + return nil +} + +// Close closes the connection. The client is no longer usable after this +// is called. 
+func (c *RPCClient) Close() error { + // Call the control channel and ask it to gracefully exit. If this + // errors, then we save it so that we always return an error but we + // want to try to close the other channels anyways. + var empty struct{} + returnErr := c.control.Call("Control.Quit", true, &empty) + + // Close the other streams we have + if err := c.control.Close(); err != nil { + return err + } + if err := c.stdout.Close(); err != nil { + return err + } + if err := c.stderr.Close(); err != nil { + return err + } + if err := c.broker.Close(); err != nil { + return err + } + + // Return back the error we got from Control.Quit. This is very important + // since we MUST return non-nil error if this fails so that Client.Kill + // will properly try a process.Kill. + return returnErr +} + +func (c *RPCClient) Dispense(name string) (interface{}, error) { + p, ok := c.plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + var id uint32 + if err := c.control.Call( + "Dispenser.Dispense", name, &id); err != nil { + return nil, err + } + + conn, err := c.broker.Dial(id) + if err != nil { + return nil, err + } + + return p.Client(c.broker, rpc.NewClient(conn)) +} + +// Ping pings the connection to ensure it is still alive. +// +// The error from the RPC call is returned exactly if you want to inspect +// it for further error analysis. Any error returned from here would indicate +// that the connection to the plugin is not healthy. +func (c *RPCClient) Ping() error { + var empty struct{} + return c.control.Call("Control.Ping", true, &empty) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go new file mode 100644 index 00000000..5bb18dd5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -0,0 +1,197 @@ +package plugin + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "net/rpc" + "sync" + + "github.com/hashicorp/yamux" +) + +// RPCServer listens for network connections and then dispenses interface +// implementations over net/rpc. +// +// After setting the fields below, they shouldn't be read again directly +// from the structure which may be reading/writing them concurrently. +type RPCServer struct { + Plugins map[string]Plugin + + // Stdout, Stderr are what this server will use instead of the + // normal stdin/out/err. This is because due to the multi-process nature + // of our plugin system, we can't use the normal process values so we + // make our own custom one we pipe across. + Stdout io.Reader + Stderr io.Reader + + // DoneCh should be set to a non-nil channel that will be closed + // when the control requests the RPC server to end. + DoneCh chan<- struct{} + + lock sync.Mutex +} + +// ServerProtocol impl. +func (s *RPCServer) Init() error { return nil } + +// ServerProtocol impl. +func (s *RPCServer) Config() string { return "" } + +// ServerProtocol impl. +func (s *RPCServer) Serve(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Printf("[ERR] plugin: plugin server: %s", err) + return + } + + go s.ServeConn(conn) + } +} + +// ServeConn runs a single connection. +// +// ServeConn blocks, serving the connection until the client hangs up. 
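+// +// A sketch of serving a single pre-established connection directly (conn +// is assumed to come from an Accept or a test harness; Serve normally +// calls this for you): +// +//	go server.ServeConn(conn)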
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) { + // First create the yamux server to wrap this connection + mux, err := yamux.Server(conn, nil) + if err != nil { + conn.Close() + log.Printf("[ERR] plugin: error creating yamux server: %s", err) + return + } + + // Accept the control connection + control, err := mux.Accept() + if err != nil { + mux.Close() + if err != io.EOF { + log.Printf("[ERR] plugin: error accepting control connection: %s", err) + } + + return + } + + // Connect the stdstreams (in, out, err) + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Accept() + if err != nil { + mux.Close() + log.Printf("[ERR] plugin: accepting stream %d: %s", i, err) + return + } + } + + // Copy std streams out to the proper place + go copyStream("stdout", stdstream[0], s.Stdout) + go copyStream("stderr", stdstream[1], s.Stderr) + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Use the control connection to build the dispenser and serve the + // connection. + server := rpc.NewServer() + server.RegisterName("Control", &controlServer{ + server: s, + }) + server.RegisterName("Dispenser", &dispenseServer{ + broker: broker, + plugins: s.Plugins, + }) + server.ServeConn(control) +} + +// done is called internally by the control server to trigger the +// doneCh to close which is listened to by the main process to cleanly +// exit. +func (s *RPCServer) done() { + s.lock.Lock() + defer s.lock.Unlock() + + if s.DoneCh != nil { + close(s.DoneCh) + s.DoneCh = nil + } +} + +// controlServer serves the control requests (Ping, Quit) for an RPCServer. +type controlServer struct { + server *RPCServer +} + +// Ping can be called to verify that the connection to the plugin (and +// likely the plugin binary itself) is still alive. +func (c *controlServer) Ping( + null bool, response *struct{}) error { + *response = struct{}{} + return nil +} + +func (c *controlServer) Quit( + null bool, response *struct{}) error { + // End the server + c.server.done() + + // Always return success + *response = struct{}{} + + return nil +} + +// dispenseServer dispenses various interface implementations by plugin name. +type dispenseServer struct { + broker *MuxBroker + plugins map[string]Plugin +} + +func (d *dispenseServer) Dispense( + name string, response *uint32) error { + // Find the function to create this implementation + p, ok := d.plugins[name] + if !ok { + return fmt.Errorf("unknown plugin type: %s", name) + } + + // Create the implementation first so we know if there is an error. + impl, err := p.Server(d.broker) + if err != nil { + // We turn the error into a plain error so that it works across RPC + return errors.New(err.Error()) + } + + // Reserve an ID for our implementation + id := d.broker.NextId() + *response = id + + // Run the rest in a goroutine since it can only happen once this RPC + // call returns. We wait for a connection for the plugin implementation + // and serve it.
+ go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 00000000..4c230e3a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,452 @@ +package plugin + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/signal" + "runtime" + "sort" + "strconv" + "strings" + "sync/atomic" + + "github.com/hashicorp/go-hclog" + + "google.golang.org/grpc" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + // This field is not required if VersionedPlugins are being used in the + // Client or Server configurations. + ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// PluginSet is a set of plugins provided to be registered in the plugin +// server. +type PluginSet map[string]Plugin + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + + // Plugins are the plugins that are served. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. 
This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger +} + +// protocolVersion determines the protocol version and plugin set to be used by +// the server. In the event that there is no suitable version, the last version +// in the config is returned leaving the client to report the incompatibility. +func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { + protoVersion := int(opts.ProtocolVersion) + pluginSet := opts.Plugins + protoType := ProtocolNetRPC + // Check if the client sent a list of acceptable versions + var clientVersions []int + if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { + for _, s := range strings.Split(vs, ",") { + v, err := strconv.Atoi(s) + if err != nil { + fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s) + continue + } + clientVersions = append(clientVersions, v) + } + } + + // We want to iterate in reverse order, to ensure we match the newest + // compatible plugin version. + sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) + + // set the old un-versioned fields as if they were versioned plugins + if opts.VersionedPlugins == nil { + opts.VersionedPlugins = make(map[int]PluginSet) + } + + if pluginSet != nil { + opts.VersionedPlugins[protoVersion] = pluginSet + } + + // Sort the version to make sure we match the latest first + var versions []int + for v := range opts.VersionedPlugins { + versions = append(versions, v) + } + + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // See if we have multiple versions of Plugins to choose from + for _, version := range versions { + // Record each version, since we guarantee that this returns valid + // values even if they are not a protocol match. + protoVersion = version + pluginSet = opts.VersionedPlugins[version] + + // If we have a configured gRPC server we should select a protocol + if opts.GRPCServer != nil { + // All plugins in a set must use the same transport, so check the first + // for the protocol type + for _, p := range pluginSet { + switch p.(type) { + case GRPCPlugin: + protoType = ProtocolGRPC + default: + protoType = ProtocolNetRPC + } + break + } + } + + for _, clientVersion := range clientVersions { + if clientVersion == protoVersion { + return protoVersion, protoType, pluginSet + } + } + } + + // Return the lowest version as the fallback. + // Since we iterated over all the versions in reverse order above, these + // values are from the lowest version number plugins (which may be from + // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins + // fields). This allows serving the oldest version of our plugins to a + // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. + return protoVersion, protoType, pluginSet +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// errors will be outputted to os.Stderr. +// +// This is the method that plugins should call in their main() functions. +func Serve(opts *ServeConfig) { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. 
Please notify the plugin author and report\n"+ + "this as a bug.\n") + os.Exit(1) + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + os.Exit(1) + } + + // negotiate the version and plugins + // start with default version in the handshake config + protoVersion, protoType, pluginSet := protocolVersion(opts) + + // Logging goes to the original stderr + log.SetOutput(os.Stderr) + + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // Register a listener so we can accept a connection + listener, err := serverListener() + if err != nil { + logger.Error("plugin init error", "error", err) + return + } + + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. + defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } + + var serverCert string + clientCert := os.Getenv("PLUGIN_CLIENT_CERT") + // If the client is configured using AutoMTLS, the certificate will be here, + // and we need to generate our own in response. + if tlsConfig == nil && clientCert != "" { + logger.Info("configuring server automatic mTLS") + clientCertPool := x509.NewCertPool() + if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { + logger.Error("client cert provided but failed to parse", "cert", clientCert) + } + + certPEM, keyPEM, err := generateCert() + if err != nil { + logger.Error("failed to generate client certificate", "error", err) + panic(err) + } + + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + logger.Error("failed to parse client certificate", "error", err) + panic(err) + } + + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCertPool, + MinVersion: tls.VersionTLS12, + } + + // We send back the raw leaf cert data for the client rather than the + // PEM, since the protocol can't handle newlines. + serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) + } + + // Create the channel to tell us when we're done + doneCh := make(chan struct{}) + + // Build the server type + var server ServerProtocol + switch protoType { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. 
+ if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: pluginSet, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + // Create the gRPC server + server = &GRPCServer{ + Plugins: pluginSet, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + logger: logger, + } + + default: + panic("unknown server protocol: " + protoType) + } + + // Initialize the servers + if err := server.Init(); err != nil { + logger.Error("protocol init", "error", err) + return + } + + logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) + + // Output the address and service name to stdout so that the client can bring it up. + fmt.Printf("%d|%d|%s|%s|%s|%s\n", + CoreProtocolVersion, + protoVersion, + listener.Addr().Network(), + listener.Addr().String(), + protoType, + serverCert) + os.Stdout.Sync() + + // Eat the interrupts + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + var count int32 = 0 + for { + <-ch + newCount := atomic.AddInt32(&count, 1) + logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) + } + }() + + // Set our new out, err + os.Stdout = stdout_w + os.Stderr = stderr_w + + // Accept connections and wait for completion + go server.Serve(listener) + <-doneCh +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + envMinPort := os.Getenv("PLUGIN_MIN_PORT") + envMaxPort := os.Getenv("PLUGIN_MAX_PORT") + + var minPort, maxPort int64 + var err error + + switch { + case len(envMinPort) == 0: + minPort = 0 + default: + minPort, err = strconv.ParseInt(envMinPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err) + } + } + + switch { + case len(envMaxPort) == 0: + maxPort = 0 + default: + maxPort, err = strconv.ParseInt(envMaxPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err) + } + } + + if minPort > maxPort { + return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + // Wrap the listener in rmListener so that the Unix domain socket file + // is removed on close. + return &rmListener{ + Listener: l, + Path: path, + }, nil +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also removes a file as part of the close. We +// use this to clean up the unix domain socket on close.
+type rmListener struct { + net.Listener + Path string +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return os.Remove(l.Path) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 00000000..033079ea --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 00000000..1d547aaa --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,18 @@ +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 00000000..2cf2c26c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,180 @@ +package plugin + +import ( + "bytes" + "context" + "io" + "net" + "net/rpc" + + "github.com/mitchellh/go-testing-interface" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" +) + +// TestOptions allows specifying options that can affect the behavior of the +// test functions +type TestOptions struct { + //ServerStdout causes the given value to be used in place of a blank buffer + //for RPCServer's Stdout + ServerStdout io.ReadCloser + + //ServerStderr causes the given value to be used in place of a blank buffer + //for RPCServer's Stderr + ServerStderr io.ReadCloser +} + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. +func TestConn(t testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. 
+ l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { + clientConn, serverConn := TestConn(t) + + server := rpc.NewServer() + go server.ServeConn(serverConn) + + client := rpc.NewClient(clientConn) + return client, server +} + +// TestPluginRPCConn returns a plugin RPC client and server that are connected +// together and configured. +func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { + // Create two net.Conns we can use to shuttle our control connection + clientConn, serverConn := TestConn(t) + + // Start up the server + server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + if opts != nil { + if opts.ServerStdout != nil { + server.Stdout = opts.ServerStdout + } + if opts.ServerStderr != nil { + server.Stderr = opts.ServerStderr + } + } + go server.ServeConn(serverConn) + + // Connect the client to the server + client, err := NewRPCClient(clientConn, ps) + if err != nil { + t.Fatalf("err: %s", err) + } + + return client, server +} + +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. +func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. 
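+// +// A usage sketch (KVPlugin is an assumed test plugin type implementing +// GRPCPlugin, not part of this package): +// +//	client, server := TestPluginGRPCConn(t, map[string]Plugin{ +//		"kv": &KVPlugin{}, +//	}) +//	defer client.Close() +//	defer server.Stop() +// +//	raw, err := client.Dispense("kv")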
+func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + DoneCh: make(chan struct{}), + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + logger: hclog.Default(), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, nil) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + broker: broker, + doneCtx: context.Background(), + controller: plugin.NewGRPCControllerClient(conn), + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/go-raftchunking/LICENSE b/vendor/github.com/hashicorp/go-raftchunking/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-raftchunking/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. 
“Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. 
Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-raftchunking/Makefile b/vendor/github.com/hashicorp/go-raftchunking/Makefile new file mode 100644 index 00000000..2b2b3963 --- /dev/null +++ b/vendor/github.com/hashicorp/go-raftchunking/Makefile @@ -0,0 +1,6 @@ +# Determine this makefile's path. +# Be sure to place this BEFORE `include` directives, if any. +THIS_FILE := $(lastword $(MAKEFILE_LIST)) + +proto: + protoc --go_out=paths=source_relative:. 
types/types.proto diff --git a/vendor/github.com/hashicorp/go-raftchunking/api.go b/vendor/github.com/hashicorp/go-raftchunking/api.go new file mode 100644 index 00000000..d6475e37 --- /dev/null +++ b/vendor/github.com/hashicorp/go-raftchunking/api.go @@ -0,0 +1,166 @@ +package raftchunking + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "fmt" + "io" + "time" + + proto "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-raftchunking/types" + "github.com/hashicorp/raft" +) + +var ( + // ChunkSize is the threshold used for breaking a large value into chunks. + // Defaults to the suggested max data size for the raft library. + ChunkSize = raft.SuggestedMaxDataSize +) + +// errorFuture is used to return a static error. +type errorFuture struct { + err error +} + +func (e errorFuture) Error() error { + return e.err +} + +func (e errorFuture) Response() interface{} { + return nil +} + +func (e errorFuture) Index() uint64 { + return 0 +} + +// multiFuture is a future specialized for the chunking case. It contains some +// number of other futures in the order in which data was chunked and sent to +// apply. +type multiFuture []raft.ApplyFuture + +// Error will return only when all Error functions in the contained futures +// return, in order. +func (m multiFuture) Error() error { + for _, v := range m { + if err := v.Error(); err != nil { + return err + } + } + + return nil +} + +// Index returns the index of the last chunk. Since required behavior is to not +// call this until Error is called, the last Index will correspond to the Apply +// of the final chunk. +func (m multiFuture) Index() uint64 { + // This shouldn't happen but need an escape hatch + if len(m) == 0 { + return 0 + } + + return m[len(m)-1].Index() +} + +// Response returns the response from underlying Apply of the last chunk. +func (m multiFuture) Response() interface{} { + // This shouldn't happen but need an escape hatch + if len(m) == 0 { + return nil + } + + return m[len(m)-1].Response() +} + +type ApplyFunc func(raft.Log, time.Duration) raft.ApplyFuture + +// ChunkingApply takes in a byte slice and chunks into ChunkSize (or less if +// EOF) chunks, calling Apply on each. It requires a corresponding wrapper +// around the FSM to handle reconstructing on the other end. Timeout will be the +// timeout for each individual operation, not total. The return value is a +// future whose Error() will return only when all underlying Apply futures have +// had Error() return. Note that any error indicates that the entire operation +// will not be applied, assuming the correct FSM wrapper is used. If extensions +// is passed in, it will be set as the Extensions value on the Apply once all +// chunks are received. +func ChunkingApply(cmd, extensions []byte, timeout time.Duration, applyFunc ApplyFunc) raft.ApplyFuture { + // Generate a random op num via 64 random bits. These only have to be + // unique across _in flight_ chunk operations until a Term changes so + // should be fine. + rb := make([]byte, 8) + n, err := rand.Read(rb) + if err != nil { + return errorFuture{err: err} + } + if n != 8 { + return errorFuture{err: fmt.Errorf("expected to read %d bytes for op num, read %d", 8, n)} + } + opNum := binary.BigEndian.Uint64(rb) + + var logs []raft.Log + var byteChunks [][]byte + var mf multiFuture + + // We break into chunks first so that we know how many chunks there will be + // to put in NumChunks in the extensions info. 
This could probably be a bit + // more efficient by just reslicing but doing it this way is a bit easier + // for others to follow/track and in this kind of operation this won't be + // the slow part anyways. + reader := bytes.NewReader(cmd) + remain := reader.Len() + for { + if remain <= 0 { + break + } + + if remain > ChunkSize { + remain = ChunkSize + } + + b := make([]byte, remain) + n, err := reader.Read(b) + if err != nil && err != io.EOF { + return errorFuture{err: err} + } + if n != remain { + return errorFuture{err: fmt.Errorf("expected to read %d bytes from buf, read %d", remain, n)} + } + + byteChunks = append(byteChunks, b) + remain = reader.Len() + } + + // Create the underlying chunked logs + for i, chunk := range byteChunks { + chunkInfo := &types.ChunkInfo{ + OpNum: opNum, + SequenceNum: uint32(i), + NumChunks: uint32(len(byteChunks)), + } + + // If extensions were passed in attach them to the last chunk so it + // will go through Apply at the end. + if i == len(byteChunks)-1 { + chunkInfo.NextExtensions = extensions + } + + chunkBytes, err := proto.Marshal(chunkInfo) + if err != nil { + return errorFuture{err: errwrap.Wrapf("error marshaling chunk info: {{err}}", err)} + } + logs = append(logs, raft.Log{ + Data: chunk, + Extensions: chunkBytes, + }) + } + + for _, log := range logs { + mf = append(mf, applyFunc(log, timeout)) + } + + return mf +} diff --git a/vendor/github.com/hashicorp/go-raftchunking/chunking.go b/vendor/github.com/hashicorp/go-raftchunking/chunking.go new file mode 100644 index 00000000..8578b05b --- /dev/null +++ b/vendor/github.com/hashicorp/go-raftchunking/chunking.go @@ -0,0 +1,103 @@ +package raftchunking + +import "github.com/mitchellh/copystructure" + +type ChunkStorage interface { + // StoreChunk stores Data from ChunkInfo according to the other metadata + // (OpNum, SeqNum). The bool returns whether or not all chunks have been + // received, as in, the number of non-nil chunks is the same as NumChunks. + StoreChunk(*ChunkInfo) (bool, error) + + // FinalizeOp gets all chunks for an op number and then removes the chunk + // info for that op from the store. It should only be called when + // StoreChunk for a given op number returns true but should be safe to call + // at any time; clearing an op can be accomplished by calling this function + // and ignoring the non-error result. + FinalizeOp(uint64) ([]*ChunkInfo, error) + + // GetState gets all currently tracked ops, for snapshotting + GetChunks() (ChunkMap, error) + + // RestoreChunks restores the current FSM state from a map + RestoreChunks(ChunkMap) error +} + +type State struct { + ChunkMap ChunkMap +} + +// ChunkInfo holds chunk information +type ChunkInfo struct { + OpNum uint64 + SequenceNum uint32 + NumChunks uint32 + Term uint64 + Data []byte +} + +// ChunkMap represents a set of data chunks. We use ChunkInfo with Data instead +// of bare []byte in case there is a need to extend this info later. +type ChunkMap map[uint64][]*ChunkInfo + +// InmemChunkStorage satisfies ChunkStorage using an in-memory-only tracking +// method. 
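
A minimal sketch of how a caller might drive ChunkingApply from a raft leader. The package name, the applyLarge helper, the raftNode parameter, and the 5-second timeout are all illustrative, not part of the vendored source; raft.Raft.ApplyLog is assumed to exist in the pinned raft commit, since that commit is the one that introduces the Log.Extensions field this library relies on.

package chunkdemo

import (
	"time"

	raftchunking "github.com/hashicorp/go-raftchunking"
	"github.com/hashicorp/raft"
)

// applyLarge submits an oversized payload as a series of chunked raft logs.
func applyLarge(raftNode *raft.Raft, payload []byte) error {
	future := raftchunking.ChunkingApply(payload, nil, 5*time.Second,
		func(l raft.Log, timeout time.Duration) raft.ApplyFuture {
			// ApplyLog (assumed API) preserves the Extensions field that
			// carries the per-chunk metadata marshaled above.
			return raftNode.ApplyLog(l, timeout)
		})
	// The returned multiFuture resolves only once every per-chunk
	// Apply future has reported back.
	return future.Error()
}
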
+type InmemChunkStorage struct { + chunks ChunkMap +} + +func NewInmemChunkStorage() *InmemChunkStorage { + return &InmemChunkStorage{ + chunks: make(ChunkMap), + } +} + +func (i *InmemChunkStorage) StoreChunk(chunk *ChunkInfo) (bool, error) { + chunks, ok := i.chunks[chunk.OpNum] + if !ok { + chunks = make([]*ChunkInfo, chunk.NumChunks) + i.chunks[chunk.OpNum] = chunks + } + + chunks[chunk.SequenceNum] = chunk + + for _, c := range chunks { + // Check for nil, but also check data length in case it ends up + // unmarshaling weirdly for some reason where it makes a new struct + // instead of keeping the pointer nil + if c == nil || len(c.Data) == 0 { + // Not done yet, so return + return false, nil + } + } + + return true, nil +} + +func (i *InmemChunkStorage) FinalizeOp(opNum uint64) ([]*ChunkInfo, error) { + ret := i.chunks[opNum] + delete(i.chunks, opNum) + return ret, nil +} + +func (i *InmemChunkStorage) GetChunks() (ChunkMap, error) { + ret, err := copystructure.Copy(i.chunks) + if err != nil { + return nil, err + } + return ret.(ChunkMap), nil +} + +func (i *InmemChunkStorage) RestoreChunks(chunks ChunkMap) error { + // If passed in explicit emptiness, set state to empty + if chunks == nil || len(chunks) == 0 { + i.chunks = make(ChunkMap) + return nil + } + + chunksCopy, err := copystructure.Copy(chunks) + if err != nil { + return err + } + i.chunks = chunksCopy.(ChunkMap) + return nil +} diff --git a/vendor/github.com/hashicorp/go-raftchunking/fsm.go b/vendor/github.com/hashicorp/go-raftchunking/fsm.go new file mode 100644 index 00000000..374b4773 --- /dev/null +++ b/vendor/github.com/hashicorp/go-raftchunking/fsm.go @@ -0,0 +1,258 @@ +package raftchunking + +import ( + "io" + + "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-raftchunking/types" + "github.com/hashicorp/raft" +) + +var _ raft.FSM = (*ChunkingFSM)(nil) +var _ raft.ConfigurationStore = (*ChunkingConfigurationStore)(nil) +var _ raft.BatchingFSM = (*ChunkingBatchingFSM)(nil) + +type ChunkingSuccess struct { + Response interface{} +} + +// ChunkingFSM is an FSM that implements chunking; it's the sister of +// ChunkingApply. +// +// N.B.: If a term change happens the final apply from the client will have a +// nil result and not be passed through to the underlying FSM. To detect this, +// the final apply to the underlying FSM is wrapped in ChunkingSuccess. 
+type ChunkingFSM struct { + underlying raft.FSM + store ChunkStorage + lastTerm uint64 +} + +type ChunkingBatchingFSM struct { + *ChunkingFSM + underlyingBatchingFSM raft.BatchingFSM +} + +type ChunkingConfigurationStore struct { + *ChunkingFSM + underlyingConfigurationStore raft.ConfigurationStore +} + +func NewChunkingFSM(underlying raft.FSM, store ChunkStorage) *ChunkingFSM { + ret := &ChunkingFSM{ + underlying: underlying, + store: store, + } + if store == nil { + ret.store = NewInmemChunkStorage() + } + return ret +} + +func NewChunkingBatchingFSM(underlying raft.BatchingFSM, store ChunkStorage) *ChunkingBatchingFSM { + ret := &ChunkingBatchingFSM{ + ChunkingFSM: &ChunkingFSM{ + underlying: underlying, + store: store, + }, + underlyingBatchingFSM: underlying, + } + if store == nil { + ret.ChunkingFSM.store = NewInmemChunkStorage() + } + return ret +} + +func NewChunkingConfigurationStore(underlying raft.ConfigurationStore, store ChunkStorage) *ChunkingConfigurationStore { + ret := &ChunkingConfigurationStore{ + ChunkingFSM: &ChunkingFSM{ + underlying: underlying, + store: store, + }, + underlyingConfigurationStore: underlying, + } + if store == nil { + ret.ChunkingFSM.store = NewInmemChunkStorage() + } + return ret +} + +func (c *ChunkingFSM) applyChunk(l *raft.Log) (*raft.Log, error) { + if l.Term != c.lastTerm { + // Term has changed. A raft library client that was applying chunks + // should get an error that it's no longer the leader and bail, and + // then any client of (Consul, Vault, etc.) should then retry the full + // chunking operation automatically, which will be under a different + // opnum. So it should be safe in this case to clear the map. + if err := c.store.RestoreChunks(nil); err != nil { + return nil, err + } + c.lastTerm = l.Term + } + + // Get chunk info from extensions + var ci types.ChunkInfo + if err := proto.Unmarshal(l.Extensions, &ci); err != nil { + return nil, errwrap.Wrapf("error unmarshaling chunk info: {{err}}", err) + } + + // Store the current chunk and find out if all chunks have arrived + done, err := c.store.StoreChunk(&ChunkInfo{ + OpNum: ci.OpNum, + SequenceNum: ci.SequenceNum, + NumChunks: ci.NumChunks, + Term: l.Term, + Data: l.Data, + }) + if err != nil { + return nil, err + } + if !done { + return nil, nil + } + + // All chunks are here; get the full set and clear storage of the op + chunks, err := c.store.FinalizeOp(ci.OpNum) + if err != nil { + return nil, err + } + + finalData := make([]byte, 0, len(chunks)*raft.SuggestedMaxDataSize) + + for _, chunk := range chunks { + finalData = append(finalData, chunk.Data...) + } + + // Use the latest log's values with the final data + logToApply := &raft.Log{ + Index: l.Index, + Term: l.Term, + Type: l.Type, + Data: finalData, + Extensions: ci.NextExtensions, + } + + return logToApply, nil +} + +// Apply applies the log, handling chunking as needed. The return value will +// either be an error or whatever is returned from the underlying Apply. 
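
A short sketch of wiring the wrapper into an application; appFSM and both helper names are hypothetical. Passing a nil ChunkStorage falls back to the in-memory store, per the constructors above, and the ChunkingSuccess wrapper is the signal described in the N.B. comment.

package chunkdemo

import (
	raftchunking "github.com/hashicorp/go-raftchunking"
	"github.com/hashicorp/raft"
)

// wrapFSM hands the application FSM to the chunking layer; nil storage
// selects NewInmemChunkStorage internally.
func wrapFSM(appFSM raft.FSM) raft.FSM {
	return raftchunking.NewChunkingFSM(appFSM, nil)
}

// unwrapResponse tells the final apply of a reassembled chunked operation
// (wrapped in ChunkingSuccess) apart from an ordinary pass-through apply.
func unwrapResponse(resp interface{}) (interface{}, bool) {
	if s, ok := resp.(raftchunking.ChunkingSuccess); ok {
		return s.Response, true
	}
	return resp, false
}
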
+func (c *ChunkingFSM) Apply(l *raft.Log) interface{} { + // Not chunking or wrong type, pass through + if l.Type != raft.LogCommand || l.Extensions == nil { + return c.underlying.Apply(l) + } + + logToApply, err := c.applyChunk(l) + if err != nil { + return err + } + + if logToApply != nil { + return ChunkingSuccess{Response: c.underlying.Apply(logToApply)} + } + + return nil +} + +func (c *ChunkingFSM) Snapshot() (raft.FSMSnapshot, error) { + return c.underlying.Snapshot() +} + +func (c *ChunkingFSM) Restore(rc io.ReadCloser) error { + return c.underlying.Restore(rc) +} + +// Note: this is used in tests via the Raft package test helper functions, even +// if it's not used in client code +func (c *ChunkingFSM) Underlying() raft.FSM { + return c.underlying +} + +func (c *ChunkingFSM) CurrentState() (*State, error) { + chunks, err := c.store.GetChunks() + if err != nil { + return nil, err + } + return &State{ + ChunkMap: chunks, + }, nil +} + +func (c *ChunkingFSM) RestoreState(state *State) error { + // If nil we'll restore to blank, so create a new state with a nil map + if state == nil { + state = new(State) + } + return c.store.RestoreChunks(state.ChunkMap) +} + +func (c *ChunkingConfigurationStore) StoreConfiguration(index uint64, configuration raft.Configuration) { + c.underlyingConfigurationStore.StoreConfiguration(index, configuration) +} + +// ApplyBatch applies the logs, handling chunking as needed. The return value will +// be an array containing an error or whatever is returned from the underlying +// Apply for each log. +func (c *ChunkingBatchingFSM) ApplyBatch(logs []*raft.Log) []interface{} { + // responses has a response for each log; their slice index should match. + responses := make([]interface{}, len(logs)) + + // sentLogs keeps track of which logs we sent. The key is the raft Index + // associated with the log and the value is true if this is a finalized set + // of chunks. + sentLogs := make(map[uint64]bool) + + // sendLogs is the subset of logs that we need to pass onto the underlying + // FSM. + sendLogs := make([]*raft.Log, 0, len(logs)) + + for i, l := range logs { + // Not chunking or wrong type, pass through + if l.Type != raft.LogCommand || l.Extensions == nil { + sendLogs = append(sendLogs, l) + sentLogs[l.Index] = false + continue + } + + logToApply, err := c.applyChunk(l) + if err != nil { + responses[i] = err + continue + } + + if logToApply != nil { + sendLogs = append(sendLogs, logToApply) + sentLogs[l.Index] = true + } + } + + // Send remaining logs to the underlying FSM. + var sentResponses []interface{} + if len(sendLogs) > 0 { + sentResponses = c.underlyingBatchingFSM.ApplyBatch(sendLogs) + } + + var sentCounter int + for j, l := range logs { + // If the response is already set we errored above and should continue + // onto the next. 
+ if responses[j] != nil { + continue + } + + var resp interface{} + if chunked, ok := sentLogs[l.Index]; ok { + resp = sentResponses[sentCounter] + if chunked { + resp = ChunkingSuccess{Response: sentResponses[sentCounter]} + } + sentCounter++ + } + + responses[j] = resp + } + + return responses +} diff --git a/vendor/github.com/hashicorp/go-raftchunking/go.mod b/vendor/github.com/hashicorp/go-raftchunking/go.mod new file mode 100644 index 00000000..8e126cdb --- /dev/null +++ b/vendor/github.com/hashicorp/go-raftchunking/go.mod @@ -0,0 +1,12 @@ +module github.com/hashicorp/go-raftchunking + +go 1.12 + +require ( + github.com/go-test/deep v1.0.2 + github.com/golang/protobuf v1.3.1 + github.com/hashicorp/errwrap v1.0.0 + github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17 + github.com/kr/pretty v0.1.0 + github.com/mitchellh/copystructure v1.0.0 +) diff --git a/vendor/github.com/hashicorp/go-raftchunking/go.sum b/vendor/github.com/hashicorp/go-raftchunking/go.sum new file mode 100644 index 00000000..94980752 --- /dev/null +++ b/vendor/github.com/hashicorp/go-raftchunking/go.sum @@ -0,0 +1,63 @@ +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod 
h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs= +github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17 h1:p+2EISNdFCnD9R+B4xCiqSn429MCFtvM41aHJDJ6qW4= +github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys 
v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/hashicorp/go-raftchunking/types/types.pb.go b/vendor/github.com/hashicorp/go-raftchunking/types/types.pb.go new file mode 100644 index 00000000..4a0764fe --- /dev/null +++ b/vendor/github.com/hashicorp/go-raftchunking/types/types.pb.go @@ -0,0 +1,115 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: types/types.proto + +package types + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ChunkInfo struct { + // OpNum is the ID of the op, used to ensure values are applied to the + // right operation + OpNum uint64 `protobuf:"varint,1,opt,name=op_num,json=opNum,proto3" json:"op_num,omitempty"` + // SequenceNum is the current number of the ops; when applying we should + // see this start at zero and increment by one without skips + SequenceNum uint32 `protobuf:"varint,2,opt,name=sequence_num,json=sequenceNum,proto3" json:"sequence_num,omitempty"` + // NumChunks is used to check whether all chunks have been received and + // reconstruction should be attempted + NumChunks uint32 `protobuf:"varint,3,opt,name=num_chunks,json=numChunks,proto3" json:"num_chunks,omitempty"` + // NextExtensions holds inner extensions information for the next layer + // down of Apply + NextExtensions []byte `protobuf:"bytes,4,opt,name=next_extensions,json=nextExtensions,proto3" json:"next_extensions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChunkInfo) Reset() { *m = ChunkInfo{} } +func (m *ChunkInfo) String() string { return proto.CompactTextString(m) } +func (*ChunkInfo) ProtoMessage() {} +func (*ChunkInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2c0f90c600ad7e2e, []int{0} +} + +func (m *ChunkInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ChunkInfo.Unmarshal(m, b) +} +func (m *ChunkInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ChunkInfo.Marshal(b, m, deterministic) +} +func (m *ChunkInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChunkInfo.Merge(m, src) +} +func (m *ChunkInfo) XXX_Size() int { + return xxx_messageInfo_ChunkInfo.Size(m) +} +func (m *ChunkInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ChunkInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ChunkInfo proto.InternalMessageInfo + +func (m *ChunkInfo) GetOpNum() uint64 { + if m != nil { + return m.OpNum + } + return 0 +} + +func (m *ChunkInfo) GetSequenceNum() uint32 { + if m != nil { + return m.SequenceNum + } + return 0 +} + +func (m *ChunkInfo) GetNumChunks() uint32 { + if m != nil { + return m.NumChunks + } + return 0 +} + +func (m *ChunkInfo) GetNextExtensions() []byte { + if m != nil { + return m.NextExtensions + } + return nil +} + +func init() { + proto.RegisterType((*ChunkInfo)(nil), "github_com_hashicorp_go_raftchunking_types.ChunkInfo") +} + +func init() { proto.RegisterFile("types/types.proto", fileDescriptor_2c0f90c600ad7e2e) } + +var 
fileDescriptor_2c0f90c600ad7e2e = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8f, 0x31, 0x4b, 0xc6, 0x30, + 0x10, 0x86, 0x89, 0xb6, 0x95, 0xc6, 0xaa, 0x18, 0x10, 0xba, 0x08, 0xd5, 0xc5, 0xe2, 0xa0, 0x83, + 0xff, 0x40, 0x71, 0x70, 0x71, 0xe8, 0xe8, 0x72, 0xb4, 0x21, 0x6d, 0x82, 0xe4, 0x2e, 0x36, 0x09, + 0xd4, 0x1f, 0xe1, 0x7f, 0x96, 0x1e, 0x7c, 0xdf, 0x72, 0xc3, 0x73, 0xef, 0x3d, 0xdc, 0x2b, 0xaf, + 0xd3, 0x6f, 0x30, 0xf1, 0x99, 0xe7, 0x53, 0x58, 0x29, 0x91, 0x7a, 0x5c, 0x5c, 0xb2, 0x79, 0x02, + 0x4d, 0x1e, 0xec, 0x18, 0xad, 0xd3, 0xb4, 0x06, 0x58, 0x08, 0xd6, 0x71, 0x4e, 0xda, 0x66, 0xfc, + 0x76, 0xb8, 0x00, 0x5f, 0xdc, 0xff, 0x09, 0x59, 0xbf, 0xed, 0xe8, 0x03, 0x67, 0x52, 0x37, 0xb2, + 0xa2, 0x00, 0x98, 0x7d, 0x2b, 0x3a, 0xd1, 0x17, 0x43, 0x49, 0xe1, 0x33, 0x7b, 0x75, 0x27, 0x9b, + 0x68, 0x7e, 0xb2, 0x41, 0x6d, 0x78, 0x79, 0xd2, 0x89, 0xfe, 0x62, 0x38, 0x3f, 0xb0, 0x3d, 0x72, + 0x2b, 0x25, 0x66, 0x0f, 0x6c, 0x8f, 0xed, 0x29, 0x07, 0x6a, 0xcc, 0x9e, 0xdd, 0x51, 0x3d, 0xc8, + 0x2b, 0x34, 0x5b, 0x02, 0xb3, 0x25, 0x83, 0xd1, 0x11, 0xc6, 0xb6, 0xe8, 0x44, 0xdf, 0x0c, 0x97, + 0x3b, 0x7e, 0x3f, 0xd2, 0xd7, 0xb3, 0xaf, 0x92, 0x1f, 0x9b, 0x2a, 0xee, 0xf2, 0xf2, 0x1f, 0x00, + 0x00, 0xff, 0xff, 0x4d, 0x98, 0xf7, 0x77, 0xe0, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-raftchunking/types/types.proto b/vendor/github.com/hashicorp/go-raftchunking/types/types.proto new file mode 100644 index 00000000..51c1f8e6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-raftchunking/types/types.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +option go_package = "types"; + +package github_com_hashicorp_go_raftchunking_types; + +message ChunkInfo { + // OpNum is the ID of the op, used to ensure values are applied to the + // right operation + uint64 op_num = 1; + + // SequenceNum is the current number of the ops; when applying we should + // see this start at zero and increment by one without skips + uint32 sequence_num = 2; + + // NumChunks is used to check whether all chunks have been received and + // reconstruction should be attempted + uint32 num_chunks = 3; + + // NextExtensions holds inner extensions information for the next layer + // down of Apply + bytes next_extensions = 4; +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go index d5e250a5..b299f8d9 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -34,9 +34,11 @@ import ( "net/url" "os" "strings" + "sync" "time" cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-hclog" ) var ( @@ -228,6 +230,16 @@ type Logger interface { Printf(string, ...interface{}) } +// To adapt an hclog.Logger to Logger for use by the existing hook functions +// without changing the API. +type hookLogger struct { + logger hclog.Logger +} + +func (h hookLogger) Printf(s string, args ...interface{}) { + h.logger.Info(fmt.Sprintf(s, args...)) +} + // RequestLogHook allows a function to run before each retry. The HTTP // request which will be made, and the retry number (0 for the initial // request) are available to users. The internal logger is exposed to @@ -266,7 +278,7 @@ type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Resp // like automatic retries to tolerate minor outages. type Client struct { HTTPClient *http.Client // Internal HTTP client. 
- Logger Logger // Customer logger instance. + Logger interface{} // Customer logger instance. Can be either Logger or hclog.Logger RetryWaitMin time.Duration // Minimum time to wait RetryWaitMax time.Duration // Maximum time to wait @@ -289,12 +301,14 @@ type Client struct { // ErrorHandler specifies the custom error handler to use, if any ErrorHandler ErrorHandler + + loggerInit sync.Once } // NewClient creates a new Client with default settings. func NewClient() *Client { return &Client{ - HTTPClient: cleanhttp.DefaultClient(), + HTTPClient: cleanhttp.DefaultPooledClient(), Logger: log.New(os.Stderr, "", log.LstdFlags), RetryWaitMin: defaultRetryWaitMin, RetryWaitMax: defaultRetryWaitMax, @@ -304,6 +318,26 @@ func NewClient() *Client { } } +func (c *Client) logger() interface{} { + c.loggerInit.Do(func() { + if c.Logger == nil { + return + } + + switch c.Logger.(type) { + case Logger: + // ok + case hclog.Logger: + // ok + default: + // This should happen in dev when they are setting Logger and work on code, not in prod. + panic(fmt.Sprintf("invalid logger type passed, must be Logger or hclog.Logger, was %T", c.Logger)) + } + }) + + return c.Logger +} + // DefaultRetryPolicy provides a default callback for Client.CheckRetry, which // will retry on connection errors and server errors. func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { @@ -385,8 +419,19 @@ func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Respo // Do wraps calling an HTTP method with retries. func (c *Client) Do(req *Request) (*http.Response, error) { - if c.Logger != nil { - c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL) + if c.HTTPClient == nil { + c.HTTPClient = cleanhttp.DefaultPooledClient() + } + + logger := c.logger() + + if logger != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[DEBUG] %s %s", req.Method, req.URL) + case hclog.Logger: + v.Debug("performing request", "method", req.Method, "url", req.URL) + } } var resp *http.Response @@ -399,6 +444,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) { if req.body != nil { body, err := req.body() if err != nil { + c.HTTPClient.CloseIdleConnections() return resp, err } if c, ok := body.(io.ReadCloser); ok { @@ -408,8 +454,15 @@ func (c *Client) Do(req *Request) (*http.Response, error) { } } - if c.RequestLogHook != nil { - c.RequestLogHook(c.Logger, req.Request, i) + if c.RequestLogHook != nil && logger != nil { + switch v := logger.(type) { + case Logger: + c.RequestLogHook(v, req.Request, i) + case hclog.Logger: + c.RequestLogHook(hookLogger{v}, req.Request, i) + default: + c.RequestLogHook(nil, req.Request, i) + } } // Attempt the request @@ -421,16 +474,28 @@ func (c *Client) Do(req *Request) (*http.Response, error) { // Check if we should continue with retries. checkOK, checkErr := c.CheckRetry(req.Context(), resp, err) - if err != nil { - if c.Logger != nil { - c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) - } - } else { - // Call this here to maintain the behavior of logging all requests, - // even if CheckRetry signals to stop. - if c.ResponseLogHook != nil { - // Call the response logger function if provided. 
- c.ResponseLogHook(c.Logger, resp) + if logger != nil { + if err != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + case hclog.Logger: + v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) + } + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. + switch v := logger.(type) { + case Logger: + c.ResponseLogHook(v, resp) + case hclog.Logger: + c.ResponseLogHook(hookLogger{v}, resp) + default: + c.ResponseLogHook(nil, resp) + } + } } } @@ -439,6 +504,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) { if checkErr != nil { err = checkErr } + c.HTTPClient.CloseIdleConnections() return resp, err } @@ -459,17 +525,24 @@ func (c *Client) Do(req *Request) (*http.Response, error) { if code > 0 { desc = fmt.Sprintf("%s (status: %d)", desc, code) } - if c.Logger != nil { - c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + if logger != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + case hclog.Logger: + v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) + } } select { case <-req.Context().Done(): + c.HTTPClient.CloseIdleConnections() return nil, req.Context().Err() case <-time.After(wait): } } if c.ErrorHandler != nil { + c.HTTPClient.CloseIdleConnections() return c.ErrorHandler(resp, err, c.RetryMax+1) } @@ -478,6 +551,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) { if resp != nil { resp.Body.Close() } + c.HTTPClient.CloseIdleConnections() return nil, fmt.Errorf("%s %s giving up after %d attempts", req.Method, req.URL, c.RetryMax+1) } @@ -487,8 +561,13 @@ func (c *Client) drainBody(body io.ReadCloser) { defer body.Close() _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) if err != nil { - if c.Logger != nil { - c.Logger.Printf("[ERR] error reading response body: %v", err) + if c.logger() != nil { + switch v := c.logger().(type) { + case Logger: + v.Printf("[ERR] error reading response body: %v", err) + case hclog.Logger: + v.Error("error reading response body", "error", err) + } } } } diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.mod b/vendor/github.com/hashicorp/go-retryablehttp/go.mod index d28c8c8e..becbb7ee 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/go.mod +++ b/vendor/github.com/hashicorp/go-retryablehttp/go.mod @@ -1,3 +1,6 @@ module github.com/hashicorp/go-retryablehttp -require github.com/hashicorp/go-cleanhttp v0.5.0 +require ( + github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-hclog v0.9.2 +) diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.sum b/vendor/github.com/hashicorp/go-retryablehttp/go.sum index 3ed0fd98..71afe568 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/go.sum +++ b/vendor/github.com/hashicorp/go-retryablehttp/go.sum @@ -1,2 +1,10 @@ -github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hashicorp/go-cleanhttp v0.5.1 
h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml new file mode 100644 index 00000000..76984907 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml @@ -0,0 +1,12 @@ +language: go + +sudo: false + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +script: + - go test -bench . -benchmem -v ./... diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md new file mode 100644 index 00000000..fbde8b9a --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/README.md @@ -0,0 +1,8 @@ +# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) + +Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. 
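
The vendored package below exposes `GenerateUUID`, `FormatUUID`, and `ParseUUID` (see `uuid.go` further down in this diff). As a quick orientation, here is a minimal round-trip sketch; it is illustrative only, not part of the upstream README, and assumes nothing beyond the API vendored here:

```go
package main

import (
	"fmt"
	"log"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	// GenerateUUID draws 16 bytes from crypto/rand and formats them
	// in the familiar 8-4-4-4-12 layout (no RFC version/variant bits).
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id)

	// ParseUUID reverses FormatUUID, returning the 16 component bytes.
	raw, err := uuid.ParseUUID(id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(raw)) // 16
}
```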
+ +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod new file mode 100644 index 00000000..dd57f9d2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-uuid diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go new file mode 100644 index 00000000..0c10c4e9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -0,0 +1,83 @@ +package uuid + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "io" +) + +// GenerateRandomBytes is used to generate random bytes of given size. +func GenerateRandomBytes(size int) ([]byte, error) { + return GenerateRandomBytesWithReader(size, rand.Reader) +} + +// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader. +func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) { + if reader == nil { + return nil, fmt.Errorf("provided reader is nil") + } + buf := make([]byte, size) + if _, err := io.ReadFull(reader, buf); err != nil { + return nil, fmt.Errorf("failed to read random bytes: %v", err) + } + return buf, nil +} + + +const uuidLen = 16 + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() (string, error) { + return GenerateUUIDWithReader(rand.Reader) +} + +// GenerateUUIDWithReader is used to generate a random UUID with a given Reader +func GenerateUUIDWithReader(reader io.Reader) (string, error) { + if reader == nil { + return "", fmt.Errorf("provided reader is nil") + } + buf, err := GenerateRandomBytesWithReader(uuidLen, reader) + if err != nil { + return "", err + } + return FormatUUID(buf) +} + +func FormatUUID(buf []byte) (string, error) { + if buflen := len(buf); buflen != uuidLen { + return "", fmt.Errorf("wrong length byte slice (%d)", buflen) + } + + return fmt.Sprintf("%x-%x-%x-%x-%x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]), nil +} + +func ParseUUID(uuid string) ([]byte, error) { + if len(uuid) != 2 * uuidLen + 4 { + return nil, fmt.Errorf("uuid string is wrong length") + } + + if uuid[8] != '-' || + uuid[13] != '-' || + uuid[18] != '-' || + uuid[23] != '-' { + return nil, fmt.Errorf("uuid is improperly formatted") + } + + hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36] + + ret, err := hex.DecodeString(hexStr) + if err != nil { + return nil, err + } + if len(ret) != uuidLen { + return nil, fmt.Errorf("decoded hex is the wrong length") + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml new file mode 100644 index 00000000..01c5dc21 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.9 + - "1.10" + - 1.11 + - 1.12 + +script: + - go test diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
“Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 00000000..6f3a15ce --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,65 @@ +# Versioning Library for Go +[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. 
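
The `constraint.go` that follows implements the operator table, including the Ruby-style pessimistic operator `~>`. A small sketch of its semantics (illustrative only; it uses nothing beyond the `NewConstraint`, `NewVersion`, `Must`, and `Check` APIs vendored in this diff):

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// "~> 1.2" pins the leading segment and lets the rest float:
	// it admits 1.2.0 and 1.9.9 but rejects 2.0.0.
	c, err := version.NewConstraint("~> 1.2")
	if err != nil {
		panic(err)
	}
	for _, raw := range []string{"1.2.0", "1.9.9", "2.0.0"} {
		v := version.Must(version.NewVersion(raw))
		fmt.Printf("%s satisfies %s: %v\n", v, c, c.Check(v))
	}
}
```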
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 00000000..d0557596 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,204 @@ +package version + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + check *Version + original string +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. +type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintFunc + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintFunc{ + "": constraintEqual, + "=": constraintEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "~>": constraintPessimistic, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + return &Constraint{ + f: constraintOperators[matches[1]], + check: check, + original: v, + }, nil +} + +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. 
+ return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod new file mode 100644 index 00000000..f5285555 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 00000000..1032c560 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,380 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) + +// The raw regular expression string used for testing the validity +// of a version. 
+const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + si := 0 + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = int64(val) + si++ + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: si, + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. 
+func (v *Version) Compare(other *Version) int {
+	// A quick, efficient equality check
+	if v.String() == other.String() {
+		return 0
+	}
+
+	segmentsSelf := v.Segments64()
+	segmentsOther := other.Segments64()
+
+	// If the segments are the same, we must compare on prerelease info
+	if reflect.DeepEqual(segmentsSelf, segmentsOther) {
+		preSelf := v.Prerelease()
+		preOther := other.Prerelease()
+		if preSelf == "" && preOther == "" {
+			return 0
+		}
+		if preSelf == "" {
+			return 1
+		}
+		if preOther == "" {
+			return -1
+		}
+
+		return comparePrereleases(preSelf, preOther)
+	}
+
+	// Get the highest specificity (hS), or if they're equal, just use segmentsSelf's length
+	lenSelf := len(segmentsSelf)
+	lenOther := len(segmentsOther)
+	hS := lenSelf
+	if lenSelf < lenOther {
+		hS = lenOther
+	}
+	// Compare the segments
+	// Because a constraint could have more/less specificity than the version it's
+	// checking, we need to account for a lopsided or jagged comparison
+	for i := 0; i < hS; i++ {
+		if i > lenSelf-1 {
+			// This means Self had the lower specificity
+			// Check to see if the remaining segments in Other are all zeros
+			if !allZero(segmentsOther[i:]) {
+				// if not, it means that Other has to be greater than Self
+				return -1
+			}
+			break
+		} else if i > lenOther-1 {
+			// This means Other had the lower specificity
+			// Check to see if the remaining segments in Self are all zeros
+			if !allZero(segmentsSelf[i:]) {
+				// if not, it means that Self has to be greater than Other
+				return 1
+			}
+			break
+		}
+		lhs := segmentsSelf[i]
+		rhs := segmentsOther[i]
+		if lhs == rhs {
+			continue
+		} else if lhs < rhs {
+			return -1
+		}
+		// Otherwise, rhs was > lhs; they're not equal
+		return 1
+	}
+
+	// if we got this far, they're equal
+	return 0
+}
+
+func allZero(segs []int64) bool {
+	for _, s := range segs {
+		if s != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+	if preSelf == preOther {
+		return 0
+	}
+
+	var selfInt int64
+	selfNumeric := true
+	selfInt, err := strconv.ParseInt(preSelf, 10, 64)
+	if err != nil {
+		selfNumeric = false
+	}
+
+	var otherInt int64
+	otherNumeric := true
+	otherInt, err = strconv.ParseInt(preOther, 10, 64)
+	if err != nil {
+		otherNumeric = false
+	}
+
+	// if a part is empty, we use the other to decide
+	if preSelf == "" {
+		if otherNumeric {
+			return -1
+		}
+		return 1
+	}
+
+	if preOther == "" {
+		if selfNumeric {
+			return 1
+		}
+		return -1
+	}
+
+	if selfNumeric && !otherNumeric {
+		return -1
+	} else if !selfNumeric && otherNumeric {
+		return 1
+	} else if !selfNumeric && !otherNumeric && preSelf > preOther {
+		return 1
+	} else if selfInt > otherInt {
+		return 1
+	}
+
+	return -1
+}
+
+func comparePrereleases(v string, other string) int {
+	// identical prereleases compare equal
+	if v == other {
+		return 0
+	}
+
+	// split both prereleases to analyze their parts
+	selfPreReleaseMeta := strings.Split(v, ".")
+	otherPreReleaseMeta := strings.Split(other, ".")
+
+	selfPreReleaseLen := len(selfPreReleaseMeta)
+	otherPreReleaseLen := len(otherPreReleaseMeta)
+
+	biggestLen := otherPreReleaseLen
+	if selfPreReleaseLen > otherPreReleaseLen {
+		biggestLen = selfPreReleaseLen
+	}
+
+	// loop over the parts to find the first difference
+	for i := 0; i < biggestLen; i = i + 1 {
+		partSelfPre := ""
+		if i < selfPreReleaseLen {
+			partSelfPre = selfPreReleaseMeta[i]
+		}
+
+		partOtherPre := ""
+		if i < otherPreReleaseLen {
+			partOtherPre = otherPreReleaseMeta[i]
+		}
+
+		compare := comparePart(partSelfPre, partOtherPre)
+		// if the parts are equal, continue the loop
+		if compare != 0 {
+			return compare
+		}
+	}
+
+	return 0
+}
+
+// Equal tests if two versions are equal.
+func (v *Version) Equal(o *Version) bool {
+	return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// GreaterThanOrEqual tests if this version is greater than or equal to another version.
+func (v *Version) GreaterThanOrEqual(o *Version) bool {
+	return v.Compare(o) >= 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// LessThanOrEqual tests if this version is less than or equal to another version.
+func (v *Version) LessThanOrEqual(o *Version) bool {
+	return v.Compare(o) <= 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+	return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+	return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+	segmentSlice := make([]int, len(v.segments))
+	for i, v := range v.segments {
+		segmentSlice[i] = int(v)
+	}
+	return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+	result := make([]int64, len(v.segments))
+	copy(result, v.segments)
+	return result
+}
+
+// String returns the full version string including pre-release
+// and metadata information.
+//
+// This value is rebuilt according to the parsed segments and other
+// information. Therefore, ambiguities in the version string such as
+// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and
+// missing parts (1.0 => 1.0.0) will be made into a canonicalized form
+// as shown in the parenthesized examples.
+func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 00000000..cc888d43 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/hashicorp/golang-lru/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore new file mode 100644 index 00000000..83656241 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 00000000..e474cd07 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,223 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. +type TwoQueueCache struct { + size int + recentSize int + + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q(size int) (*TwoQueueCache, error) { + return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. 
+func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, fmt.Errorf("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, fmt.Errorf("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU(evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache{ + size: size, + recentSize: recentSize, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. +func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) + return +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Remove removes the provided key from the cache. 
+func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md new file mode 100644 index 00000000..33e58cfa --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/README.md @@ -0,0 +1,25 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. + +Documentation +============= + +Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) + +Example +======= + +Using the LRU is very simple: + +```go +l, _ := New(128) +for i := 0; i < 256; i++ { + l.Add(i, nil) +} +if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go new file mode 100644 index 00000000..555225a2 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -0,0 +1,257 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). +// ARC is an enhancement over the standard LRU cache in that tracks both +// frequency and recency of use. This avoids a burst in access to new +// entries from evicting the frequently used older entries. It adds some +// additional tracking overhead to a standard LRU cache, computationally +// it is roughly 2x the cost, and the extra memory overhead is linear +// with the size of the cache. ARC has been patented by IBM, but is +// similar to the TwoQueueCache (2Q) which requires setting parameters. 
+type ARCCache struct { + size int // Size is the total capacity of the cache + p int // P is the dynamic preference towards T1 or T2 + + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 + + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 + + lock sync.RWMutex +} + +// NewARC creates an ARC of the given size +func NewARC(size int) (*ARCCache, error) { + // Create the sub LRUs + b1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + b2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + + // Initialize the ARC + c := &ARCCache{ + size: size, + p: 0, + t1: t1, + b1: b1, + t2: t2, + b2: b2, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // If the value is contained in T1 (recent), then + // promote it to T2 (frequent) + if val, ok := c.t1.Peek(key); ok { + c.t1.Remove(key) + c.t2.Add(key, val) + return val, ok + } + + // Check if the value is contained in T2 (frequent) + if val, ok := c.t2.Get(key); ok { + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. +func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) + return +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func (c *ARCCache) 
replace(b2ContainsKey bool) { + t1Len := c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) +} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 00000000..2547df97 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod new file mode 100644 index 00000000..8ad8826b --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/golang-lru + +go 1.12 diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 00000000..052a38b4 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,134 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// Cache is a thread-safe fixed size LRU cache. 
+type Cache struct { + lru simplelru.LRUCache + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. +func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { + lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) + if err != nil { + return nil, err + } + c := &Cache{ + lru: lru, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *Cache) Purge() { + c.lock.Lock() + c.lru.Purge() + c.lock.Unlock() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache) Add(key, value interface{}) (evicted bool) { + c.lock.Lock() + evicted = c.lru.Add(key, value) + c.lock.Unlock() + return evicted +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + value, ok = c.lru.Get(key) + c.lock.Unlock() + return value, ok +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + containKey := c.lru.Contains(key) + c.lock.RUnlock() + return containKey +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + value, ok = c.lru.Peek(key) + c.lock.RUnlock() + return value, ok +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.lru.Contains(key) { + return true, false + } + evicted = c.lru.Add(key, value) + return false, evicted +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) (present bool) { + c.lock.Lock() + present = c.lru.Remove(key) + c.lock.Unlock() + return +} + +// Resize changes the cache size. +func (c *Cache) Resize(size int) (evicted int) { + c.lock.Lock() + evicted = c.lru.Resize(size) + c.lock.Unlock() + return evicted +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) { + c.lock.Lock() + key, value, ok = c.lru.RemoveOldest() + c.lock.Unlock() + return +} + +// GetOldest returns the oldest entry +func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) { + c.lock.Lock() + key, value, ok = c.lru.GetOldest() + c.lock.Unlock() + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + keys := c.lru.Keys() + c.lock.RUnlock() + return keys +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + length := c.lru.Len() + c.lock.RUnlock() + return length +} diff --git a/vendor/github.com/hashicorp/raft-snapshot/LICENSE b/vendor/github.com/hashicorp/raft-snapshot/LICENSE new file mode 100644 index 00000000..a612ad98 --- /dev/null +++ b/vendor/github.com/hashicorp/raft-snapshot/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. 
Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/raft-snapshot/README.md b/vendor/github.com/hashicorp/raft-snapshot/README.md new file mode 100644 index 00000000..3a851105 --- /dev/null +++ b/vendor/github.com/hashicorp/raft-snapshot/README.md @@ -0,0 +1 @@ +# raft-snapshot \ No newline at end of file diff --git a/vendor/github.com/hashicorp/raft-snapshot/archive.go b/vendor/github.com/hashicorp/raft-snapshot/archive.go new file mode 100644 index 00000000..9046d731 --- /dev/null +++ b/vendor/github.com/hashicorp/raft-snapshot/archive.go @@ -0,0 +1,278 @@ +// The archive utilities manage the internal format of a snapshot, which is a +// tar file with the following contents: +// +// meta.json - JSON-encoded snapshot metadata from Raft +// state.bin - Encoded snapshot data from Raft +// SHA256SUMS - SHA-256 sums of the above two files +// +// The integrity information is automatically created and checked, and a failure +// there just looks like an error to the caller. +package snapshot + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "hash" + "io" + "io/ioutil" + "time" + + "github.com/hashicorp/raft" +) + +// hashList manages a list of filenames and their hashes. +type hashList struct { + hashes map[string]hash.Hash +} + +// newHashList returns a new hashList. +func newHashList() *hashList { + return &hashList{ + hashes: make(map[string]hash.Hash), + } +} + +// Add creates a new hash for the given file. +func (hl *hashList) Add(file string) hash.Hash { + if existing, ok := hl.hashes[file]; ok { + return existing + } + + h := sha256.New() + hl.hashes[file] = h + return h +} + +// Encode takes the current sum of all the hashes and saves the hash list as a +// SHA256SUMS-style text file. +func (hl *hashList) Encode(w io.Writer) error { + for file, h := range hl.hashes { + if _, err := fmt.Fprintf(w, "%x %s\n", h.Sum([]byte{}), file); err != nil { + return err + } + } + return nil +} + +// DecodeAndVerify reads a SHA256SUMS-style text file and checks the results +// against the current sums for all the hashes. +func (hl *hashList) DecodeAndVerify(r io.Reader) error { + // Read the file and make sure everything in there has a matching hash. + seen := make(map[string]struct{}) + s := bufio.NewScanner(r) + for s.Scan() { + sha := make([]byte, sha256.Size) + var file string + if _, err := fmt.Sscanf(s.Text(), "%x %s", &sha, &file); err != nil { + return err + } + + h, ok := hl.hashes[file] + if !ok { + return fmt.Errorf("list missing hash for %q", file) + } + if !bytes.Equal(sha, h.Sum([]byte{})) { + return fmt.Errorf("hash check failed for %q", file) + } + seen[file] = struct{}{} + } + if err := s.Err(); err != nil { + return err + } + + // Make sure everything we had a hash for was seen. + for file := range hl.hashes { + if _, ok := seen[file]; !ok { + return fmt.Errorf("file missing for %q", file) + } + } + + return nil +} + +// write takes a writer and creates an archive with the snapshot metadata, +// the snapshot itself, and adds some integrity checking information. +func write(out io.Writer, metadata *raft.SnapshotMeta, snap io.Reader, sealer Sealer) error { + // Start a new tarball. + now := time.Now() + archive := tar.NewWriter(out) + + // Create a hash list that we will use to write a SHA256SUMS file into + // the archive. + hl := newHashList() + + // Encode the snapshot metadata, which we need to feed back during a + // restore. 
+	metaHash := hl.Add("meta.json")
+	var metaBuffer bytes.Buffer
+	enc := json.NewEncoder(&metaBuffer)
+	if err := enc.Encode(metadata); err != nil {
+		return fmt.Errorf("failed to encode snapshot metadata: %v", err)
+	}
+	if err := archive.WriteHeader(&tar.Header{
+		Name:    "meta.json",
+		Mode:    0600,
+		Size:    int64(metaBuffer.Len()),
+		ModTime: now,
+	}); err != nil {
+		return fmt.Errorf("failed to write snapshot metadata header: %v", err)
+	}
+	if _, err := io.Copy(archive, io.TeeReader(&metaBuffer, metaHash)); err != nil {
+		return fmt.Errorf("failed to write snapshot metadata: %v", err)
+	}
+
+	// Copy the snapshot data given the size from the metadata.
+	snapHash := hl.Add("state.bin")
+	if err := archive.WriteHeader(&tar.Header{
+		Name:    "state.bin",
+		Mode:    0600,
+		Size:    metadata.Size,
+		ModTime: now,
+	}); err != nil {
+		return fmt.Errorf("failed to write snapshot data header: %v", err)
+	}
+	if _, err := io.CopyN(archive, io.TeeReader(snap, snapHash), metadata.Size); err != nil {
+		return fmt.Errorf("failed to write snapshot data: %v", err)
+	}
+
+	// Create a SHA256SUMS file that we can use to verify on restore.
+	var shaBuffer bytes.Buffer
+	if err := hl.Encode(&shaBuffer); err != nil {
+		return fmt.Errorf("failed to encode snapshot hashes: %v", err)
+	}
+	if err := archive.WriteHeader(&tar.Header{
+		Name:    "SHA256SUMS",
+		Mode:    0600,
+		Size:    int64(shaBuffer.Len()),
+		ModTime: now,
+	}); err != nil {
+		return fmt.Errorf("failed to write snapshot hashes header: %v", err)
+	}
+	if _, err := io.Copy(archive, &shaBuffer); err != nil {
+		return fmt.Errorf("failed to write snapshot hashes: %v", err)
+	}
+
+	if sealer != nil {
+		// Create a SHA256SUMS.sealed file that we can use to verify on restore.
+		var shaBuffer bytes.Buffer
+		if err := hl.Encode(&shaBuffer); err != nil {
+			return fmt.Errorf("failed to encode snapshot hashes: %v", err)
+		}
+
+		sealed, err := sealer.Seal(context.Background(), shaBuffer.Bytes())
+		if err != nil {
+			return fmt.Errorf("failed to seal snapshot hashes: %v", err)
+		}
+
+		sealedSHABuffer := bytes.NewBuffer(sealed)
+		if err := archive.WriteHeader(&tar.Header{
+			Name:    "SHA256SUMS.sealed",
+			Mode:    0600,
+			Size:    int64(sealedSHABuffer.Len()),
+			ModTime: now,
+		}); err != nil {
+			return fmt.Errorf("failed to write sealed snapshot hashes header: %v", err)
+		}
+		if _, err := io.Copy(archive, sealedSHABuffer); err != nil {
+			return fmt.Errorf("failed to write sealed snapshot hashes: %v", err)
+		}
+	}
+
+	// Finalize the archive.
+	if err := archive.Close(); err != nil {
+		return fmt.Errorf("failed to finalize snapshot: %v", err)
+	}
+
+	return nil
+}
+
+// read takes a reader and extracts the snapshot metadata and the snapshot
+// itself, and also checks the integrity of the data.
+func read(in io.Reader, metadata *raft.SnapshotMeta, snap io.Writer, sealer Sealer) error {
+	// Start a new tar reader.
+	archive := tar.NewReader(in)
+
+	// Create a hash list that we will use to compare with the SHA256SUMS
+	// file in the archive.
+	hl := newHashList()
+
+	// Populate the hashes for all the files we expect to see. The check at
+	// the end will make sure these are all present in the SHA256SUMS file
+	// and that the hashes match.
+	metaHash := hl.Add("meta.json")
+	snapHash := hl.Add("state.bin")
+
+	// Look through the archive for the pieces we care about.
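+	// We expect at most four entries: meta.json, state.bin, SHA256SUMS, and
+	// (when a sealer is in use) SHA256SUMS.sealed; anything else is rejected.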
+	var shaBuffer bytes.Buffer
+	var sealedSHABuffer bytes.Buffer
+	for {
+		hdr, err := archive.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return fmt.Errorf("failed reading snapshot: %v", err)
+		}
+
+		switch hdr.Name {
+		case "meta.json":
+			// Previously we used json.Decode to decode the archive stream. There are
+			// edge cases in which it doesn't read all the bytes from the stream, even
+			// though the JSON object is still parsed properly. Since we simultaneously
+			// fed everything to metaHash, our hash ended up being different than what
+			// we calculated when creating the snapshot, which in turn made the
+			// snapshot verification fail. By explicitly reading the whole thing first
+			// we ensure that we calculate the correct hash independent of how
+			// json.Decode works internally.
+			buf, err := ioutil.ReadAll(io.TeeReader(archive, metaHash))
+			if err != nil {
+				return fmt.Errorf("failed to read snapshot metadata: %v", err)
+			}
+			if err := json.Unmarshal(buf, &metadata); err != nil {
+				return fmt.Errorf("failed to decode snapshot metadata: %v", err)
+			}
+
+		case "state.bin":
+			if _, err := io.Copy(io.MultiWriter(snap, snapHash), archive); err != nil {
+				return fmt.Errorf("failed to read or write snapshot data: %v", err)
+			}
+
+		case "SHA256SUMS":
+			if _, err := io.Copy(&shaBuffer, archive); err != nil {
+				return fmt.Errorf("failed to read snapshot hashes: %v", err)
+			}
+
+		case "SHA256SUMS.sealed":
+			if _, err := io.Copy(&sealedSHABuffer, archive); err != nil {
+				return fmt.Errorf("failed to read snapshot hashes: %v", err)
+			}
+
+		default:
+			return fmt.Errorf("unexpected file %q in snapshot", hdr.Name)
+		}
+	}
+
+	if sealer != nil {
+		opened, err := sealer.Open(context.Background(), sealedSHABuffer.Bytes())
+		if err != nil {
+			return fmt.Errorf("failed to open the sealed hashes: %v", err)
+		}
+		// Verify all the hashes.
+		if err := hl.DecodeAndVerify(bytes.NewBuffer(opened)); err != nil {
+			return fmt.Errorf("failed checking integrity of snapshot: %v", err)
+		}
+	}
+
+	// Verify all the hashes.
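+	// The plain-text SHA256SUMS file is checked unconditionally, even when a
+	// sealer has already verified the sealed copy above.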
+ if err := hl.DecodeAndVerify(&shaBuffer); err != nil { + return fmt.Errorf("failed checking integrity of snapshot: %v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/raft-snapshot/go.mod b/vendor/github.com/hashicorp/raft-snapshot/go.mod new file mode 100644 index 00000000..7382e17e --- /dev/null +++ b/vendor/github.com/hashicorp/raft-snapshot/go.mod @@ -0,0 +1,11 @@ +module github.com/hashicorp/raft-snapshot + +go 1.12 + +require ( + github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect + github.com/hashicorp/consul/sdk v0.1.1 + github.com/hashicorp/go-hclog v0.9.2 + github.com/hashicorp/go-msgpack v0.5.5 + github.com/hashicorp/raft v1.0.1 +) diff --git a/vendor/github.com/hashicorp/raft-snapshot/go.sum b/vendor/github.com/hashicorp/raft-snapshot/go.sum new file mode 100644 index 00000000..c0a6edd9 --- /dev/null +++ b/vendor/github.com/hashicorp/raft-snapshot/go.sum @@ -0,0 +1,45 @@ +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/raft v1.0.1 h1:94uRdS11oEneUkxmXq6Vg9shNhBILh2UTb9crQjJWl0= 
+github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/hashicorp/raft-snapshot/snapshot.go b/vendor/github.com/hashicorp/raft-snapshot/snapshot.go new file mode 100644 index 00000000..0c3ce739 --- /dev/null +++ b/vendor/github.com/hashicorp/raft-snapshot/snapshot.go @@ -0,0 +1,234 @@ +// snapshot manages the interactions between Consul and Raft in order to take +// and restore snapshots for disaster recovery. The internal format of a +// snapshot is simply a tar file, as described in archive.go. +package snapshot + +import ( + "compress/gzip" + "context" + "fmt" + "io" + "io/ioutil" + "os" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" +) + +// Snapshot is a structure that holds state about a temporary file that is used +// to hold a snapshot. By using an intermediate file we avoid holding everything +// in memory. +type Snapshot struct { + file *os.File + index uint64 +} + +// Size returns the file size of the snapshot archive. +func (s *Snapshot) Size() (int64, error) { + info, err := s.file.Stat() + if err != nil { + return 0, err + } + + return info.Size(), nil +} + +// Sealer is used to seal and open the SHASUM file inside the archive. +type Sealer interface { + Seal(context.Context, []byte) ([]byte, error) + Open(context.Context, []byte) ([]byte, error) +} + +// New takes a state snapshot of the given Raft instance into a temporary file +// and returns an object that gives access to the file as an io.Reader. You must +// arrange to call Close() on the returned object or else you will leak a +// temporary file. 
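+//
+// A hypothetical caller might look like the following (r, logger, and dst are
+// assumed to be an initialized *raft.Raft, an hclog.Logger, and any io.Writer):
+//
+//	snap, err := snapshot.New(logger, r)
+//	if err != nil {
+//		return err
+//	}
+//	defer snap.Close()
+//	_, err = io.Copy(dst, snap) // stream the gzipped archive somewhere durable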
+func New(logger log.Logger, r *raft.Raft) (*Snapshot, error) {
+	return NewWithSealer(logger, r, nil)
+}
+
+func NewWithSealer(logger log.Logger, r *raft.Raft, sealer Sealer) (*Snapshot, error) {
+	// Take the snapshot.
+	future := r.Snapshot()
+	if err := future.Error(); err != nil {
+		return nil, fmt.Errorf("Raft error when taking snapshot: %v", err)
+	}
+
+	// Open up the snapshot.
+	metadata, snap, err := future.Open()
+	if err != nil {
+		return nil, fmt.Errorf("failed to open snapshot: %v", err)
+	}
+	defer func() {
+		if err := snap.Close(); err != nil {
+			logger.Error("failed to close Raft snapshot", "error", err)
+		}
+	}()
+
+	// Make a scratch file to receive the contents so that we don't buffer
+	// everything in memory. This gets deleted in Close() since we keep it
+	// around for re-reading.
+	archive, err := ioutil.TempFile("", "snapshot")
+	if err != nil {
+		return nil, fmt.Errorf("failed to create snapshot file: %v", err)
+	}
+
+	// If anything goes wrong after this point, we will attempt to clean up
+	// the temp file. The happy path will disarm this.
+	var keep bool
+	defer func() {
+		if keep {
+			return
+		}
+
+		if err := os.Remove(archive.Name()); err != nil {
+			logger.Error("failed to clean up temp snapshot", "error", err)
+		}
+	}()
+
+	// Wrap the file writer in a gzip compressor.
+	compressor := gzip.NewWriter(archive)
+
+	// Write the archive.
+	if err := write(compressor, metadata, snap, sealer); err != nil {
+		return nil, fmt.Errorf("failed to write snapshot file: %v", err)
+	}
+
+	// Finish the compressed stream.
+	if err := compressor.Close(); err != nil {
+		return nil, fmt.Errorf("failed to compress snapshot file: %v", err)
+	}
+
+	// Sync the compressed file and rewind it so it's ready to be streamed
+	// out by the caller.
+	if err := archive.Sync(); err != nil {
+		return nil, fmt.Errorf("failed to sync snapshot: %v", err)
+	}
+	if _, err := archive.Seek(0, 0); err != nil {
+		return nil, fmt.Errorf("failed to rewind snapshot: %v", err)
+	}
+
+	keep = true
+	return &Snapshot{archive, metadata.Index}, nil
+}
+
+// Index returns the index of the snapshot. This is safe to call on a nil
+// snapshot; it will just return 0.
+func (s *Snapshot) Index() uint64 {
+	if s == nil {
+		return 0
+	}
+	return s.index
+}
+
+// Read passes through to the underlying snapshot file. This is safe to call on
+// a nil snapshot; it will just return io.EOF.
+func (s *Snapshot) Read(p []byte) (n int, err error) {
+	if s == nil {
+		return 0, io.EOF
+	}
+	return s.file.Read(p)
+}
+
+// Close closes the snapshot and removes any temporary storage associated with
+// it. You must arrange to call this whenever New() has been called
+// successfully. This is safe to call on a nil snapshot.
+func (s *Snapshot) Close() error {
+	if s == nil {
+		return nil
+	}
+
+	if err := s.file.Close(); err != nil {
+		return err
+	}
+	return os.Remove(s.file.Name())
+}
+
+// Verify takes the snapshot from the reader and verifies its contents.
+func Verify(in io.Reader) (*raft.SnapshotMeta, error) {
+	// Wrap the reader in a gzip decompressor.
+	decomp, err := gzip.NewReader(in)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decompress snapshot: %v", err)
+	}
+	defer decomp.Close()
+
+	// Read the archive, throwing away the snapshot data.
+ var metadata raft.SnapshotMeta + if err := read(decomp, &metadata, ioutil.Discard, nil); err != nil { + return nil, fmt.Errorf("failed to read snapshot file: %v", err) + } + return &metadata, nil +} + +// Restore takes the snapshot from the reader and attempts to apply it to the +// given Raft instance. +func Restore(logger log.Logger, in io.Reader, r *raft.Raft) error { + return RestoreWithSealer(logger, in, r, nil) +} + +func RestoreWithSealer(logger log.Logger, in io.Reader, r *raft.Raft, sealer Sealer) error { + var metadata raft.SnapshotMeta + snap, cleanupFunc, err := WriteToTempFileWithSealer(logger, in, &metadata, sealer) + if err != nil { + return err + } + defer cleanupFunc() + + // Feed the snapshot into Raft. + if err := r.Restore(&metadata, snap, 0); err != nil { + return fmt.Errorf("Raft error when restoring snapshot: %v", err) + } + + return nil +} + +func WriteToTempFile(logger log.Logger, in io.Reader, metadata *raft.SnapshotMeta) (*os.File, func(), error) { + return WriteToTempFileWithSealer(logger, in, metadata, nil) +} + +func WriteToTempFileWithSealer(logger log.Logger, in io.Reader, metadata *raft.SnapshotMeta, sealer Sealer) (*os.File, func(), error) { + // Wrap the reader in a gzip decompressor. + decomp, err := gzip.NewReader(in) + if err != nil { + return nil, nil, fmt.Errorf("failed to decompress snapshot: %v", err) + } + defer func() { + if err := decomp.Close(); err != nil { + logger.Error("failed to close snapshot decompressor", "error", err) + } + }() + + // Make a scratch file to receive the contents of the snapshot data so + // we can avoid buffering in memory. + snap, err := ioutil.TempFile("", "snapshot") + if err != nil { + return nil, nil, fmt.Errorf("failed to create temp snapshot file: %v", err) + } + cleanupFunc := func() { + if err := snap.Close(); err != nil { + logger.Error("failed to close temp snapshot", "error", err) + } + if err := os.Remove(snap.Name()); err != nil { + logger.Error("failed to clean up temp snapshot", "error", err) + } + } + + // Read the archive. + if err := read(decomp, metadata, snap, sealer); err != nil { + cleanupFunc() + return nil, nil, fmt.Errorf("failed to read snapshot file: %v", err) + } + + // Sync and rewind the file so it's ready to be read again. 
+ if err := snap.Sync(); err != nil { + cleanupFunc() + return nil, nil, fmt.Errorf("failed to sync temp snapshot: %v", err) + } + if _, err := snap.Seek(0, 0); err != nil { + cleanupFunc() + return nil, nil, fmt.Errorf("failed to rewind temp snapshot: %v", err) + } + + return snap, cleanupFunc, nil +} diff --git a/vendor/github.com/hashicorp/raft/.gitignore b/vendor/github.com/hashicorp/raft/.gitignore new file mode 100644 index 00000000..83656241 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/raft/.travis.yml b/vendor/github.com/hashicorp/raft/.travis.yml new file mode 100644 index 00000000..faeb11ff --- /dev/null +++ b/vendor/github.com/hashicorp/raft/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + # Disabled until https://github.com/armon/go-metrics/issues/59 is fixed + # - 1.6 + - 1.8 + - 1.9 + - 1.12 + - tip + +install: make deps +script: + - make integ + +notifications: + flowdock: + secure: fZrcf9rlh2IrQrlch1sHkn3YI7SKvjGnAl/zyV5D6NROe1Bbr6d3QRMuCXWWdhJHzjKmXk5rIzbqJhUc0PNF7YjxGNKSzqWMQ56KcvN1k8DzlqxpqkcA3Jbs6fXCWo2fssRtZ7hj/wOP1f5n6cc7kzHDt9dgaYJ6nO2fqNPJiTc= + diff --git a/vendor/github.com/hashicorp/raft/CHANGELOG.md b/vendor/github.com/hashicorp/raft/CHANGELOG.md new file mode 100644 index 00000000..56b4443d --- /dev/null +++ b/vendor/github.com/hashicorp/raft/CHANGELOG.md @@ -0,0 +1,65 @@ +# UNRELEASED + +FEATURES + +* Improve FSM apply performance through batching. Implementing the `BatchingFSM` interface enables this new feature [[GH-364](https://github.com/hashicorp/raft/pull/364)] + +IMPROVEMENTS + +* Replace logger with hclog [[GH-360](https://github.com/hashicorp/raft/pull/360)] + +BUG FIXES + +* Export the leader field in LeaderObservation [[GH-357](https://github.com/hashicorp/raft/pull/357)] +* Fix snapshot to not attempt to truncate a negative range [[GH-358](https://github.com/hashicorp/raft/pull/358)] + +# 1.1.1 (July 23rd, 2019) + +FEATURES + +* Add support for extensions to be sent on log entries [[GH-353](https://github.com/hashicorp/raft/pull/353)] +* Add config option to skip snapshot restore on startup [[GH-340](https://github.com/hashicorp/raft/pull/340)] +* Add optional configuration store interface [[GH-339](https://github.com/hashicorp/raft/pull/339)] + +IMPROVEMENTS + +* Break out of group commit early when no logs are present [[GH-341](https://github.com/hashicorp/raft/pull/341)] + +BUGFIXES + +* Fix 64-bit counters on 32-bit platforms [[GH-344](https://github.com/hashicorp/raft/pull/344)] +* Don't defer closing source in recover/restore operations since it's in a loop [[GH-337](https://github.com/hashicorp/raft/pull/337)] + +# 1.1.0 (May 23rd, 2019) + +FEATURES + +* Add transfer leadership extension [[GH-306](https://github.com/hashicorp/raft/pull/306)] + +IMPROVEMENTS + +* Move to `go mod` [[GH-323](https://github.com/hashicorp/consul/pull/323)] +* Leveled log [[GH-321](https://github.com/hashicorp/consul/pull/321)] +* Add peer changes to observations [[GH-326](https://github.com/hashicorp/consul/pull/326)] + +BUGFIXES + +* Copy the contents of an InmemSnapshotStore when opening a snapshot [[GH-270](https://github.com/hashicorp/consul/pull/270)] +* Fix logging panic when converting parameters to 
strings [[GH-332](https://github.com/hashicorp/consul/pull/332)] + +# 1.0.1 (April 12th, 2019) + +IMPROVEMENTS + +* InMemTransport: Add timeout for sending a message [[GH-313](https://github.com/hashicorp/raft/pull/313)] +* ensure 'make deps' downloads test dependencies like testify [[GH-310](https://github.com/hashicorp/raft/pull/310)] +* Clarifies function of CommitTimeout [[GH-309](https://github.com/hashicorp/raft/pull/309)] +* Add additional metrics regarding log dispatching and committal [[GH-316](https://github.com/hashicorp/raft/pull/316)] + +# 1.0.0 (October 3rd, 2017) + +v1.0.0 takes the changes that were staged in the library-v2-stage-one branch. This version manages server identities using a UUID, so introduces some breaking API changes. It also versions the Raft protocol, and requires some special steps when interoperating with Raft servers running older versions of the library (see the detailed comment in config.go about version compatibility). You can reference https://github.com/hashicorp/consul/pull/2222 for an idea of what was required to port Consul to these new interfaces. + +# 0.1.0 (September 29th, 2017) + +v0.1.0 is the original stable version of the library that was in master and has been maintained with no breaking API changes. This was in use by Consul prior to version 0.7.0. diff --git a/vendor/github.com/hashicorp/raft/LICENSE b/vendor/github.com/hashicorp/raft/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/raft/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/raft/Makefile b/vendor/github.com/hashicorp/raft/Makefile new file mode 100644 index 00000000..ec95881b --- /dev/null +++ b/vendor/github.com/hashicorp/raft/Makefile @@ -0,0 +1,36 @@ +DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...) +TEST_RESULTS_DIR?=/tmp/test-results + +test: + go test $(TESTARGS) -timeout=60s -race . + go test $(TESTARGS) -timeout=60s -tags batchtest -race . + +integ: test + INTEG_TESTS=yes go test $(TESTARGS) -timeout=25s -run=Integ . + INTEG_TESTS=yes go test $(TESTARGS) -timeout=25s -tags batchtest -run=Integ . + +ci.test-norace: + gotestsum --format=short-verbose --junitfile $(TEST_RESULTS_DIR)/gotestsum-report-test.xml -- -timeout=60s + gotestsum --format=short-verbose --junitfile $(TEST_RESULTS_DIR)/gotestsum-report-test.xml -- -timeout=60s -tags batchtest + +ci.test: + gotestsum --format=short-verbose --junitfile $(TEST_RESULTS_DIR)/gotestsum-report-test.xml -- -timeout=60s -race . + gotestsum --format=short-verbose --junitfile $(TEST_RESULTS_DIR)/gotestsum-report-test.xml -- -timeout=60s -race -tags batchtest . + +ci.integ: ci.test + INTEG_TESTS=yes gotestsum --format=short-verbose --junitfile $(TEST_RESULTS_DIR)/gotestsum-report-integ.xml -- -timeout=25s -run=Integ . + INTEG_TESTS=yes gotestsum --format=short-verbose --junitfile $(TEST_RESULTS_DIR)/gotestsum-report-integ.xml -- -timeout=25s -run=Integ -tags batchtest . + +fuzz: + go test $(TESTARGS) -timeout=500s ./fuzzy + go test $(TESTARGS) -timeout=500s -tags batchtest ./fuzzy + +deps: + go get -t -d -v ./... + echo $(DEPS) | xargs -n1 go get -d + +cov: + INTEG_TESTS=yes gocov test github.com/hashicorp/raft | gocov-html > /tmp/coverage.html + open /tmp/coverage.html + +.PHONY: test cov integ deps diff --git a/vendor/github.com/hashicorp/raft/README.md b/vendor/github.com/hashicorp/raft/README.md new file mode 100644 index 00000000..dc8bb644 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/README.md @@ -0,0 +1,107 @@ +raft [![Build Status](https://travis-ci.org/hashicorp/raft.png)](https://travis-ci.org/hashicorp/raft) [![CircleCI](https://circleci.com/gh/hashicorp/raft.svg?style=svg)](https://circleci.com/gh/hashicorp/raft) +==== + +raft is a [Go](http://www.golang.org) library that manages a replicated +log and can be used with an FSM to manage replicated state machines. It +is a library for providing [consensus](http://en.wikipedia.org/wiki/Consensus_(computer_science)). + +The use cases for such a library are far-reaching, such as replicated state +machines which are a key component of many distributed systems. They enable +building Consistent, Partition Tolerant (CP) systems, with limited +fault tolerance as well. + +## Building + +If you wish to build raft you'll need Go version 1.2+ installed. + +Please check your installation with: + +``` +go version +``` + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/raft). + +To prevent complications with cgo, the primary backend `MDBStore` is in a separate repository, +called [raft-mdb](http://github.com/hashicorp/raft-mdb). That is the recommended implementation +for the `LogStore` and `StableStore`. 
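
As a quick orientation, a minimal, self-contained sketch of wiring the library together might look like the following. It uses the in-memory store, discard snapshot store, and in-memory transport, which are fine for tests; the backends discussed in this section are what you would use in production. The FSM, the server ID, and the trivial snapshot handling are illustrative assumptions, not part of the library:

```
package main

import (
	"fmt"
	"io"

	"github.com/hashicorp/raft"
)

// lastFSM is a toy FSM that just remembers the last command applied; a
// real application implements raft.FSM over its own state.
type lastFSM struct{ last string }

func (f *lastFSM) Apply(l *raft.Log) interface{} { f.last = string(l.Data); return nil }
func (f *lastFSM) Snapshot() (raft.FSMSnapshot, error) {
	return nil, fmt.Errorf("snapshots not implemented in this sketch")
}
func (f *lastFSM) Restore(rc io.ReadCloser) error { return rc.Close() }

func main() {
	conf := raft.DefaultConfig()
	conf.LocalID = raft.ServerID("server-1") // illustrative ID

	store := raft.NewInmemStore()           // serves as both LogStore and StableStore
	snaps := raft.NewDiscardSnapshotStore() // throws snapshots away; tests only
	addr, trans := raft.NewInmemTransport("")

	r, err := raft.NewRaft(conf, &lastFSM{}, store, store, snaps, trans)
	if err != nil {
		panic(err)
	}

	// Bootstrap a single-node cluster; this server then elects itself leader.
	f := r.BootstrapCluster(raft.Configuration{Servers: []raft.Server{
		{ID: conf.LocalID, Address: addr},
	}})
	if err := f.Error(); err != nil {
		panic(err)
	}
}
```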
+ +A pure Go backend using [BoltDB](https://github.com/boltdb/bolt) is also available called +[raft-boltdb](https://github.com/hashicorp/raft-boltdb). It can also be used as a `LogStore` +and `StableStore`. + +## Tagged Releases + +As of September 2017, HashiCorp will start using tags for this library to clearly indicate +major version updates. We recommend you vendor your application's dependency on this library. + +* v0.1.0 is the original stable version of the library that was in master and has been maintained +with no breaking API changes. This was in use by Consul prior to version 0.7.0. + +* v1.0.0 takes the changes that were staged in the library-v2-stage-one branch. This version +manages server identities using a UUID, so introduces some breaking API changes. It also versions +the Raft protocol, and requires some special steps when interoperating with Raft servers running +older versions of the library (see the detailed comment in config.go about version compatibility). +You can reference https://github.com/hashicorp/consul/pull/2222 for an idea of what was required +to port Consul to these new interfaces. + + This version includes some new features as well, including non voting servers, a new address + provider abstraction in the transport layer, and more resilient snapshots. + +## Protocol + +raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://raft.github.io/raft.pdf) + +A high level overview of the Raft protocol is described below, but for details please read the full +[Raft paper](https://raft.github.io/raft.pdf) +followed by the raft source. Any questions about the raft protocol should be sent to the +[raft-dev mailing list](https://groups.google.com/forum/#!forum/raft-dev). + +### Protocol Description + +Raft nodes are always in one of three states: follower, candidate or leader. All +nodes initially start out as a follower. In this state, nodes can accept log entries +from a leader and cast votes. If no entries are received for some time, nodes +self-promote to the candidate state. In the candidate state nodes request votes from +their peers. If a candidate receives a quorum of votes, then it is promoted to a leader. +The leader must accept new log entries and replicate to all the other followers. +In addition, if stale reads are not acceptable, all queries must also be performed on +the leader. + +Once a cluster has a leader, it is able to accept new log entries. A client can +request that a leader append a new log entry, which is an opaque binary blob to +Raft. The leader then writes the entry to durable storage and attempts to replicate +to a quorum of followers. Once the log entry is considered *committed*, it can be +*applied* to a finite state machine. The finite state machine is application specific, +and is implemented using an interface. + +An obvious question relates to the unbounded nature of a replicated log. Raft provides +a mechanism by which the current state is snapshotted, and the log is compacted. Because +of the FSM abstraction, restoring the state of the FSM must result in the same state +as a replay of old logs. This allows Raft to capture the FSM state at a point in time, +and then remove all the logs that were used to reach that state. This is performed automatically +without user intervention, and prevents unbounded disk usage as well as minimizing +time spent replaying logs. + +Lastly, there is the issue of updating the peer set when new servers are joining +or existing servers are leaving. 
As long as a quorum of nodes is available, this
+is not an issue as Raft provides mechanisms to dynamically update the peer set.
+If a quorum of nodes is unavailable, then this becomes a very challenging issue.
+For example, suppose there are only 2 peers, A and B. The quorum size is also
+2, meaning both nodes must agree to commit a log entry. If either A or B fails,
+it is now impossible to reach quorum. This means the cluster is unable to add
+or remove a node, or commit any additional log entries. This results in *unavailability*.
+At this point, manual intervention would be required to remove either A or B,
+and to restart the remaining node in bootstrap mode.
+
+A Raft cluster of 3 nodes can tolerate a single node failure, while a cluster
+of 5 can tolerate 2 node failures. The recommended configuration is to either
+run 3 or 5 raft servers. This maximizes availability without
+greatly sacrificing performance.
+
+In terms of performance, Raft is comparable to Paxos. Assuming stable leadership,
+committing a log entry requires a single round trip to half of the cluster.
+Thus performance is bound by disk I/O and network latency.
+
diff --git a/vendor/github.com/hashicorp/raft/api.go b/vendor/github.com/hashicorp/raft/api.go
new file mode 100644
index 00000000..17ca6556
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/api.go
@@ -0,0 +1,1102 @@
+package raft
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+
+	"github.com/armon/go-metrics"
+)
+
+const (
+	// This is the current suggested max size of the data in a raft log entry.
+	// This is based on current architecture, default timing, etc. Clients can
+	// ignore this value if they want as there is no actual hard checking
+	// within the library. As the library is enhanced this value may change
+	// over time to reflect current suggested maximums.
+	//
+	// Increasing beyond this risks RPC IO taking too long and preventing
+	// timely heartbeat signals which are sent in serial in current transports,
+	// potentially causing leadership instability.
+	SuggestedMaxDataSize = 512 * 1024
+)
+
+var (
+	// ErrLeader is returned when an operation can't be completed on a
+	// leader node.
+	ErrLeader = errors.New("node is the leader")
+
+	// ErrNotLeader is returned when an operation can't be completed on a
+	// follower or candidate node.
+	ErrNotLeader = errors.New("node is not the leader")
+
+	// ErrLeadershipLost is returned when a leader fails to commit a log entry
+	// because it's been deposed in the process.
+	ErrLeadershipLost = errors.New("leadership lost while committing log")
+
+	// ErrAbortedByRestore is returned when a leader fails to commit a log
+	// entry because it's been superseded by a user snapshot restore.
+	ErrAbortedByRestore = errors.New("snapshot restored while committing log")
+
+	// ErrRaftShutdown is returned when operations are requested against an
+	// inactive Raft.
+	ErrRaftShutdown = errors.New("raft is already shutdown")
+
+	// ErrEnqueueTimeout is returned when a command fails due to a timeout.
+	ErrEnqueueTimeout = errors.New("timed out enqueuing operation")
+
+	// ErrNothingNewToSnapshot is returned when trying to create a snapshot
+	// but there's nothing new committed to the FSM since we started.
+	ErrNothingNewToSnapshot = errors.New("nothing new to snapshot")
+
+	// ErrUnsupportedProtocol is returned when an operation is attempted
+	// that's not supported by the current protocol version.
+ ErrUnsupportedProtocol = errors.New("operation not supported with current protocol version") + + // ErrCantBootstrap is returned when attempt is made to bootstrap a + // cluster that already has state present. + ErrCantBootstrap = errors.New("bootstrap only works on new clusters") + + // ErrLeadershipTransferInProgress is returned when the leader is rejecting + // client requests because it is attempting to transfer leadership. + ErrLeadershipTransferInProgress = errors.New("leadership transfer in progress") +) + +// Raft implements a Raft node. +type Raft struct { + raftState + + // protocolVersion is used to inter-operate with Raft servers running + // different versions of the library. See comments in config.go for more + // details. + protocolVersion ProtocolVersion + + // applyCh is used to async send logs to the main thread to + // be committed and applied to the FSM. + applyCh chan *logFuture + + // Configuration provided at Raft initialization + conf Config + + // FSM is the client state machine to apply commands to + fsm FSM + + // fsmMutateCh is used to send state-changing updates to the FSM. This + // receives pointers to commitTuple structures when applying logs or + // pointers to restoreFuture structures when restoring a snapshot. We + // need control over the order of these operations when doing user + // restores so that we finish applying any old log applies before we + // take a user snapshot on the leader, otherwise we might restore the + // snapshot and apply old logs to it that were in the pipe. + fsmMutateCh chan interface{} + + // fsmSnapshotCh is used to trigger a new snapshot being taken + fsmSnapshotCh chan *reqSnapshotFuture + + // lastContact is the last time we had contact from the + // leader node. This can be used to gauge staleness. + lastContact time.Time + lastContactLock sync.RWMutex + + // Leader is the current cluster leader + leader ServerAddress + leaderLock sync.RWMutex + + // leaderCh is used to notify of leadership changes + leaderCh chan bool + + // leaderState used only while state is leader + leaderState leaderState + + // candidateFromLeadershipTransfer is used to indicate that this server became + // candidate because the leader tries to transfer leadership. This flag is + // used in RequestVoteRequest to express that a leadership transfer is going + // on. + candidateFromLeadershipTransfer bool + + // Stores our local server ID, used to avoid sending RPCs to ourself + localID ServerID + + // Stores our local addr + localAddr ServerAddress + + // Used for our logging + logger hclog.Logger + + // LogStore provides durable storage for logs + logs LogStore + + // Used to request the leader to make configuration changes. + configurationChangeCh chan *configurationChangeFuture + + // Tracks the latest configuration and latest committed configuration from + // the log/snapshot. 
+ configurations configurations + + // RPC chan comes from the transport layer + rpcCh <-chan RPC + + // Shutdown channel to exit, protected to prevent concurrent exits + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + // snapshots is used to store and retrieve snapshots + snapshots SnapshotStore + + // userSnapshotCh is used for user-triggered snapshots + userSnapshotCh chan *userSnapshotFuture + + // userRestoreCh is used for user-triggered restores of external + // snapshots + userRestoreCh chan *userRestoreFuture + + // stable is a StableStore implementation for durable state + // It provides stable storage for many fields in raftState + stable StableStore + + // The transport layer we use + trans Transport + + // verifyCh is used to async send verify futures to the main thread + // to verify we are still the leader + verifyCh chan *verifyFuture + + // configurationsCh is used to get the configuration data safely from + // outside of the main thread. + configurationsCh chan *configurationsFuture + + // bootstrapCh is used to attempt an initial bootstrap from outside of + // the main thread. + bootstrapCh chan *bootstrapFuture + + // List of observers and the mutex that protects them. The observers list + // is indexed by an artificial ID which is used for deregistration. + observersLock sync.RWMutex + observers map[uint64]*Observer + + // leadershipTransferCh is used to start a leadership transfer from outside of + // the main thread. + leadershipTransferCh chan *leadershipTransferFuture +} + +// BootstrapCluster initializes a server's storage with the given cluster +// configuration. This should only be called at the beginning of time for the +// cluster with an identical configuration listing all Voter servers. There is +// no need to bootstrap Nonvoter and Staging servers. +// +// A cluster can only be bootstrapped once from a single participating Voter +// server. Any further attempts to bootstrap will return an error that can be +// safely ignored. +// +// One sane approach is to bootstrap a single server with a configuration +// listing just itself as a Voter, then invoke AddVoter() on it to add other +// servers to the cluster. +func BootstrapCluster(conf *Config, logs LogStore, stable StableStore, + snaps SnapshotStore, trans Transport, configuration Configuration) error { + // Validate the Raft server config. + if err := ValidateConfig(conf); err != nil { + return err + } + + // Sanity check the Raft peer configuration. + if err := checkConfiguration(configuration); err != nil { + return err + } + + // Make sure the cluster is in a clean state. + hasState, err := HasExistingState(logs, stable, snaps) + if err != nil { + return fmt.Errorf("failed to check for existing state: %v", err) + } + if hasState { + return ErrCantBootstrap + } + + // Set current term to 1. + if err := stable.SetUint64(keyCurrentTerm, 1); err != nil { + return fmt.Errorf("failed to save current term: %v", err) + } + + // Append configuration entry to log. 
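+	// The entry type below depends on the protocol version: clusters still
+	// speaking protocol versions 0-2 must receive membership changes in the
+	// legacy LogRemovePeerDeprecated encoding so that older servers can
+	// parse them, while version 3 and later use the LogConfiguration entry
+	// type (see the version notes in config.go).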
+	entry := &Log{
+		Index: 1,
+		Term:  1,
+	}
+	if conf.ProtocolVersion < 3 {
+		entry.Type = LogRemovePeerDeprecated
+		entry.Data = encodePeers(configuration, trans)
+	} else {
+		entry.Type = LogConfiguration
+		entry.Data = EncodeConfiguration(configuration)
+	}
+	if err := logs.StoreLog(entry); err != nil {
+		return fmt.Errorf("failed to append configuration entry to log: %v", err)
+	}
+
+	return nil
+}
+
+// RecoverCluster is used to manually force a new configuration in order to
+// recover from a loss of quorum where the current configuration cannot be
+// restored, such as when several servers die at the same time. This works by
+// reading all the current state for this server, creating a snapshot with the
+// supplied configuration, and then truncating the Raft log. This is the only
+// safe way to force a given configuration without actually altering the log to
+// insert any new entries, which could cause conflicts with other servers with
+// different state.
+//
+// WARNING! This operation implicitly commits all entries in the Raft log, so
+// in general this is an extremely unsafe operation. If you've lost your other
+// servers and are performing a manual recovery, then you've also lost the
+// commit information, so this is likely the best you can do, but you should be
+// aware that calling this can cause Raft log entries that were in the process
+// of being replicated but not yet committed to be committed.
+//
+// Note the FSM passed here is used for the snapshot operations and will be
+// left in a state that should not be used by the application. Be sure to
+// discard this FSM and any associated state and provide a fresh one when
+// calling NewRaft later.
+//
+// A typical way to recover the cluster is to shut down all servers and then
+// run RecoverCluster on every server using an identical configuration. When
+// the cluster is then restarted, an election should occur and then Raft will
+// resume normal operation. If it's desired to make a particular server the
+// leader, this can be used to inject a new configuration with that server as
+// the sole voter, and then join up other new clean-state peer servers using
+// the usual APIs in order to bring the cluster back into a known state.
+func RecoverCluster(conf *Config, fsm FSM, logs LogStore, stable StableStore,
+	snaps SnapshotStore, trans Transport, configuration Configuration) error {
+	// Validate the Raft server config.
+	if err := ValidateConfig(conf); err != nil {
+		return err
+	}
+
+	// Sanity check the Raft peer configuration.
+	if err := checkConfiguration(configuration); err != nil {
+		return err
+	}
+
+	// Refuse to recover if there's no existing state. This would be safe to
+	// do, but it is likely an indication of an operator error where they
+	// expect data to be there and it's not. By refusing, we force them
+	// to show intent to start a cluster fresh by explicitly doing a
+	// bootstrap, rather than quietly fire up a fresh cluster here.
+	hasState, err := HasExistingState(logs, stable, snaps)
+	if err != nil {
+		return fmt.Errorf("failed to check for existing state: %v", err)
+	}
+	if !hasState {
+		return fmt.Errorf("refused to recover cluster with no initial state, this is probably an operator error")
+	}
+
+	// Attempt to restore any snapshots we find, newest to oldest.
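+	// Restore failures are tolerated per snapshot: if one cannot be opened
+	// or applied, the loop below simply falls through to the next (older)
+	// one. The index/term check after the loop detects the case where every
+	// available snapshot failed.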
+ var snapshotIndex uint64 + var snapshotTerm uint64 + snapshots, err := snaps.List() + if err != nil { + return fmt.Errorf("failed to list snapshots: %v", err) + } + for _, snapshot := range snapshots { + if !conf.NoSnapshotRestoreOnStart { + _, source, err := snaps.Open(snapshot.ID) + if err != nil { + // Skip this one and try the next. We will detect if we + // couldn't open any snapshots. + continue + } + + err = fsm.Restore(source) + // Close the source after the restore has completed + source.Close() + if err != nil { + // Same here, skip and try the next one. + continue + } + } + + snapshotIndex = snapshot.Index + snapshotTerm = snapshot.Term + break + } + if len(snapshots) > 0 && (snapshotIndex == 0 || snapshotTerm == 0) { + return fmt.Errorf("failed to restore any of the available snapshots") + } + + // The snapshot information is the best known end point for the data + // until we play back the Raft log entries. + lastIndex := snapshotIndex + lastTerm := snapshotTerm + + // Apply any Raft log entries past the snapshot. + lastLogIndex, err := logs.LastIndex() + if err != nil { + return fmt.Errorf("failed to find last log: %v", err) + } + for index := snapshotIndex + 1; index <= lastLogIndex; index++ { + var entry Log + if err := logs.GetLog(index, &entry); err != nil { + return fmt.Errorf("failed to get log at index %d: %v", index, err) + } + if entry.Type == LogCommand { + _ = fsm.Apply(&entry) + } + lastIndex = entry.Index + lastTerm = entry.Term + } + + // Create a new snapshot, placing the configuration in as if it was + // committed at index 1. + snapshot, err := fsm.Snapshot() + if err != nil { + return fmt.Errorf("failed to snapshot FSM: %v", err) + } + version := getSnapshotVersion(conf.ProtocolVersion) + sink, err := snaps.Create(version, lastIndex, lastTerm, configuration, 1, trans) + if err != nil { + return fmt.Errorf("failed to create snapshot: %v", err) + } + if err := snapshot.Persist(sink); err != nil { + return fmt.Errorf("failed to persist snapshot: %v", err) + } + if err := sink.Close(); err != nil { + return fmt.Errorf("failed to finalize snapshot: %v", err) + } + + // Compact the log so that we don't get bad interference from any + // configuration change log entries that might be there. + firstLogIndex, err := logs.FirstIndex() + if err != nil { + return fmt.Errorf("failed to get first log index: %v", err) + } + if err := logs.DeleteRange(firstLogIndex, lastLogIndex); err != nil { + return fmt.Errorf("log compaction failed: %v", err) + } + + return nil +} + +// HasExistingState returns true if the server has any existing state (logs, +// knowledge of a current term, or any snapshots). +func HasExistingState(logs LogStore, stable StableStore, snaps SnapshotStore) (bool, error) { + // Make sure we don't have a current term. + currentTerm, err := stable.GetUint64(keyCurrentTerm) + if err == nil { + if currentTerm > 0 { + return true, nil + } + } else { + if err.Error() != "not found" { + return false, fmt.Errorf("failed to read current term: %v", err) + } + } + + // Make sure we have an empty log. + lastIndex, err := logs.LastIndex() + if err != nil { + return false, fmt.Errorf("failed to get last log index: %v", err) + } + if lastIndex > 0 { + return true, nil + } + + // Make sure we have no snapshots + snapshots, err := snaps.List() + if err != nil { + return false, fmt.Errorf("failed to list snapshots: %v", err) + } + if len(snapshots) > 0 { + return true, nil + } + + return false, nil +} + +// NewRaft is used to construct a new Raft node. 
It takes a configuration, as well +// as implementations of various interfaces that are required. If we have any +// old state, such as snapshots, logs, peers, etc, all those will be restored +// when creating the Raft node. +func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps SnapshotStore, trans Transport) (*Raft, error) { + // Validate the configuration. + if err := ValidateConfig(conf); err != nil { + return nil, err + } + + // Ensure we have a LogOutput. + var logger hclog.Logger + if conf.Logger != nil { + logger = conf.Logger + } else { + if conf.LogOutput == nil { + conf.LogOutput = os.Stderr + } + + logger = hclog.New(&hclog.LoggerOptions{ + Name: "raft", + Level: hclog.LevelFromString(conf.LogLevel), + Output: conf.LogOutput, + }) + } + + // Try to restore the current term. + currentTerm, err := stable.GetUint64(keyCurrentTerm) + if err != nil && err.Error() != "not found" { + return nil, fmt.Errorf("failed to load current term: %v", err) + } + + // Read the index of the last log entry. + lastIndex, err := logs.LastIndex() + if err != nil { + return nil, fmt.Errorf("failed to find last log: %v", err) + } + + // Get the last log entry. + var lastLog Log + if lastIndex > 0 { + if err = logs.GetLog(lastIndex, &lastLog); err != nil { + return nil, fmt.Errorf("failed to get last log at index %d: %v", lastIndex, err) + } + } + + // Make sure we have a valid server address and ID. + protocolVersion := conf.ProtocolVersion + localAddr := ServerAddress(trans.LocalAddr()) + localID := conf.LocalID + + // TODO (slackpad) - When we deprecate protocol version 2, remove this + // along with the AddPeer() and RemovePeer() APIs. + if protocolVersion < 3 && string(localID) != string(localAddr) { + return nil, fmt.Errorf("when running with ProtocolVersion < 3, LocalID must be set to the network address") + } + + // Create Raft struct. + r := &Raft{ + protocolVersion: protocolVersion, + applyCh: make(chan *logFuture), + conf: *conf, + fsm: fsm, + fsmMutateCh: make(chan interface{}, 128), + fsmSnapshotCh: make(chan *reqSnapshotFuture), + leaderCh: make(chan bool), + localID: localID, + localAddr: localAddr, + logger: logger, + logs: logs, + configurationChangeCh: make(chan *configurationChangeFuture), + configurations: configurations{}, + rpcCh: trans.Consumer(), + snapshots: snaps, + userSnapshotCh: make(chan *userSnapshotFuture), + userRestoreCh: make(chan *userRestoreFuture), + shutdownCh: make(chan struct{}), + stable: stable, + trans: trans, + verifyCh: make(chan *verifyFuture, 64), + configurationsCh: make(chan *configurationsFuture, 8), + bootstrapCh: make(chan *bootstrapFuture), + observers: make(map[uint64]*Observer), + leadershipTransferCh: make(chan *leadershipTransferFuture, 1), + } + + // Initialize as a follower. + r.setState(Follower) + + // Start as leader if specified. This should only be used + // for testing purposes. + if conf.StartAsLeader { + r.setState(Leader) + r.setLeader(r.localAddr) + } + + // Restore the current term and the last log. + r.setCurrentTerm(currentTerm) + r.setLastLog(lastLog.Index, lastLog.Term) + + // Attempt to restore a snapshot if there are any. + if err := r.restoreSnapshot(); err != nil { + return nil, err + } + + // Scan through the log for any configuration change entries. 
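+	// Only entries after the last snapshot need to be examined here; any
+	// earlier configuration change is already captured in the snapshot
+	// metadata restored above.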
+	snapshotIndex, _ := r.getLastSnapshot()
+	for index := snapshotIndex + 1; index <= lastLog.Index; index++ {
+		var entry Log
+		if err := r.logs.GetLog(index, &entry); err != nil {
+			r.logger.Error("failed to get log", "index", index, "error", err)
+			panic(err)
+		}
+		r.processConfigurationLogEntry(&entry)
+	}
+	r.logger.Info("initial configuration",
+		"index", r.configurations.latestIndex,
+		"servers", hclog.Fmt("%+v", r.configurations.latest.Servers))
+
+	// Setup a heartbeat fast-path to avoid head-of-line
+	// blocking where possible. It MUST be safe for this
+	// to be called concurrently with a blocking RPC.
+	trans.SetHeartbeatHandler(r.processHeartbeat)
+
+	// Start the background work.
+	r.goFunc(r.run)
+	r.goFunc(r.runFSM)
+	r.goFunc(r.runSnapshots)
+	return r, nil
+}
+
+// restoreSnapshot attempts to restore the latest snapshots, and fails if none
+// of them can be restored. This is called at initialization time, and is
+// completely unsafe to call at any other time.
+func (r *Raft) restoreSnapshot() error {
+	snapshots, err := r.snapshots.List()
+	if err != nil {
+		r.logger.Error("failed to list snapshots", "error", err)
+		return err
+	}
+
+	// Try to load in order of newest to oldest
+	for _, snapshot := range snapshots {
+		if !r.conf.NoSnapshotRestoreOnStart {
+			_, source, err := r.snapshots.Open(snapshot.ID)
+			if err != nil {
+				r.logger.Error("failed to open snapshot", "id", snapshot.ID, "error", err)
+				continue
+			}
+
+			err = r.fsm.Restore(source)
+			// Close the source after the restore has completed
+			source.Close()
+			if err != nil {
+				r.logger.Error("failed to restore snapshot", "id", snapshot.ID, "error", err)
+				continue
+			}
+
+			r.logger.Info("restored from snapshot", "id", snapshot.ID)
+		}
+		// Update the lastApplied so we don't replay old logs
+		r.setLastApplied(snapshot.Index)
+
+		// Update the last stable snapshot info
+		r.setLastSnapshot(snapshot.Index, snapshot.Term)
+
+		// Update the configuration
+		if snapshot.Version > 0 {
+			r.configurations.committed = snapshot.Configuration
+			r.configurations.committedIndex = snapshot.ConfigurationIndex
+			r.configurations.latest = snapshot.Configuration
+			r.configurations.latestIndex = snapshot.ConfigurationIndex
+		} else {
+			configuration := decodePeers(snapshot.Peers, r.trans)
+			r.configurations.committed = configuration
+			r.configurations.committedIndex = snapshot.Index
+			r.configurations.latest = configuration
+			r.configurations.latestIndex = snapshot.Index
+		}
+
+		// Success!
+		return nil
+	}
+
+	// If we had snapshots and failed to load them, it's an error
+	if len(snapshots) > 0 {
+		return fmt.Errorf("failed to load any existing snapshots")
+	}
+	return nil
+}
+
+// BootstrapCluster is equivalent to non-member BootstrapCluster but can be
+// called on an un-bootstrapped Raft instance after it has been created. This
+// should only be called at the beginning of time for the cluster with an
+// identical configuration listing all Voter servers. There is no need to
+// bootstrap Nonvoter and Staging servers.
+//
+// A cluster can only be bootstrapped once from a single participating Voter
+// server. Any further attempts to bootstrap will return an error that can be
+// safely ignored.
+//
+// One sane approach is to bootstrap a single server with a configuration
+// listing just itself as a Voter, then invoke AddVoter() on it to add other
+// servers to the cluster.
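
A sketch of that single-voter bootstrap pattern; the `r`, `conf`, and `trans` handles are assumed to come from the NewRaft call above, and the second server's ID and address are illustrative placeholders:

```
// Bootstrap a brand-new cluster with this server as its only voter.
f := r.BootstrapCluster(raft.Configuration{
	Servers: []raft.Server{
		{Suffrage: raft.Voter, ID: conf.LocalID, Address: trans.LocalAddr()},
	},
})
if err := f.Error(); err != nil {
	// An error here (e.g. ErrCantBootstrap) means state already exists.
	panic(err)
}

// Once this node has won its election, grow the cluster one server at a time.
if err := r.AddVoter(raft.ServerID("server-2"), raft.ServerAddress("10.0.0.2:8300"), 0, 0).Error(); err != nil {
	panic(err)
}
```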
+func (r *Raft) BootstrapCluster(configuration Configuration) Future {
+	bootstrapReq := &bootstrapFuture{}
+	bootstrapReq.init()
+	bootstrapReq.configuration = configuration
+	select {
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.bootstrapCh <- bootstrapReq:
+		return bootstrapReq
+	}
+}
+
+// Leader is used to return the current leader of the cluster.
+// It may return empty string if there is no current leader
+// or the leader is unknown.
+func (r *Raft) Leader() ServerAddress {
+	r.leaderLock.RLock()
+	leader := r.leader
+	r.leaderLock.RUnlock()
+	return leader
+}
+
+// Apply is used to apply a command to the FSM in a highly consistent
+// manner. This returns a future that can be used to wait on the application.
+// An optional timeout can be provided to limit the amount of time we wait
+// for the command to be started. This must be run on the leader or it
+// will fail.
+func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture {
+	return r.ApplyLog(Log{Data: cmd}, timeout)
+}
+
+// ApplyLog performs Apply but takes in a Log directly. The only values
+// currently taken from the submitted Log are Data and Extensions.
+func (r *Raft) ApplyLog(log Log, timeout time.Duration) ApplyFuture {
+	metrics.IncrCounter([]string{"raft", "apply"}, 1)
+
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Create a log future, no index or term yet
+	logFuture := &logFuture{
+		log: Log{
+			Type:       LogCommand,
+			Data:       log.Data,
+			Extensions: log.Extensions,
+		},
+	}
+	logFuture.init()
+
+	select {
+	case <-timer:
+		return errorFuture{ErrEnqueueTimeout}
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.applyCh <- logFuture:
+		return logFuture
+	}
+}
+
+// Barrier is used to issue a command that blocks until all preceding
+// operations have been applied to the FSM. It can be used to ensure the
+// FSM reflects all queued writes. An optional timeout can be provided to
+// limit the amount of time we wait for the command to be started. This
+// must be run on the leader or it will fail.
+func (r *Raft) Barrier(timeout time.Duration) Future {
+	metrics.IncrCounter([]string{"raft", "barrier"}, 1)
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Create a log future, no index or term yet
+	logFuture := &logFuture{
+		log: Log{
+			Type: LogBarrier,
+		},
+	}
+	logFuture.init()
+
+	select {
+	case <-timer:
+		return errorFuture{ErrEnqueueTimeout}
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.applyCh <- logFuture:
+		return logFuture
+	}
+}
+
+// VerifyLeader is used to ensure the current node is still
+// the leader. This can be done to prevent stale reads when a
+// new leader has potentially been elected.
+func (r *Raft) VerifyLeader() Future {
+	metrics.IncrCounter([]string{"raft", "verify_leader"}, 1)
+	verifyFuture := &verifyFuture{}
+	verifyFuture.init()
+	select {
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.verifyCh <- verifyFuture:
+		return verifyFuture
+	}
+}
+
+// GetConfiguration returns the latest configuration and its associated index
+// currently in use. This may not yet be committed. This must not be called on
+// the main thread (which can access the information directly).
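
A short sketch of consuming the returned future; the helper name is hypothetical, and the `fmt` and `github.com/hashicorp/raft` imports are assumed:

```
// printMembership shows one way to read the configuration via the future.
func printMembership(r *raft.Raft) error {
	future := r.GetConfiguration()
	if err := future.Error(); err != nil {
		return err
	}
	for _, srv := range future.Configuration().Servers {
		fmt.Printf("id=%s addr=%s suffrage=%v (config index %d)\n",
			srv.ID, srv.Address, srv.Suffrage, future.Index())
	}
	return nil
}
```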
+func (r *Raft) GetConfiguration() ConfigurationFuture { + configReq := &configurationsFuture{} + configReq.init() + select { + case <-r.shutdownCh: + configReq.respond(ErrRaftShutdown) + return configReq + case r.configurationsCh <- configReq: + return configReq + } +} + +// AddPeer (deprecated) is used to add a new peer into the cluster. This must be +// run on the leader or it will fail. Use AddVoter/AddNonvoter instead. +func (r *Raft) AddPeer(peer ServerAddress) Future { + if r.protocolVersion > 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: AddStaging, + serverID: ServerID(peer), + serverAddress: peer, + prevIndex: 0, + }, 0) +} + +// RemovePeer (deprecated) is used to remove a peer from the cluster. If the +// current leader is being removed, it will cause a new election +// to occur. This must be run on the leader or it will fail. +// Use RemoveServer instead. +func (r *Raft) RemovePeer(peer ServerAddress) Future { + if r.protocolVersion > 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: RemoveServer, + serverID: ServerID(peer), + prevIndex: 0, + }, 0) +} + +// AddVoter will add the given server to the cluster as a staging server. If the +// server is already in the cluster as a voter, this updates the server's address. +// This must be run on the leader or it will fail. The leader will promote the +// staging server to a voter once that server is ready. If nonzero, prevIndex is +// the index of the only configuration upon which this change may be applied; if +// another configuration entry has been added in the meantime, this request will +// fail. If nonzero, timeout is how long this server should wait before the +// configuration change log entry is appended. +func (r *Raft) AddVoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture { + if r.protocolVersion < 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: AddStaging, + serverID: id, + serverAddress: address, + prevIndex: prevIndex, + }, timeout) +} + +// AddNonvoter will add the given server to the cluster but won't assign it a +// vote. The server will receive log entries, but it won't participate in +// elections or log entry commitment. If the server is already in the cluster, +// this updates the server's address. This must be run on the leader or it will +// fail. For prevIndex and timeout, see AddVoter. +func (r *Raft) AddNonvoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture { + if r.protocolVersion < 3 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: AddNonvoter, + serverID: id, + serverAddress: address, + prevIndex: prevIndex, + }, timeout) +} + +// RemoveServer will remove the given server from the cluster. If the current +// leader is being removed, it will cause a new election to occur. This must be +// run on the leader or it will fail. For prevIndex and timeout, see AddVoter. 
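
One way to use prevIndex for optimistic concurrency, sketched as a hypothetical helper (assumes the `time` and `github.com/hashicorp/raft` imports, and that `r` is the leader's handle):

```
// removeServerChecked removes id only if the configuration has not changed
// since we read it: if another membership change lands after cfg.Index(),
// the request fails instead of applying against a stale view.
func removeServerChecked(r *raft.Raft, id raft.ServerID) error {
	cfg := r.GetConfiguration()
	if err := cfg.Error(); err != nil {
		return err
	}
	return r.RemoveServer(id, cfg.Index(), 5*time.Second).Error()
}
```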
+func (r *Raft) RemoveServer(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture {
+	if r.protocolVersion < 2 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:   RemoveServer,
+		serverID:  id,
+		prevIndex: prevIndex,
+	}, timeout)
+}
+
+// DemoteVoter will take away a server's vote, if it has one. If present, the
+// server will continue to receive log entries, but it won't participate in
+// elections or log entry commitment. If the server is not in the cluster, this
+// does nothing. This must be run on the leader or it will fail. For prevIndex
+// and timeout, see AddVoter.
+func (r *Raft) DemoteVoter(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture {
+	if r.protocolVersion < 3 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:   DemoteVoter,
+		serverID:  id,
+		prevIndex: prevIndex,
+	}, timeout)
+}
+
+// Shutdown is used to stop the Raft background routines.
+// This is not a graceful operation. Provides a future that
+// can be used to block until all background routines have exited.
+func (r *Raft) Shutdown() Future {
+	r.shutdownLock.Lock()
+	defer r.shutdownLock.Unlock()
+
+	if !r.shutdown {
+		close(r.shutdownCh)
+		r.shutdown = true
+		r.setState(Shutdown)
+		return &shutdownFuture{r}
+	}
+
+	// avoid closing transport twice
+	return &shutdownFuture{nil}
+}
+
+// Snapshot is used to manually force Raft to take a snapshot. Returns a future
+// that can be used to block until complete, and that contains a function that
+// can be used to open the snapshot.
+func (r *Raft) Snapshot() SnapshotFuture {
+	future := &userSnapshotFuture{}
+	future.init()
+	select {
+	case r.userSnapshotCh <- future:
+		return future
+	case <-r.shutdownCh:
+		future.respond(ErrRaftShutdown)
+		return future
+	}
+}
+
+// Restore is used to manually force Raft to consume an external snapshot, such
+// as if restoring from a backup. We will use the current Raft configuration,
+// not the one from the snapshot, so that we can restore into a new cluster. We
+// will also use the higher of the index of the snapshot, or the current index,
+// and then add 1 to that, so we force a new state with a hole in the Raft log,
+// so that the snapshot will be sent to followers and used for any new joiners.
+// This can only be run on the leader, and blocks until the restore is complete
+// or an error occurs.
+//
+// WARNING! This operation has the leader take on the state of the snapshot and
+// then sets itself up so that it replicates that to its followers through the
+// install snapshot process. This involves a potentially dangerous period where
+// the leader commits ahead of its followers, so should only be used for disaster
+// recovery into a fresh cluster, and should not be used in normal operations.
+func (r *Raft) Restore(meta *SnapshotMeta, reader io.Reader, timeout time.Duration) error {
+	metrics.IncrCounter([]string{"raft", "restore"}, 1)
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Perform the restore.
+	restore := &userRestoreFuture{
+		meta:   meta,
+		reader: reader,
+	}
+	restore.init()
+	select {
+	case <-timer:
+		return ErrEnqueueTimeout
+	case <-r.shutdownCh:
+		return ErrRaftShutdown
+	case r.userRestoreCh <- restore:
+		// If the restore is ingested then wait for it to complete.
+		if err := restore.Error(); err != nil {
+			return err
+		}
+	}
+
+	// Apply a no-op log entry.
Waiting for this allows us to wait until the
+	// followers have gotten the restore and replicated at least this new
+	// entry, which shows that we've also faulted and installed the
+	// snapshot with the contents of the restore.
+	noop := &logFuture{
+		log: Log{
+			Type: LogNoop,
+		},
+	}
+	noop.init()
+	select {
+	case <-timer:
+		return ErrEnqueueTimeout
+	case <-r.shutdownCh:
+		return ErrRaftShutdown
+	case r.applyCh <- noop:
+		return noop.Error()
+	}
+}
+
+// State is used to return the current raft state.
+func (r *Raft) State() RaftState {
+	return r.getState()
+}
+
+// LeaderCh is used to get a channel which delivers signals on
+// acquiring or losing leadership. It sends true if we become
+// the leader, and false if we lose it. The channel is not buffered,
+// and does not block on writes.
+func (r *Raft) LeaderCh() <-chan bool {
+	return r.leaderCh
+}
+
+// String returns a string representation of this Raft node.
+func (r *Raft) String() string {
+	return fmt.Sprintf("Node at %s [%v]", r.localAddr, r.getState())
+}
+
+// LastContact returns the time of last contact by a leader.
+// This only makes sense if we are currently a follower.
+func (r *Raft) LastContact() time.Time {
+	r.lastContactLock.RLock()
+	last := r.lastContact
+	r.lastContactLock.RUnlock()
+	return last
+}
+
+// Stats is used to return a map of various internal stats. This
+// should only be used for informative purposes or debugging.
+//
+// Keys are: "state", "term", "last_log_index", "last_log_term",
+// "commit_index", "applied_index", "fsm_pending",
+// "last_snapshot_index", "last_snapshot_term",
+// "latest_configuration", "last_contact", and "num_peers".
+//
+// The value of "state" is the name of one of the possible leadership
+// states the node can be in at any given time: "Follower", "Candidate",
+// "Leader", or "Shutdown".
+//
+// The value of "latest_configuration" is a string which contains
+// the id of each server, its suffrage status, and its address.
+//
+// The value of "last_contact" is either "never" if there
+// has been no contact with a leader, "0" if the node is in the
+// leader state, or the time since last contact with a leader
+// formatted as a string.
+//
+// The value of "num_peers" is the number of other voting servers in the
+// cluster, not including this node. If this node isn't part of the
+// configuration then this will be "0".
+//
+// All other values are uint64s, formatted as strings.
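
For example, a caller might surface a few of the documented keys for debugging; a sketch assuming the `fmt` import and an `r` handle:

```
stats := r.Stats() // map[string]string of the documented keys above
fmt.Printf("state=%s term=%s applied=%s last_contact=%s num_peers=%s\n",
	stats["state"], stats["term"], stats["applied_index"],
	stats["last_contact"], stats["num_peers"])
```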
+func (r *Raft) Stats() map[string]string {
+	toString := func(v uint64) string {
+		return strconv.FormatUint(v, 10)
+	}
+	lastLogIndex, lastLogTerm := r.getLastLog()
+	lastSnapIndex, lastSnapTerm := r.getLastSnapshot()
+	s := map[string]string{
+		"state":                r.getState().String(),
+		"term":                 toString(r.getCurrentTerm()),
+		"last_log_index":       toString(lastLogIndex),
+		"last_log_term":        toString(lastLogTerm),
+		"commit_index":         toString(r.getCommitIndex()),
+		"applied_index":        toString(r.getLastApplied()),
+		"fsm_pending":          toString(uint64(len(r.fsmMutateCh))),
+		"last_snapshot_index":  toString(lastSnapIndex),
+		"last_snapshot_term":   toString(lastSnapTerm),
+		"protocol_version":     toString(uint64(r.protocolVersion)),
+		"protocol_version_min": toString(uint64(ProtocolVersionMin)),
+		"protocol_version_max": toString(uint64(ProtocolVersionMax)),
+		"snapshot_version_min": toString(uint64(SnapshotVersionMin)),
+		"snapshot_version_max": toString(uint64(SnapshotVersionMax)),
+	}
+
+	future := r.GetConfiguration()
+	if err := future.Error(); err != nil {
+		r.logger.Warn("could not get configuration for stats", "error", err)
+	} else {
+		configuration := future.Configuration()
+		s["latest_configuration_index"] = toString(future.Index())
+		s["latest_configuration"] = fmt.Sprintf("%+v", configuration.Servers)
+
+		// This is a legacy metric that we've seen people use in the wild.
+		hasUs := false
+		numPeers := 0
+		for _, server := range configuration.Servers {
+			if server.Suffrage == Voter {
+				if server.ID == r.localID {
+					hasUs = true
+				} else {
+					numPeers++
+				}
+			}
+		}
+		if !hasUs {
+			numPeers = 0
+		}
+		s["num_peers"] = toString(uint64(numPeers))
+	}
+
+	last := r.LastContact()
+	if r.getState() == Leader {
+		s["last_contact"] = "0"
+	} else if last.IsZero() {
+		s["last_contact"] = "never"
+	} else {
+		s["last_contact"] = fmt.Sprintf("%v", time.Now().Sub(last))
+	}
+	return s
+}
+
+// LastIndex returns the last index in stable storage,
+// either from the last log or from the last snapshot.
+func (r *Raft) LastIndex() uint64 {
+	return r.getLastIndex()
+}
+
+// AppliedIndex returns the last index applied to the FSM. This is generally
+// lagging behind the last index, especially for indexes that are persisted but
+// have not yet been considered committed by the leader. NOTE - this reflects
+// the last index that was sent to the application's FSM over the apply channel
+// but DOES NOT mean that the application's FSM has yet consumed it and applied
+// it to its internal state. Thus, the application's state may lag behind this
+// index.
+func (r *Raft) AppliedIndex() uint64 {
+	return r.getLastApplied()
+}
+
+// LeadershipTransfer will transfer leadership to a server in the cluster.
+// This can only be called from the leader, or it will fail. The leader will
+// stop accepting client requests, make sure the target server is up to date
+// and starts the transfer with a TimeoutNow message. This message has the same
+// effect as if the election timeout on the target server fires. Since
+// it is unlikely that another server is starting an election, it is very
+// likely that the target server is able to win the election. Note that raft
+// protocol version 3 alone is not sufficient to use LeadershipTransfer. A
+// recent version of that library has to be used that includes this feature.
+// Using leadership transfer is safe, however, in a cluster where not every
+// node has the latest version. If a follower cannot be promoted, it will fail
+// gracefully.
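
Usage on the leader is a one-liner; a sketch assuming the standard library `log` package is imported:

```
if err := r.LeadershipTransfer().Error(); err != nil {
	// Fails with ErrUnsupportedProtocol below protocol version 3, or if
	// the transfer itself could not be started or completed.
	log.Printf("leadership transfer failed: %v", err)
}
```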
+func (r *Raft) LeadershipTransfer() Future {
+	if r.protocolVersion < 3 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.initiateLeadershipTransfer(nil, nil)
+}
+
+// LeadershipTransferToServer does the same as LeadershipTransfer but takes a
+// server in the arguments in case leadership should be transferred to a
+// specific server in the cluster. Note that raft protocol version 3 alone is
+// not sufficient to use LeadershipTransfer. A recent version of that library
+// has to be used that includes this feature. Using leadership transfer is
+// safe, however, in a cluster where not every node has the latest version. If
+// a follower cannot be promoted, it will fail gracefully.
+func (r *Raft) LeadershipTransferToServer(id ServerID, address ServerAddress) Future {
+	if r.protocolVersion < 3 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.initiateLeadershipTransfer(&id, &address)
+}
diff --git a/vendor/github.com/hashicorp/raft/commands.go b/vendor/github.com/hashicorp/raft/commands.go
new file mode 100644
index 00000000..3358a328
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/commands.go
@@ -0,0 +1,177 @@
+package raft
+
+// RPCHeader is a common sub-structure used to pass along protocol version and
+// other information about the cluster. For older Raft implementations before
+// versioning was added this will default to a zero-valued structure when read
+// by newer Raft versions.
+type RPCHeader struct {
+	// ProtocolVersion is the version of the protocol the sender is
+	// speaking.
+	ProtocolVersion ProtocolVersion
+}
+
+// WithRPCHeader is an interface that exposes the RPC header.
+type WithRPCHeader interface {
+	GetRPCHeader() RPCHeader
+}
+
+// AppendEntriesRequest is the command used to append entries to the
+// replicated log.
+type AppendEntriesRequest struct {
+	RPCHeader
+
+	// Provide the current term and leader
+	Term   uint64
+	Leader []byte
+
+	// Provide the previous entries for integrity checking
+	PrevLogEntry uint64
+	PrevLogTerm  uint64
+
+	// New entries to commit
+	Entries []*Log
+
+	// Commit index on the leader
+	LeaderCommitIndex uint64
+}
+
+// GetRPCHeader - See WithRPCHeader.
+func (r *AppendEntriesRequest) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// AppendEntriesResponse is the response returned from an
+// AppendEntriesRequest.
+type AppendEntriesResponse struct {
+	RPCHeader
+
+	// Newer term if leader is out of date
+	Term uint64
+
+	// Last Log is a hint to help accelerate rebuilding slow nodes
+	LastLog uint64
+
+	// We may not succeed if we have a conflicting entry
+	Success bool
+
+	// There are scenarios where this request didn't succeed
+	// but there's no need to wait/back-off the next attempt.
+	NoRetryBackoff bool
+}
+
+// GetRPCHeader - See WithRPCHeader.
+func (r *AppendEntriesResponse) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// RequestVoteRequest is the command used by a candidate to ask a Raft peer
+// for a vote in an election.
+type RequestVoteRequest struct {
+	RPCHeader
+
+	// Provide the term and our id
+	Term      uint64
+	Candidate []byte
+
+	// Used to ensure safety
+	LastLogIndex uint64
+	LastLogTerm  uint64
+
+	// Used to indicate to peers if this vote was triggered by a leadership
+	// transfer. It is required for leadership transfer to work, because servers
+	// wouldn't vote otherwise if they are aware of an existing leader.
+	LeadershipTransfer bool
+}
+
+// GetRPCHeader - See WithRPCHeader.
+func (r *RequestVoteRequest) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// RequestVoteResponse is the response returned from a RequestVoteRequest.
+type RequestVoteResponse struct {
+	RPCHeader
+
+	// Newer term if leader is out of date.
+	Term uint64
+
+	// Peers is deprecated, but required by servers that only understand
+	// protocol version 0. This is not populated in protocol version 2
+	// and later.
+	Peers []byte
+
+	// Is the vote granted.
+	Granted bool
+}
+
+// GetRPCHeader - See WithRPCHeader.
+func (r *RequestVoteResponse) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// InstallSnapshotRequest is the command sent to a Raft peer to bootstrap its
+// log (and state machine) from a snapshot on another peer.
+type InstallSnapshotRequest struct {
+	RPCHeader
+	SnapshotVersion SnapshotVersion
+
+	Term   uint64
+	Leader []byte
+
+	// These are the last index/term included in the snapshot
+	LastLogIndex uint64
+	LastLogTerm  uint64
+
+	// Peer Set in the snapshot. This is deprecated in favor of Configuration
+	// but remains here in case we receive an InstallSnapshot from a leader
+	// that's running old code.
+	Peers []byte
+
+	// Cluster membership.
+	Configuration []byte
+	// Log index where 'Configuration' entry was originally written.
+	ConfigurationIndex uint64
+
+	// Size of the snapshot
+	Size int64
+}
+
+// GetRPCHeader - See WithRPCHeader.
+func (r *InstallSnapshotRequest) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// InstallSnapshotResponse is the response returned from an
+// InstallSnapshotRequest.
+type InstallSnapshotResponse struct {
+	RPCHeader
+
+	Term    uint64
+	Success bool
+}
+
+// GetRPCHeader - See WithRPCHeader.
+func (r *InstallSnapshotResponse) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// TimeoutNowRequest is the command used by a leader to signal another server to
+// start an election.
+type TimeoutNowRequest struct {
+	RPCHeader
+}
+
+// GetRPCHeader - See WithRPCHeader.
+func (r *TimeoutNowRequest) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// TimeoutNowResponse is the response to TimeoutNowRequest.
+type TimeoutNowResponse struct {
+	RPCHeader
+}
+
+// GetRPCHeader - See WithRPCHeader.
+func (r *TimeoutNowResponse) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
diff --git a/vendor/github.com/hashicorp/raft/commitment.go b/vendor/github.com/hashicorp/raft/commitment.go
new file mode 100644
index 00000000..7aa36464
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/commitment.go
@@ -0,0 +1,101 @@
+package raft
+
+import (
+	"sort"
+	"sync"
+)
+
+// Commitment is used to advance the leader's commit index. The leader and
+// replication goroutines report in newly written entries with Match(), and
+// this notifies on commitCh when the commit index has advanced.
+type commitment struct {
+	// protects matchIndexes and commitIndex
+	sync.Mutex
+	// notified when commitIndex increases
+	commitCh chan struct{}
+	// voter ID to log index: the server stores up through this log entry
+	matchIndexes map[ServerID]uint64
+	// a quorum stores up through this log entry. monotonically increases.
+	commitIndex uint64
+	// the first index of this leader's term: this needs to be replicated to a
+	// majority of the cluster before this leader may mark anything committed
+	// (per Raft's commitment rule)
+	startIndex uint64
+}
+
+// newCommitment returns a commitment struct that notifies the provided
+// channel when log entries have been committed.
A new commitment struct is +// created each time this server becomes leader for a particular term. +// 'configuration' is the servers in the cluster. +// 'startIndex' is the first index created in this term (see +// its description above). +func newCommitment(commitCh chan struct{}, configuration Configuration, startIndex uint64) *commitment { + matchIndexes := make(map[ServerID]uint64) + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + matchIndexes[server.ID] = 0 + } + } + return &commitment{ + commitCh: commitCh, + matchIndexes: matchIndexes, + commitIndex: 0, + startIndex: startIndex, + } +} + +// Called when a new cluster membership configuration is created: it will be +// used to determine commitment from now on. 'configuration' is the servers in +// the cluster. +func (c *commitment) setConfiguration(configuration Configuration) { + c.Lock() + defer c.Unlock() + oldMatchIndexes := c.matchIndexes + c.matchIndexes = make(map[ServerID]uint64) + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + c.matchIndexes[server.ID] = oldMatchIndexes[server.ID] // defaults to 0 + } + } + c.recalculate() +} + +// Called by leader after commitCh is notified +func (c *commitment) getCommitIndex() uint64 { + c.Lock() + defer c.Unlock() + return c.commitIndex +} + +// Match is called once a server completes writing entries to disk: either the +// leader has written the new entry or a follower has replied to an +// AppendEntries RPC. The given server's disk agrees with this server's log up +// through the given index. +func (c *commitment) match(server ServerID, matchIndex uint64) { + c.Lock() + defer c.Unlock() + if prev, hasVote := c.matchIndexes[server]; hasVote && matchIndex > prev { + c.matchIndexes[server] = matchIndex + c.recalculate() + } +} + +// Internal helper to calculate new commitIndex from matchIndexes. +// Must be called with lock held. +func (c *commitment) recalculate() { + if len(c.matchIndexes) == 0 { + return + } + + matched := make([]uint64, 0, len(c.matchIndexes)) + for _, idx := range c.matchIndexes { + matched = append(matched, idx) + } + sort.Sort(uint64Slice(matched)) + quorumMatchIndex := matched[(len(matched)-1)/2] + + if quorumMatchIndex > c.commitIndex && quorumMatchIndex >= c.startIndex { + c.commitIndex = quorumMatchIndex + asyncNotifyCh(c.commitCh) + } +} diff --git a/vendor/github.com/hashicorp/raft/config.go b/vendor/github.com/hashicorp/raft/config.go new file mode 100644 index 00000000..272761b7 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/config.go @@ -0,0 +1,276 @@ +package raft + +import ( + "fmt" + "io" + "time" + + "github.com/hashicorp/go-hclog" +) + +// ProtocolVersion is the version of the protocol (which includes RPC messages +// as well as Raft-specific log entries) that this server can _understand_. Use +// the ProtocolVersion member of the Config object to control the version of +// the protocol to use when _speaking_ to other servers. Note that depending on +// the protocol version being spoken, some otherwise understood RPC messages +// may be refused. See dispositionRPC for details of this logic. +// +// There are notes about the upgrade path in the description of the versions +// below. If you are starting a fresh cluster then there's no reason not to +// jump right to the latest protocol version. If you need to interoperate with +// older, version 0 Raft servers you'll need to drive the cluster through the +// different versions in order. 
+// +// The version details are complicated, but here's a summary of what's required +// to get from a version 0 cluster to version 3: +// +// 1. In version N of your app that starts using the new Raft library with +// versioning, set ProtocolVersion to 1. +// 2. Make version N+1 of your app require version N as a prerequisite (all +// servers must be upgraded). For version N+1 of your app set ProtocolVersion +// to 2. +// 3. Similarly, make version N+2 of your app require version N+1 as a +// prerequisite. For version N+2 of your app, set ProtocolVersion to 3. +// +// During this upgrade, older cluster members will still have Server IDs equal +// to their network addresses. To upgrade an older member and give it an ID, it +// needs to leave the cluster and re-enter: +// +// 1. Remove the server from the cluster with RemoveServer, using its network +// address as its ServerID. +// 2. Update the server's config to use a UUID or something else that is +// not tied to the machine as the ServerID (restarting the server). +// 3. Add the server back to the cluster with AddVoter, using its new ID. +// +// You can do this during the rolling upgrade from N+1 to N+2 of your app, or +// as a rolling change at any time after the upgrade. +// +// Version History +// +// 0: Original Raft library before versioning was added. Servers running this +// version of the Raft library use AddPeerDeprecated/RemovePeerDeprecated +// for all configuration changes, and have no support for LogConfiguration. +// 1: First versioned protocol, used to interoperate with old servers, and begin +// the migration path to newer versions of the protocol. Under this version +// all configuration changes are propagated using the now-deprecated +// RemovePeerDeprecated Raft log entry. This means that server IDs are always +// set to be the same as the server addresses (since the old log entry type +// cannot transmit an ID), and only AddPeer/RemovePeer APIs are supported. +// Servers running this version of the protocol can understand the new +// LogConfiguration Raft log entry but will never generate one so they can +// remain compatible with version 0 Raft servers in the cluster. +// 2: Transitional protocol used when migrating an existing cluster to the new +// server ID system. Server IDs are still set to be the same as server +// addresses, but all configuration changes are propagated using the new +// LogConfiguration Raft log entry type, which can carry full ID information. +// This version supports the old AddPeer/RemovePeer APIs as well as the new +// ID-based AddVoter/RemoveServer APIs which should be used when adding +// version 3 servers to the cluster later. This version sheds all +// interoperability with version 0 servers, but can interoperate with newer +// Raft servers running with protocol version 1 since they can understand the +// new LogConfiguration Raft log entry, and this version can still understand +// their RemovePeerDeprecated Raft log entries. We need this protocol version +// as an intermediate step between 1 and 3 so that servers will propagate the +// ID information that will come from newly-added (or -rolled) servers using +// protocol version 3, but since they are still using their address-based IDs +// from the previous step they will still be able to track commitments and +// their own voting status properly. 
If we skipped this step, servers would +// be started with their new IDs, but they wouldn't see themselves in the old +// address-based configuration, so none of the servers would think they had a +// vote. +// 3: Protocol adding full support for server IDs and new ID-based server APIs +// (AddVoter, AddNonvoter, etc.), old AddPeer/RemovePeer APIs are no longer +// supported. Version 2 servers should be swapped out by removing them from +// the cluster one-by-one and re-adding them with updated configuration for +// this protocol version, along with their server ID. The remove/add cycle +// is required to populate their server ID. Note that removing must be done +// by ID, which will be the old server's address. +type ProtocolVersion int + +const ( + // ProtocolVersionMin is the minimum protocol version + ProtocolVersionMin ProtocolVersion = 0 + // ProtocolVersionMax is the maximum protocol version + ProtocolVersionMax = 3 +) + +// SnapshotVersion is the version of snapshots that this server can understand. +// Currently, it is always assumed that the server generates the latest version, +// though this may be changed in the future to include a configurable version. +// +// Version History +// +// 0: Original Raft library before versioning was added. The peers portion of +// these snapshots is encoded in the legacy format which requires decodePeers +// to parse. This version of snapshots should only be produced by the +// unversioned Raft library. +// 1: New format which adds support for a full configuration structure and its +// associated log index, with support for server IDs and non-voting server +// modes. To ease upgrades, this also includes the legacy peers structure but +// that will never be used by servers that understand version 1 snapshots. +// Since the original Raft library didn't enforce any versioning, we must +// include the legacy peers structure for this version, but we can deprecate +// it in the next snapshot version. +type SnapshotVersion int + +const ( + // SnapshotVersionMin is the minimum snapshot version + SnapshotVersionMin SnapshotVersion = 0 + // SnapshotVersionMax is the maximum snapshot version + SnapshotVersionMax = 1 +) + +// Config provides any necessary configuration for the Raft server. +type Config struct { + // ProtocolVersion allows a Raft server to inter-operate with older + // Raft servers running an older version of the code. This is used to + // version the wire protocol as well as Raft-specific log entries that + // the server uses when _speaking_ to other servers. There is currently + // no auto-negotiation of versions so all servers must be manually + // configured with compatible versions. See ProtocolVersionMin and + // ProtocolVersionMax for the versions of the protocol that this server + // can _understand_. + ProtocolVersion ProtocolVersion + + // HeartbeatTimeout specifies the time in follower state without + // a leader before we attempt an election. + HeartbeatTimeout time.Duration + + // ElectionTimeout specifies the time in candidate state without + // a leader before we attempt an election. + ElectionTimeout time.Duration + + // CommitTimeout controls the time without an Apply() operation + // before we heartbeat to ensure a timely commit. Due to random + // staggering, may be delayed as much as 2x this value. + CommitTimeout time.Duration + + // MaxAppendEntries controls the maximum number of append entries + // to send at once. 
We want to strike a balance between efficiency
+ // and avoiding waste if the follower is going to reject because of
+ // an inconsistent log.
+ MaxAppendEntries int
+
+ // If we are a member of a cluster, and RemovePeer is invoked for the
+ // local node, then we forget all peers and transition into the follower state.
+ // If ShutdownOnRemove is set, we additionally shut down Raft. Otherwise,
+ // we can become a leader of a cluster containing only this node.
+ ShutdownOnRemove bool
+
+ // TrailingLogs controls how many logs we leave after a snapshot. This is
+ // used so that we can quickly replay logs on a follower instead of being
+ // forced to send an entire snapshot.
+ TrailingLogs uint64
+
+ // SnapshotInterval controls how often we check if we should perform a snapshot.
+ // We randomly stagger between this value and 2x this value to keep the entire
+ // cluster from performing a snapshot at once.
+ SnapshotInterval time.Duration
+
+ // SnapshotThreshold controls how many outstanding logs there must be before
+ // we perform a snapshot. This is to prevent excessive snapshots when we can
+ // just replay a small set of logs.
+ SnapshotThreshold uint64
+
+ // LeaderLeaseTimeout is used to control how long the "lease" lasts
+ // for being the leader without being able to contact a quorum
+ // of nodes. If we reach this interval without contact, we will
+ // step down as leader.
+ LeaderLeaseTimeout time.Duration
+
+ // StartAsLeader forces Raft to start in the leader state. This should
+ // never be used except for testing purposes, as it can cause a split-brain.
+ StartAsLeader bool
+
+ // LocalID is the unique ID for this server across all time. When running with
+ // ProtocolVersion < 3, you must set this to be the same as the network
+ // address of your transport.
+ LocalID ServerID
+
+ // NotifyCh is used to provide a channel that will be notified of leadership
+ // changes. Raft will block writing to this channel, so it should either be
+ // buffered or aggressively consumed.
+ NotifyCh chan<- bool
+
+ // LogOutput is used as a sink for logs, unless Logger is specified.
+ // Defaults to os.Stderr.
+ LogOutput io.Writer
+
+ // LogLevel represents a log level. If no matching string is specified,
+ // hclog.NoLevel is assumed.
+ LogLevel string
+
+ // Logger is a user-provided hc-log logger. If nil, a logger writing to
+ // LogOutput with LogLevel is used.
+ Logger hclog.Logger
+
+ // NoSnapshotRestoreOnStart controls if raft will restore a snapshot to the
+ // FSM on start. This is useful if your FSM recovers from other mechanisms
+ // than raft snapshotting. Snapshot metadata will still be used to initialize
+ // raft's configuration and index values. This is used in NewRaft and
+ // RestoreCluster.
+ NoSnapshotRestoreOnStart bool
+}
+
+// DefaultConfig returns a Config with usable defaults.
+func DefaultConfig() *Config {
+ return &Config{
+ ProtocolVersion: ProtocolVersionMax,
+ HeartbeatTimeout: 1000 * time.Millisecond,
+ ElectionTimeout: 1000 * time.Millisecond,
+ CommitTimeout: 50 * time.Millisecond,
+ MaxAppendEntries: 64,
+ ShutdownOnRemove: true,
+ TrailingLogs: 10240,
+ SnapshotInterval: 120 * time.Second,
+ SnapshotThreshold: 8192,
+ LeaderLeaseTimeout: 500 * time.Millisecond,
+ LogLevel: "DEBUG",
+ }
+}
+
+// ValidateConfig is used to validate a sane configuration.
+func ValidateConfig(config *Config) error {
+ // We don't actually support running as 0 in the library any more, but
+ // we do understand it.
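+ // (Editorial note) The effect of the floor below: with ProtocolVersionMin
+ // currently 0, protocolMin becomes 1, so a Config carrying ProtocolVersion
+ // 0 fails validation even though the wire format is still understood.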
+ protocolMin := ProtocolVersionMin
+ if protocolMin == 0 {
+ protocolMin = 1
+ }
+ if config.ProtocolVersion < protocolMin ||
+ config.ProtocolVersion > ProtocolVersionMax {
+ return fmt.Errorf("Protocol version %d must be >= %d and <= %d",
+ config.ProtocolVersion, protocolMin, ProtocolVersionMax)
+ }
+ if len(config.LocalID) == 0 {
+ return fmt.Errorf("LocalID cannot be empty")
+ }
+ if config.HeartbeatTimeout < 5*time.Millisecond {
+ return fmt.Errorf("Heartbeat timeout is too low")
+ }
+ if config.ElectionTimeout < 5*time.Millisecond {
+ return fmt.Errorf("Election timeout is too low")
+ }
+ if config.CommitTimeout < time.Millisecond {
+ return fmt.Errorf("Commit timeout is too low")
+ }
+ if config.MaxAppendEntries <= 0 {
+ return fmt.Errorf("MaxAppendEntries must be positive")
+ }
+ if config.MaxAppendEntries > 1024 {
+ return fmt.Errorf("MaxAppendEntries is too large")
+ }
+ if config.SnapshotInterval < 5*time.Millisecond {
+ return fmt.Errorf("Snapshot interval is too low")
+ }
+ if config.LeaderLeaseTimeout < 5*time.Millisecond {
+ return fmt.Errorf("Leader lease timeout is too low")
+ }
+ if config.LeaderLeaseTimeout > config.HeartbeatTimeout {
+ return fmt.Errorf("Leader lease timeout cannot be larger than heartbeat timeout")
+ }
+ if config.ElectionTimeout < config.HeartbeatTimeout {
+ return fmt.Errorf("Election timeout must be equal to or greater than heartbeat timeout")
+ }
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/raft/configuration.go b/vendor/github.com/hashicorp/raft/configuration.go
new file mode 100644
index 00000000..bf19997b
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/configuration.go
@@ -0,0 +1,363 @@
+package raft
+
+import "fmt"
+
+// ServerSuffrage determines whether a Server in a Configuration gets a vote.
+type ServerSuffrage int
+
+// Note: Don't renumber these, since the numbers are written into the log.
+const (
+ // Voter is a server whose vote is counted in elections and whose match index
+ // is used in advancing the leader's commit index.
+ Voter ServerSuffrage = iota
+ // Nonvoter is a server that receives log entries but is not considered for
+ // elections or commitment purposes.
+ Nonvoter
+ // Staging is a server that acts like a nonvoter with one exception: once a
+ // staging server receives enough log entries to be sufficiently caught up to
+ // the leader's log, the leader will invoke a membership change to change
+ // the Staging server to a Voter.
+ Staging
+)
+
+func (s ServerSuffrage) String() string {
+ switch s {
+ case Voter:
+ return "Voter"
+ case Nonvoter:
+ return "Nonvoter"
+ case Staging:
+ return "Staging"
+ }
+ return "ServerSuffrage"
+}
+
+// ConfigurationStore provides an interface that can optionally be implemented by FSMs
+// to store configuration updates made in the replicated log. In general this is only
+// necessary for FSMs that mutate durable state directly instead of applying changes
+// in memory and snapshotting periodically. By storing configuration changes, the
+// persistent FSM state can behave as a complete snapshot, and be able to recover
+// without an external snapshot just for persisting the raft configuration.
+type ConfigurationStore interface {
+ // ConfigurationStore is a superset of the FSM functionality
+ FSM
+
+ // StoreConfiguration is invoked once a log entry containing a configuration
+ // change is committed. It takes the index at which the configuration was
+ // written and the configuration value.
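+ //
+ // A sketch of an implementation (durableFSM and PutConfiguration are
+ // hypothetical, for illustration only):
+ //
+ //	func (f *durableFSM) StoreConfiguration(index uint64, c Configuration) {
+ //		f.db.PutConfiguration(index, c) // persist next to the FSM state
+ //	}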
+ StoreConfiguration(index uint64, configuration Configuration)
+}
+
+type nopConfigurationStore struct{}
+
+func (s nopConfigurationStore) StoreConfiguration(_ uint64, _ Configuration) {}
+
+// ServerID is a unique string identifying a server for all time.
+type ServerID string
+
+// ServerAddress is a network address for a server that a transport can contact.
+type ServerAddress string
+
+// Server tracks the information about a single server in a configuration.
+type Server struct {
+ // Suffrage determines whether the server gets a vote.
+ Suffrage ServerSuffrage
+ // ID is a unique string identifying this server for all time.
+ ID ServerID
+ // Address is its network address that a transport can contact.
+ Address ServerAddress
+}
+
+// Configuration tracks which servers are in the cluster, and whether they have
+// votes. This should include the local server, if it's a member of the cluster.
+// The servers are listed in no particular order, but each should only appear once.
+// These entries are appended to the log during membership changes.
+type Configuration struct {
+ Servers []Server
+}
+
+// Clone makes a deep copy of a Configuration.
+func (c *Configuration) Clone() (copy Configuration) {
+ copy.Servers = append(copy.Servers, c.Servers...)
+ return
+}
+
+// ConfigurationChangeCommand enumerates the different ways to change the
+// cluster configuration.
+type ConfigurationChangeCommand uint8
+
+const (
+ // AddStaging makes a server Staging unless it's a Voter.
+ AddStaging ConfigurationChangeCommand = iota
+ // AddNonvoter makes a server Nonvoter unless it's Staging or a Voter.
+ AddNonvoter
+ // DemoteVoter makes a server Nonvoter unless it's absent.
+ DemoteVoter
+ // RemoveServer removes a server entirely from the cluster membership.
+ RemoveServer
+ // Promote is created automatically by a leader; it turns a Staging server
+ // into a Voter.
+ Promote
+)
+
+func (c ConfigurationChangeCommand) String() string {
+ switch c {
+ case AddStaging:
+ return "AddStaging"
+ case AddNonvoter:
+ return "AddNonvoter"
+ case DemoteVoter:
+ return "DemoteVoter"
+ case RemoveServer:
+ return "RemoveServer"
+ case Promote:
+ return "Promote"
+ }
+ return "ConfigurationChangeCommand"
+}
+
+// configurationChangeRequest describes a change that a leader would like to
+// make to its current configuration. It's used only within a single server
+// (never serialized into the log), as part of `configurationChangeFuture`.
+type configurationChangeRequest struct {
+ command ConfigurationChangeCommand
+ serverID ServerID
+ serverAddress ServerAddress // only present for AddStaging, AddNonvoter
+ // prevIndex, if nonzero, is the index of the only configuration upon which
+ // this change may be applied; if another configuration entry has been
+ // added in the meantime, this request will fail.
+ prevIndex uint64
+}
+
+// configurations is state tracked on every server about its Configurations.
+// Note that, per Diego's dissertation, there can be at most one uncommitted
+// configuration at a time (the next configuration may not be created until the
+// prior one has been committed).
+//
+// One downside to storing just two configurations is that if you try to take a
+// snapshot when your state machine hasn't yet applied the committedIndex, we
+// have no record of the configuration that would logically fit into that
+// snapshot. We disallow snapshots in that case now. An alternative approach,
+// which LogCabin uses, is to track every configuration change in the
+// log.
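+//
+// For example (illustrative): if the configuration at index 7 is committed
+// and a membership change is then appended at index 9, committedIndex stays
+// 7 while latestIndex becomes 9; no further change may be appended until
+// index 9 commits, at which point both advance to 9.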
+type configurations struct { + // committed is the latest configuration in the log/snapshot that has been + // committed (the one with the largest index). + committed Configuration + // committedIndex is the log index where 'committed' was written. + committedIndex uint64 + // latest is the latest configuration in the log/snapshot (may be committed + // or uncommitted) + latest Configuration + // latestIndex is the log index where 'latest' was written. + latestIndex uint64 +} + +// Clone makes a deep copy of a configurations object. +func (c *configurations) Clone() (copy configurations) { + copy.committed = c.committed.Clone() + copy.committedIndex = c.committedIndex + copy.latest = c.latest.Clone() + copy.latestIndex = c.latestIndex + return +} + +// hasVote returns true if the server identified by 'id' is a Voter in the +// provided Configuration. +func hasVote(configuration Configuration, id ServerID) bool { + for _, server := range configuration.Servers { + if server.ID == id { + return server.Suffrage == Voter + } + } + return false +} + +// checkConfiguration tests a cluster membership configuration for common +// errors. +func checkConfiguration(configuration Configuration) error { + idSet := make(map[ServerID]bool) + addressSet := make(map[ServerAddress]bool) + var voters int + for _, server := range configuration.Servers { + if server.ID == "" { + return fmt.Errorf("Empty ID in configuration: %v", configuration) + } + if server.Address == "" { + return fmt.Errorf("Empty address in configuration: %v", server) + } + if idSet[server.ID] { + return fmt.Errorf("Found duplicate ID in configuration: %v", server.ID) + } + idSet[server.ID] = true + if addressSet[server.Address] { + return fmt.Errorf("Found duplicate address in configuration: %v", server.Address) + } + addressSet[server.Address] = true + if server.Suffrage == Voter { + voters++ + } + } + if voters == 0 { + return fmt.Errorf("Need at least one voter in configuration: %v", configuration) + } + return nil +} + +// nextConfiguration generates a new Configuration from the current one and a +// configuration change request. It's split from appendConfigurationEntry so +// that it can be unit tested easily. +func nextConfiguration(current Configuration, currentIndex uint64, change configurationChangeRequest) (Configuration, error) { + if change.prevIndex > 0 && change.prevIndex != currentIndex { + return Configuration{}, fmt.Errorf("Configuration changed since %v (latest is %v)", change.prevIndex, currentIndex) + } + + configuration := current.Clone() + switch change.command { + case AddStaging: + // TODO: barf on new address? + newServer := Server{ + // TODO: This should add the server as Staging, to be automatically + // promoted to Voter later. However, the promotion to Voter is not yet + // implemented, and doing so is not trivial with the way the leader loop + // coordinates with the replication goroutines today. So, for now, the + // server will have a vote right away, and the Promote case below is + // unused. 
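+ // (Editorial note: until that promotion logic lands, AddStaging
+ // therefore behaves like an immediate AddVoter.)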
+ Suffrage: Voter, + ID: change.serverID, + Address: change.serverAddress, + } + found := false + for i, server := range configuration.Servers { + if server.ID == change.serverID { + if server.Suffrage == Voter { + configuration.Servers[i].Address = change.serverAddress + } else { + configuration.Servers[i] = newServer + } + found = true + break + } + } + if !found { + configuration.Servers = append(configuration.Servers, newServer) + } + case AddNonvoter: + newServer := Server{ + Suffrage: Nonvoter, + ID: change.serverID, + Address: change.serverAddress, + } + found := false + for i, server := range configuration.Servers { + if server.ID == change.serverID { + if server.Suffrage != Nonvoter { + configuration.Servers[i].Address = change.serverAddress + } else { + configuration.Servers[i] = newServer + } + found = true + break + } + } + if !found { + configuration.Servers = append(configuration.Servers, newServer) + } + case DemoteVoter: + for i, server := range configuration.Servers { + if server.ID == change.serverID { + configuration.Servers[i].Suffrage = Nonvoter + break + } + } + case RemoveServer: + for i, server := range configuration.Servers { + if server.ID == change.serverID { + configuration.Servers = append(configuration.Servers[:i], configuration.Servers[i+1:]...) + break + } + } + case Promote: + for i, server := range configuration.Servers { + if server.ID == change.serverID && server.Suffrage == Staging { + configuration.Servers[i].Suffrage = Voter + break + } + } + } + + // Make sure we didn't do something bad like remove the last voter + if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + + return configuration, nil +} + +// encodePeers is used to serialize a Configuration into the old peers format. +// This is here for backwards compatibility when operating with a mix of old +// servers and should be removed once we deprecate support for protocol version 1. +func encodePeers(configuration Configuration, trans Transport) []byte { + // Gather up all the voters, other suffrage types are not supported by + // this data format. + var encPeers [][]byte + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + encPeers = append(encPeers, trans.EncodePeer(server.ID, server.Address)) + } + } + + // Encode the entire array. + buf, err := encodeMsgPack(encPeers) + if err != nil { + panic(fmt.Errorf("failed to encode peers: %v", err)) + } + + return buf.Bytes() +} + +// decodePeers is used to deserialize an old list of peers into a Configuration. +// This is here for backwards compatibility with old log entries and snapshots; +// it should be removed eventually. +func decodePeers(buf []byte, trans Transport) Configuration { + // Decode the buffer first. + var encPeers [][]byte + if err := decodeMsgPack(buf, &encPeers); err != nil { + panic(fmt.Errorf("failed to decode peers: %v", err)) + } + + // Deserialize each peer. + var servers []Server + for _, enc := range encPeers { + p := trans.DecodePeer(enc) + servers = append(servers, Server{ + Suffrage: Voter, + ID: ServerID(p), + Address: ServerAddress(p), + }) + } + + return Configuration{ + Servers: servers, + } +} + +// EncodeConfiguration serializes a Configuration using MsgPack, or panics on +// errors. 
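+//
+// Round-trip sketch (illustrative):
+//
+//	buf := EncodeConfiguration(current)
+//	restored := DecodeConfiguration(buf)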
+func EncodeConfiguration(configuration Configuration) []byte {
+ buf, err := encodeMsgPack(configuration)
+ if err != nil {
+ panic(fmt.Errorf("failed to encode configuration: %v", err))
+ }
+ return buf.Bytes()
+}
+
+// DecodeConfiguration deserializes a Configuration using MsgPack, or panics on
+// errors.
+func DecodeConfiguration(buf []byte) Configuration {
+ var configuration Configuration
+ if err := decodeMsgPack(buf, &configuration); err != nil {
+ panic(fmt.Errorf("failed to decode configuration: %v", err))
+ }
+ return configuration
+}
diff --git a/vendor/github.com/hashicorp/raft/discard_snapshot.go b/vendor/github.com/hashicorp/raft/discard_snapshot.go
new file mode 100644
index 00000000..187b8e7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/discard_snapshot.go
@@ -0,0 +1,64 @@
+package raft
+
+import (
+ "fmt"
+ "io"
+)
+
+// DiscardSnapshotStore is used to successfully snapshot while
+// always discarding the snapshot. This is useful for when the
+// log should be truncated but no snapshot should be retained.
+// This should never be used for production use, and is only
+// suitable for testing.
+type DiscardSnapshotStore struct{}
+
+// DiscardSnapshotSink is used to fulfill the SnapshotSink interface
+// while always discarding the snapshot. This is useful for when the log
+// should be truncated but no snapshot should be retained. This
+// should never be used for production use, and is only suitable
+// for testing.
+type DiscardSnapshotSink struct{}
+
+// NewDiscardSnapshotStore is used to create a new DiscardSnapshotStore.
+func NewDiscardSnapshotStore() *DiscardSnapshotStore {
+ return &DiscardSnapshotStore{}
+}
+
+// Create returns a valid type implementing the SnapshotSink interface
+// which always discards the snapshot.
+func (d *DiscardSnapshotStore) Create(version SnapshotVersion, index, term uint64,
+ configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) {
+ return &DiscardSnapshotSink{}, nil
+}
+
+// List returns successfully with a nil for []*SnapshotMeta.
+func (d *DiscardSnapshotStore) List() ([]*SnapshotMeta, error) {
+ return nil, nil
+}
+
+// Open returns an error since the DiscardSnapshotStore does not
+// support opening snapshots.
+func (d *DiscardSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) {
+ return nil, nil, fmt.Errorf("open is not supported")
+}
+
+// Write returns successfully with the length of the input byte slice
+// to satisfy the WriteCloser interface.
+func (d *DiscardSnapshotSink) Write(b []byte) (int, error) {
+ return len(b), nil
+}
+
+// Close returns a nil error.
+func (d *DiscardSnapshotSink) Close() error {
+ return nil
+}
+
+// ID returns "discard" for DiscardSnapshotSink.
+func (d *DiscardSnapshotSink) ID() string {
+ return "discard"
+}
+
+// Cancel returns successfully with a nil error.
+func (d *DiscardSnapshotSink) Cancel() error {
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/raft/file_snapshot.go b/vendor/github.com/hashicorp/raft/file_snapshot.go
new file mode 100644
index 00000000..d59f5d35
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/file_snapshot.go
@@ -0,0 +1,535 @@
+package raft
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "hash/crc64"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+)
+
+const (
+ testPath = "permTest"
+ snapPath = "snapshots"
+ metaFilePath = "meta.json"
+ stateFilePath = "state.bin"
+ tmpSuffix = ".tmp"
+)
+
+// FileSnapshotStore implements the SnapshotStore interface and allows
+// snapshots to be made on the local disk.
+type FileSnapshotStore struct {
+ path string
+ retain int
+ logger hclog.Logger
+}
+
+type snapMetaSlice []*fileSnapshotMeta
+
+// FileSnapshotSink implements SnapshotSink with a file.
+type FileSnapshotSink struct {
+ store *FileSnapshotStore
+ logger hclog.Logger
+ dir string
+ parentDir string
+ meta fileSnapshotMeta
+
+ stateFile *os.File
+ stateHash hash.Hash64
+ buffered *bufio.Writer
+
+ closed bool
+}
+
+// fileSnapshotMeta is stored on disk. We also put a CRC
+// on disk so that we can verify the snapshot.
+type fileSnapshotMeta struct {
+ SnapshotMeta
+ CRC []byte
+}
+
+// bufferedFile is returned when we open a snapshot. This way
+// reads are buffered and the file still gets closed.
+type bufferedFile struct {
+ bh *bufio.Reader
+ fh *os.File
+}
+
+func (b *bufferedFile) Read(p []byte) (n int, err error) {
+ return b.bh.Read(p)
+}
+
+func (b *bufferedFile) Close() error {
+ return b.fh.Close()
+}
+
+// NewFileSnapshotStoreWithLogger creates a new FileSnapshotStore based
+// on a base directory. The `retain` parameter controls how many
+// snapshots are retained. Must be at least 1.
+func NewFileSnapshotStoreWithLogger(base string, retain int, logger hclog.Logger) (*FileSnapshotStore, error) {
+ if retain < 1 {
+ return nil, fmt.Errorf("must retain at least one snapshot")
+ }
+ if logger == nil {
+ logger = hclog.New(&hclog.LoggerOptions{
+ Name: "snapshot",
+ Output: hclog.DefaultOutput,
+ Level: hclog.DefaultLevel,
+ })
+ }
+
+ // Ensure our path exists
+ path := filepath.Join(base, snapPath)
+ if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
+ return nil, fmt.Errorf("snapshot path not accessible: %v", err)
+ }
+
+ // Setup the store
+ store := &FileSnapshotStore{
+ path: path,
+ retain: retain,
+ logger: logger,
+ }
+
+ // Do a permissions test
+ if err := store.testPermissions(); err != nil {
+ return nil, fmt.Errorf("permissions test failed: %v", err)
+ }
+ return store, nil
+}
+
+// NewFileSnapshotStore creates a new FileSnapshotStore based
+// on a base directory. The `retain` parameter controls how many
+// snapshots are retained. Must be at least 1.
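+//
+// Illustrative usage (the path and retain count are hypothetical):
+//
+//	store, err := NewFileSnapshotStore("/var/lib/app/raft", 3, os.Stderr)
+//	if err != nil {
+//		// base directory missing or not writable
+//	}
+//	_ = store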
+func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) {
+ if logOutput == nil {
+ logOutput = os.Stderr
+ }
+ return NewFileSnapshotStoreWithLogger(base, retain, hclog.New(&hclog.LoggerOptions{
+ Name: "snapshot",
+ Output: logOutput,
+ Level: hclog.DefaultLevel,
+ }))
+}
+
+// testPermissions tries to touch a file in our path to see if it works.
+func (f *FileSnapshotStore) testPermissions() error {
+ path := filepath.Join(f.path, testPath)
+ fh, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+
+ if err = fh.Close(); err != nil {
+ return err
+ }
+
+ if err = os.Remove(path); err != nil {
+ return err
+ }
+ return nil
+}
+
+// snapshotName generates a name for the snapshot.
+func snapshotName(term, index uint64) string {
+ now := time.Now()
+ msec := now.UnixNano() / int64(time.Millisecond)
+ return fmt.Sprintf("%d-%d-%d", term, index, msec)
+}
+
+// Create is used to start a new snapshot.
+func (f *FileSnapshotStore) Create(version SnapshotVersion, index, term uint64,
+ configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) {
+ // We only support version 1 snapshots at this time.
+ if version != 1 {
+ return nil, fmt.Errorf("unsupported snapshot version %d", version)
+ }
+
+ // Create a new path
+ name := snapshotName(term, index)
+ path := filepath.Join(f.path, name+tmpSuffix)
+ f.logger.Info("creating new snapshot", "path", path)
+
+ // Make the directory
+ if err := os.MkdirAll(path, 0755); err != nil {
+ f.logger.Error("failed to make snapshot directory", "error", err)
+ return nil, err
+ }
+
+ // Create the sink
+ sink := &FileSnapshotSink{
+ store: f,
+ logger: f.logger,
+ dir: path,
+ parentDir: f.path,
+ meta: fileSnapshotMeta{
+ SnapshotMeta: SnapshotMeta{
+ Version: version,
+ ID: name,
+ Index: index,
+ Term: term,
+ Peers: encodePeers(configuration, trans),
+ Configuration: configuration,
+ ConfigurationIndex: configurationIndex,
+ },
+ CRC: nil,
+ },
+ }
+
+ // Write out the metadata
+ if err := sink.writeMeta(); err != nil {
+ f.logger.Error("failed to write metadata", "error", err)
+ return nil, err
+ }
+
+ // Open the state file
+ statePath := filepath.Join(path, stateFilePath)
+ fh, err := os.Create(statePath)
+ if err != nil {
+ f.logger.Error("failed to create state file", "error", err)
+ return nil, err
+ }
+ sink.stateFile = fh
+
+ // Create a CRC64 hash
+ sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA))
+
+ // Wrap both the hash and file in a MultiWriter with buffering
+ multi := io.MultiWriter(sink.stateFile, sink.stateHash)
+ sink.buffered = bufio.NewWriter(multi)
+
+ // Done
+ return sink, nil
+}
+
+// List returns available snapshots in the store.
+func (f *FileSnapshotStore) List() ([]*SnapshotMeta, error) {
+ // Get the eligible snapshots
+ snapshots, err := f.getSnapshots()
+ if err != nil {
+ f.logger.Error("failed to get snapshots", "error", err)
+ return nil, err
+ }
+
+ var snapMeta []*SnapshotMeta
+ for _, meta := range snapshots {
+ snapMeta = append(snapMeta, &meta.SnapshotMeta)
+ if len(snapMeta) == f.retain {
+ break
+ }
+ }
+ return snapMeta, nil
+}
+
+// getSnapshots returns all the known snapshots.
+func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) {
+ // Get the eligible snapshots
+ snapshots, err := ioutil.ReadDir(f.path)
+ if err != nil {
+ f.logger.Error("failed to scan snapshot directory", "error", err)
+ return nil, err
+ }
+
+ // Populate the metadata
+ var snapMeta []*fileSnapshotMeta
+ for _, snap := range snapshots {
+ // Ignore any non-directory files
+ if !snap.IsDir() {
+ continue
+ }
+
+ // Ignore any temporary snapshots
+ dirName := snap.Name()
+ if strings.HasSuffix(dirName, tmpSuffix) {
+ f.logger.Warn("found temporary snapshot", "name", dirName)
+ continue
+ }
+
+ // Try to read the metadata
+ meta, err := f.readMeta(dirName)
+ if err != nil {
+ f.logger.Warn("failed to read metadata", "name", dirName, "error", err)
+ continue
+ }
+
+ // Make sure we can understand this version.
+ if meta.Version < SnapshotVersionMin || meta.Version > SnapshotVersionMax {
+ f.logger.Warn("snapshot version not supported", "name", dirName, "version", meta.Version)
+ continue
+ }
+
+ // Append, but only return up to the retain count
+ snapMeta = append(snapMeta, meta)
+ }
+
+ // Sort the snapshots, reversed so we get new -> old
+ sort.Sort(sort.Reverse(snapMetaSlice(snapMeta)))
+
+ return snapMeta, nil
+}
+
+// readMeta is used to read the metadata for a given named backup.
+func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) {
+ // Open the meta file
+ metaPath := filepath.Join(f.path, name, metaFilePath)
+ fh, err := os.Open(metaPath)
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ // Buffer the file IO
+ buffered := bufio.NewReader(fh)
+
+ // Read in the JSON
+ meta := &fileSnapshotMeta{}
+ dec := json.NewDecoder(buffered)
+ if err := dec.Decode(meta); err != nil {
+ return nil, err
+ }
+ return meta, nil
+}
+
+// Open takes a snapshot ID and returns a ReadCloser for that snapshot.
+func (f *FileSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) {
+ // Get the metadata
+ meta, err := f.readMeta(id)
+ if err != nil {
+ f.logger.Error("failed to get metadata to open snapshot", "error", err)
+ return nil, nil, err
+ }
+
+ // Open the state file
+ statePath := filepath.Join(f.path, id, stateFilePath)
+ fh, err := os.Open(statePath)
+ if err != nil {
+ f.logger.Error("failed to open state file", "error", err)
+ return nil, nil, err
+ }
+
+ // Create a CRC64 hash
+ stateHash := crc64.New(crc64.MakeTable(crc64.ECMA))
+
+ // Compute the hash
+ _, err = io.Copy(stateHash, fh)
+ if err != nil {
+ f.logger.Error("failed to read state file", "error", err)
+ fh.Close()
+ return nil, nil, err
+ }
+
+ // Verify the hash
+ computed := stateHash.Sum(nil)
+ if !bytes.Equal(meta.CRC, computed) {
+ f.logger.Error("CRC checksum failed", "stored", meta.CRC, "computed", computed)
+ fh.Close()
+ return nil, nil, fmt.Errorf("CRC mismatch")
+ }
+
+ // Seek to the start
+ if _, err := fh.Seek(0, 0); err != nil {
+ f.logger.Error("state file seek failed", "error", err)
+ fh.Close()
+ return nil, nil, err
+ }
+
+ // Return a buffered file
+ buffered := &bufferedFile{
+ bh: bufio.NewReader(fh),
+ fh: fh,
+ }
+
+ return &meta.SnapshotMeta, buffered, nil
+}
+
+// ReapSnapshots reaps any snapshots beyond the retain count.
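+// For example (illustrative): with retain = 2 and five snapshots sorted
+// newest first by getSnapshots, the three oldest (positions 2 through 4)
+// are deleted.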
+func (f *FileSnapshotStore) ReapSnapshots() error {
+ snapshots, err := f.getSnapshots()
+ if err != nil {
+ f.logger.Error("failed to get snapshots", "error", err)
+ return err
+ }
+
+ for i := f.retain; i < len(snapshots); i++ {
+ path := filepath.Join(f.path, snapshots[i].ID)
+ f.logger.Info("reaping snapshot", "path", path)
+ if err := os.RemoveAll(path); err != nil {
+ f.logger.Error("failed to reap snapshot", "path", path, "error", err)
+ return err
+ }
+ }
+ return nil
+}
+
+// ID returns the ID of the snapshot; it can be used with Open()
+// after the snapshot is finalized.
+func (s *FileSnapshotSink) ID() string {
+ return s.meta.ID
+}
+
+// Write is used to append to the state file. We write to the
+// buffered IO object to reduce the amount of context switches.
+func (s *FileSnapshotSink) Write(b []byte) (int, error) {
+ return s.buffered.Write(b)
+}
+
+// Close is used to indicate a successful end.
+func (s *FileSnapshotSink) Close() error {
+ // Make sure close is idempotent
+ if s.closed {
+ return nil
+ }
+ s.closed = true
+
+ // Close the open handles
+ if err := s.finalize(); err != nil {
+ s.logger.Error("failed to finalize snapshot", "error", err)
+ if delErr := os.RemoveAll(s.dir); delErr != nil {
+ s.logger.Error("failed to delete temporary snapshot directory", "path", s.dir, "error", delErr)
+ return delErr
+ }
+ return err
+ }
+
+ // Write out the metadata
+ if err := s.writeMeta(); err != nil {
+ s.logger.Error("failed to write metadata", "error", err)
+ return err
+ }
+
+ // Move the directory into place
+ newPath := strings.TrimSuffix(s.dir, tmpSuffix)
+ if err := os.Rename(s.dir, newPath); err != nil {
+ s.logger.Error("failed to move snapshot into place", "error", err)
+ return err
+ }
+
+ if runtime.GOOS != "windows" { // skipping fsync for directory entry edits on Windows, only needed for *nix style file systems
+ parentFH, err := os.Open(s.parentDir)
+ if err != nil {
+ s.logger.Error("failed to open snapshot parent directory", "path", s.parentDir, "error", err)
+ return err
+ }
+ // Only defer the close once we know the open succeeded.
+ defer parentFH.Close()
+
+ if err = parentFH.Sync(); err != nil {
+ s.logger.Error("failed syncing parent directory", "path", s.parentDir, "error", err)
+ return err
+ }
+ }
+
+ // Reap any old snapshots
+ if err := s.store.ReapSnapshots(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Cancel is used to indicate an unsuccessful end.
+func (s *FileSnapshotSink) Cancel() error {
+ // Make sure close is idempotent
+ if s.closed {
+ return nil
+ }
+ s.closed = true
+
+ // Close the open handles
+ if err := s.finalize(); err != nil {
+ s.logger.Error("failed to finalize snapshot", "error", err)
+ return err
+ }
+
+ // Attempt to remove all artifacts
+ return os.RemoveAll(s.dir)
+}
+
+// finalize is used to close all of our resources.
+func (s *FileSnapshotSink) finalize() error {
+ // Flush any remaining data
+ if err := s.buffered.Flush(); err != nil {
+ return err
+ }
+
+ // Sync to force fsync to disk
+ if err := s.stateFile.Sync(); err != nil {
+ return err
+ }
+
+ // Get the file size
+ stat, statErr := s.stateFile.Stat()
+
+ // Close the file
+ if err := s.stateFile.Close(); err != nil {
+ return err
+ }
+
+ // Set the file size, check after we close
+ if statErr != nil {
+ return statErr
+ }
+ s.meta.Size = stat.Size()
+
+ // Set the CRC
+ s.meta.CRC = s.stateHash.Sum(nil)
+ return nil
+}
+
+// writeMeta is used to write out the metadata we have.
+func (s *FileSnapshotSink) writeMeta() error {
+ // Open the meta file
+ metaPath := filepath.Join(s.dir, metaFilePath)
+ fh, err := os.Create(metaPath)
+ if err != nil {
+ return err
+ }
+ defer fh.Close()
+
+ // Buffer the file IO
+ buffered := bufio.NewWriter(fh)
+
+ // Write out as JSON
+ enc := json.NewEncoder(buffered)
+ if err := enc.Encode(&s.meta); err != nil {
+ return err
+ }
+
+ if err = buffered.Flush(); err != nil {
+ return err
+ }
+
+ if err = fh.Sync(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Implement the sort interface for []*fileSnapshotMeta.
+func (s snapMetaSlice) Len() int {
+ return len(s)
+}
+
+func (s snapMetaSlice) Less(i, j int) bool {
+ if s[i].Term != s[j].Term {
+ return s[i].Term < s[j].Term
+ }
+ if s[i].Index != s[j].Index {
+ return s[i].Index < s[j].Index
+ }
+ return s[i].ID < s[j].ID
+}
+
+func (s snapMetaSlice) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
diff --git a/vendor/github.com/hashicorp/raft/fsm.go b/vendor/github.com/hashicorp/raft/fsm.go
new file mode 100644
index 00000000..5622ebf8
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/fsm.go
@@ -0,0 +1,235 @@
+package raft
+
+import (
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/armon/go-metrics"
+)
+
+// FSM provides an interface that can be implemented by
+// clients to make use of the replicated log.
+type FSM interface {
+ // Apply is invoked once a log entry is committed.
+ // It returns a value which will be made available in the
+ // ApplyFuture returned by Raft.Apply method if that
+ // method was called on the same Raft node as the FSM.
+ Apply(*Log) interface{}
+
+ // Snapshot is used to support log compaction. This call should
+ // return an FSMSnapshot which can be used to save a point-in-time
+ // snapshot of the FSM. Apply and Snapshot are not called in multiple
+ // threads, but Apply will be called concurrently with Persist. This means
+ // the FSM should be implemented in a fashion that allows for concurrent
+ // updates while a snapshot is happening.
+ Snapshot() (FSMSnapshot, error)
+
+ // Restore is used to restore an FSM from a snapshot. It is not called
+ // concurrently with any other command. The FSM must discard all previous
+ // state.
+ Restore(io.ReadCloser) error
+}
+
+// BatchingFSM extends the FSM interface to add an ApplyBatch function. This can
+// optionally be implemented by clients to enable multiple logs to be applied to
+// the FSM in batches. Up to MaxAppendEntries could be sent in a batch.
+type BatchingFSM interface {
+ // ApplyBatch is invoked once a batch of log entries has been committed and
+ // are ready to be applied to the FSM. ApplyBatch will take in an array of
+ // log entries. These log entries will be in the order they were committed,
+ // will not have gaps, and could be of a few log types. Clients should check
+ // the log type prior to attempting to decode the data attached. Presently
+ // the LogCommand and LogConfiguration types will be sent.
+ //
+ // The returned slice must be the same length as the input and each response
+ // should correlate to the log at the same index of the input. The returned
+ // values will be made available in the ApplyFuture returned by Raft.Apply
+ // method if that method was called on the same Raft node as the FSM.
+ ApplyBatch([]*Log) []interface{}
+
+ FSM
+}
+
+// FSMSnapshot is returned by an FSM in response to a Snapshot call.
+// It must be safe to invoke FSMSnapshot methods with concurrent
+// calls to Apply.
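+//
+// A minimal sketch of the contract (kvSnapshot is a hypothetical type
+// holding an immutable copy captured in Snapshot):
+//
+//	type kvSnapshot struct{ data []byte }
+//
+//	func (s *kvSnapshot) Persist(sink SnapshotSink) error {
+//		if _, err := sink.Write(s.data); err != nil {
+//			sink.Cancel()
+//			return err
+//		}
+//		return sink.Close()
+//	}
+//
+//	func (s *kvSnapshot) Release() {}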
+type FSMSnapshot interface {
+ // Persist should dump all necessary state to the WriteCloser 'sink',
+ // and call sink.Close() when finished or call sink.Cancel() on error.
+ Persist(sink SnapshotSink) error
+
+ // Release is invoked when we are finished with the snapshot.
+ Release()
+}
+
+// runFSM is a long running goroutine responsible for applying logs
+// to the FSM. This is done asynchronously from other log handling since we
+// don't want the FSM to block our internal operations.
+func (r *Raft) runFSM() {
+ var lastIndex, lastTerm uint64
+
+ batchingFSM, batchingEnabled := r.fsm.(BatchingFSM)
+ configStore, configStoreEnabled := r.fsm.(ConfigurationStore)
+
+ commitSingle := func(req *commitTuple) {
+ // Apply the log if a command or config change
+ var resp interface{}
+ // Make sure we send a response
+ defer func() {
+ // Invoke the future if given
+ if req.future != nil {
+ req.future.response = resp
+ req.future.respond(nil)
+ }
+ }()
+
+ switch req.log.Type {
+ case LogCommand:
+ start := time.Now()
+ resp = r.fsm.Apply(req.log)
+ metrics.MeasureSince([]string{"raft", "fsm", "apply"}, start)
+
+ case LogConfiguration:
+ if !configStoreEnabled {
+ // Return early to avoid incrementing the index and term for
+ // an unimplemented operation.
+ return
+ }
+
+ start := time.Now()
+ configStore.StoreConfiguration(req.log.Index, DecodeConfiguration(req.log.Data))
+ metrics.MeasureSince([]string{"raft", "fsm", "store_config"}, start)
+ }
+
+ // Update the indexes
+ lastIndex = req.log.Index
+ lastTerm = req.log.Term
+ }
+
+ commitBatch := func(reqs []*commitTuple) {
+ if !batchingEnabled {
+ for _, ct := range reqs {
+ commitSingle(ct)
+ }
+ return
+ }
+
+ // Only send LogCommand and LogConfiguration log types. LogBarrier types
+ // will not be sent to the FSM.
+ shouldSend := func(l *Log) bool {
+ switch l.Type {
+ case LogCommand, LogConfiguration:
+ return true
+ }
+ return false
+ }
+
+ var lastBatchIndex, lastBatchTerm uint64
+ sendLogs := make([]*Log, 0, len(reqs))
+ for _, req := range reqs {
+ if shouldSend(req.log) {
+ sendLogs = append(sendLogs, req.log)
+ }
+ lastBatchIndex = req.log.Index
+ lastBatchTerm = req.log.Term
+ }
+
+ var responses []interface{}
+ if len(sendLogs) > 0 {
+ start := time.Now()
+ responses = batchingFSM.ApplyBatch(sendLogs)
+ metrics.MeasureSince([]string{"raft", "fsm", "applyBatch"}, start)
+ metrics.AddSample([]string{"raft", "fsm", "applyBatchNum"}, float32(len(reqs)))
+
+ // Ensure we get the expected responses
+ if len(sendLogs) != len(responses) {
+ panic("invalid number of responses")
+ }
+ }
+
+ // Update the indexes
+ lastIndex = lastBatchIndex
+ lastTerm = lastBatchTerm
+
+ var i int
+ for _, req := range reqs {
+ var resp interface{}
+ // If the log was sent to the FSM, retrieve the response.
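+ // (Editorial note) responses only has entries for logs that passed
+ // shouldSend, so i advances independently of the reqs loop, keeping
+ // each future paired with its own FSM response.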
+ if shouldSend(req.log) { + resp = responses[i] + i++ + } + + if req.future != nil { + req.future.response = resp + req.future.respond(nil) + } + } + } + + restore := func(req *restoreFuture) { + // Open the snapshot + meta, source, err := r.snapshots.Open(req.ID) + if err != nil { + req.respond(fmt.Errorf("failed to open snapshot %v: %v", req.ID, err)) + return + } + + // Attempt to restore + start := time.Now() + if err := r.fsm.Restore(source); err != nil { + req.respond(fmt.Errorf("failed to restore snapshot %v: %v", req.ID, err)) + source.Close() + return + } + source.Close() + metrics.MeasureSince([]string{"raft", "fsm", "restore"}, start) + + // Update the last index and term + lastIndex = meta.Index + lastTerm = meta.Term + req.respond(nil) + } + + snapshot := func(req *reqSnapshotFuture) { + // Is there something to snapshot? + if lastIndex == 0 { + req.respond(ErrNothingNewToSnapshot) + return + } + + // Start a snapshot + start := time.Now() + snap, err := r.fsm.Snapshot() + metrics.MeasureSince([]string{"raft", "fsm", "snapshot"}, start) + + // Respond to the request + req.index = lastIndex + req.term = lastTerm + req.snapshot = snap + req.respond(err) + } + + for { + select { + case ptr := <-r.fsmMutateCh: + switch req := ptr.(type) { + case []*commitTuple: + commitBatch(req) + + case *restoreFuture: + restore(req) + + default: + panic(fmt.Errorf("bad type passed to fsmMutateCh: %#v", ptr)) + } + + case req := <-r.fsmSnapshotCh: + snapshot(req) + + case <-r.shutdownCh: + return + } + } +} diff --git a/vendor/github.com/hashicorp/raft/future.go b/vendor/github.com/hashicorp/raft/future.go new file mode 100644 index 00000000..6346b453 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/future.go @@ -0,0 +1,303 @@ +package raft + +import ( + "fmt" + "io" + "sync" + "time" +) + +// Future is used to represent an action that may occur in the future. +type Future interface { + // Error blocks until the future arrives and then + // returns the error status of the future. + // This may be called any number of times - all + // calls will return the same value. + // Note that it is not OK to call this method + // twice concurrently on the same Future instance. + Error() error +} + +// IndexFuture is used for future actions that can result in a raft log entry +// being created. +type IndexFuture interface { + Future + + // Index holds the index of the newly applied log entry. + // This must not be called until after the Error method has returned. + Index() uint64 +} + +// ApplyFuture is used for Apply and can return the FSM response. +type ApplyFuture interface { + IndexFuture + + // Response returns the FSM response as returned + // by the FSM.Apply method. This must not be called + // until after the Error method has returned. + Response() interface{} +} + +// ConfigurationFuture is used for GetConfiguration and can return the +// latest configuration in use by Raft. +type ConfigurationFuture interface { + IndexFuture + + // Configuration contains the latest configuration. This must + // not be called until after the Error method has returned. + Configuration() Configuration +} + +// SnapshotFuture is used for waiting on a user-triggered snapshot to complete. +type SnapshotFuture interface { + Future + + // Open is a function you can call to access the underlying snapshot and + // its metadata. This must not be called until after the Error method + // has returned. 
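+ //
+ // Illustrative use (sketch):
+ //
+ //	if err := future.Error(); err == nil {
+ //		meta, rc, oerr := future.Open()
+ //		if oerr == nil {
+ //			defer rc.Close()
+ //			_ = meta // inspect metadata, then stream from rc
+ //		}
+ //	}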
+ Open() (*SnapshotMeta, io.ReadCloser, error) +} + +// LeadershipTransferFuture is used for waiting on a user-triggered leadership +// transfer to complete. +type LeadershipTransferFuture interface { + Future +} + +// errorFuture is used to return a static error. +type errorFuture struct { + err error +} + +func (e errorFuture) Error() error { + return e.err +} + +func (e errorFuture) Response() interface{} { + return nil +} + +func (e errorFuture) Index() uint64 { + return 0 +} + +// deferError can be embedded to allow a future +// to provide an error in the future. +type deferError struct { + err error + errCh chan error + responded bool +} + +func (d *deferError) init() { + d.errCh = make(chan error, 1) +} + +func (d *deferError) Error() error { + if d.err != nil { + // Note that when we've received a nil error, this + // won't trigger, but the channel is closed after + // send so we'll still return nil below. + return d.err + } + if d.errCh == nil { + panic("waiting for response on nil channel") + } + d.err = <-d.errCh + return d.err +} + +func (d *deferError) respond(err error) { + if d.errCh == nil { + return + } + if d.responded { + return + } + d.errCh <- err + close(d.errCh) + d.responded = true +} + +// There are several types of requests that cause a configuration entry to +// be appended to the log. These are encoded here for leaderLoop() to process. +// This is internal to a single server. +type configurationChangeFuture struct { + logFuture + req configurationChangeRequest +} + +// bootstrapFuture is used to attempt a live bootstrap of the cluster. See the +// Raft object's BootstrapCluster member function for more details. +type bootstrapFuture struct { + deferError + + // configuration is the proposed bootstrap configuration to apply. + configuration Configuration +} + +// logFuture is used to apply a log entry and waits until +// the log is considered committed. +type logFuture struct { + deferError + log Log + response interface{} + dispatch time.Time +} + +func (l *logFuture) Response() interface{} { + return l.response +} + +func (l *logFuture) Index() uint64 { + return l.log.Index +} + +type shutdownFuture struct { + raft *Raft +} + +func (s *shutdownFuture) Error() error { + if s.raft == nil { + return nil + } + s.raft.waitShutdown() + if closeable, ok := s.raft.trans.(WithClose); ok { + closeable.Close() + } + return nil +} + +// userSnapshotFuture is used for waiting on a user-triggered snapshot to +// complete. +type userSnapshotFuture struct { + deferError + + // opener is a function used to open the snapshot. This is filled in + // once the future returns with no error. + opener func() (*SnapshotMeta, io.ReadCloser, error) +} + +// Open is a function you can call to access the underlying snapshot and its +// metadata. +func (u *userSnapshotFuture) Open() (*SnapshotMeta, io.ReadCloser, error) { + if u.opener == nil { + return nil, nil, fmt.Errorf("no snapshot available") + } + // Invalidate the opener so it can't get called multiple times, + // which isn't generally safe. + defer func() { + u.opener = nil + }() + return u.opener() +} + +// userRestoreFuture is used for waiting on a user-triggered restore of an +// external snapshot to complete. +type userRestoreFuture struct { + deferError + + // meta is the metadata that belongs with the snapshot. + meta *SnapshotMeta + + // reader is the interface to read the snapshot contents from. + reader io.Reader +} + +// reqSnapshotFuture is used for requesting a snapshot start. +// It is only used internally. 
+type reqSnapshotFuture struct { + deferError + + // snapshot details provided by the FSM runner before responding + index uint64 + term uint64 + snapshot FSMSnapshot +} + +// restoreFuture is used for requesting an FSM to perform a +// snapshot restore. Used internally only. +type restoreFuture struct { + deferError + ID string +} + +// verifyFuture is used to verify the current node is still +// the leader. This is to prevent a stale read. +type verifyFuture struct { + deferError + notifyCh chan *verifyFuture + quorumSize int + votes int + voteLock sync.Mutex +} + +// leadershipTransferFuture is used to track the progress of a leadership +// transfer internally. +type leadershipTransferFuture struct { + deferError + + ID *ServerID + Address *ServerAddress +} + +// configurationsFuture is used to retrieve the current configurations. This is +// used to allow safe access to this information outside of the main thread. +type configurationsFuture struct { + deferError + configurations configurations +} + +// Configuration returns the latest configuration in use by Raft. +func (c *configurationsFuture) Configuration() Configuration { + return c.configurations.latest +} + +// Index returns the index of the latest configuration in use by Raft. +func (c *configurationsFuture) Index() uint64 { + return c.configurations.latestIndex +} + +// vote is used to respond to a verifyFuture. +// This may block when responding on the notifyCh. +func (v *verifyFuture) vote(leader bool) { + v.voteLock.Lock() + defer v.voteLock.Unlock() + + // Guard against having notified already + if v.notifyCh == nil { + return + } + + if leader { + v.votes++ + if v.votes >= v.quorumSize { + v.notifyCh <- v + v.notifyCh = nil + } + } else { + v.notifyCh <- v + v.notifyCh = nil + } +} + +// appendFuture is used for waiting on a pipelined append +// entries RPC. 
+type appendFuture struct { + deferError + start time.Time + args *AppendEntriesRequest + resp *AppendEntriesResponse +} + +func (a *appendFuture) Start() time.Time { + return a.start +} + +func (a *appendFuture) Request() *AppendEntriesRequest { + return a.args +} + +func (a *appendFuture) Response() *AppendEntriesResponse { + return a.resp +} diff --git a/vendor/github.com/hashicorp/raft/go.mod b/vendor/github.com/hashicorp/raft/go.mod new file mode 100644 index 00000000..6af740be --- /dev/null +++ b/vendor/github.com/hashicorp/raft/go.mod @@ -0,0 +1,13 @@ +module github.com/hashicorp/raft + +go 1.12 + +require ( + github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 + github.com/boltdb/bolt v1.3.1 // indirect + github.com/hashicorp/go-hclog v0.9.1 + github.com/hashicorp/go-msgpack v0.5.5 + github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea + github.com/stretchr/testify v1.3.0 + golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 // indirect +) diff --git a/vendor/github.com/hashicorp/raft/go.sum b/vendor/github.com/hashicorp/raft/go.sum new file mode 100644 index 00000000..265fa28e --- /dev/null +++ b/vendor/github.com/hashicorp/raft/go.sum @@ -0,0 +1,45 @@ +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/raft-boltdb 
v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/hashicorp/raft/inmem_snapshot.go b/vendor/github.com/hashicorp/raft/inmem_snapshot.go new file mode 100644 index 00000000..641d9d81 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/inmem_snapshot.go @@ -0,0 +1,111 @@ +package raft + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "sync" +) + +// InmemSnapshotStore implements the SnapshotStore interface and +// retains only the most recent snapshot +type InmemSnapshotStore struct { + latest *InmemSnapshotSink + hasSnapshot bool + sync.RWMutex +} + +// InmemSnapshotSink implements SnapshotSink in memory +type InmemSnapshotSink struct { + meta SnapshotMeta + contents *bytes.Buffer +} + +// NewInmemSnapshotStore creates a blank new InmemSnapshotStore +func NewInmemSnapshotStore() *InmemSnapshotStore { + return &InmemSnapshotStore{ + latest: &InmemSnapshotSink{ + contents: &bytes.Buffer{}, + }, + } +} + +// Create replaces the stored snapshot with a new one using the given args +func (m *InmemSnapshotStore) Create(version SnapshotVersion, index, term uint64, + configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) { + // We only support version 1 snapshots at this time. 
+ if version != 1 { + return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + name := snapshotName(term, index) + + m.Lock() + defer m.Unlock() + + sink := &InmemSnapshotSink{ + meta: SnapshotMeta{ + Version: version, + ID: name, + Index: index, + Term: term, + Peers: encodePeers(configuration, trans), + Configuration: configuration, + ConfigurationIndex: configurationIndex, + }, + contents: &bytes.Buffer{}, + } + m.hasSnapshot = true + m.latest = sink + + return sink, nil +} + +// List returns the latest snapshot taken +func (m *InmemSnapshotStore) List() ([]*SnapshotMeta, error) { + m.RLock() + defer m.RUnlock() + + if !m.hasSnapshot { + return []*SnapshotMeta{}, nil + } + return []*SnapshotMeta{&m.latest.meta}, nil +} + +// Open wraps an io.ReadCloser around the snapshot contents +func (m *InmemSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + m.RLock() + defer m.RUnlock() + + if m.latest.meta.ID != id { + return nil, nil, fmt.Errorf("[ERR] snapshot: failed to open snapshot id: %s", id) + } + + // Make a copy of the contents, since a bytes.Buffer can only be read + // once. + contents := bytes.NewBuffer(m.latest.contents.Bytes()) + return &m.latest.meta, ioutil.NopCloser(contents), nil +} + +// Write appends the given bytes to the snapshot contents +func (s *InmemSnapshotSink) Write(p []byte) (n int, err error) { + written, err := io.Copy(s.contents, bytes.NewReader(p)) + s.meta.Size += written + return int(written), err +} + +// Close updates the Size and is otherwise a no-op +func (s *InmemSnapshotSink) Close() error { + return nil +} + +// ID returns the ID of the SnapshotMeta +func (s *InmemSnapshotSink) ID() string { + return s.meta.ID +} + +// Cancel returns successfully with a nil error +func (s *InmemSnapshotSink) Cancel() error { + return nil +} diff --git a/vendor/github.com/hashicorp/raft/inmem_store.go b/vendor/github.com/hashicorp/raft/inmem_store.go new file mode 100644 index 00000000..6285610f --- /dev/null +++ b/vendor/github.com/hashicorp/raft/inmem_store.go @@ -0,0 +1,130 @@ +package raft + +import ( + "errors" + "sync" +) + +// InmemStore implements the LogStore and StableStore interface. +// It should NOT EVER be used for production. It is used only for +// unit tests. Use the MDBStore implementation instead. +type InmemStore struct { + l sync.RWMutex + lowIndex uint64 + highIndex uint64 + logs map[uint64]*Log + kv map[string][]byte + kvInt map[string]uint64 +} + +// NewInmemStore returns a new in-memory backend. Do not ever +// use for production. Only for testing. +func NewInmemStore() *InmemStore { + i := &InmemStore{ + logs: make(map[uint64]*Log), + kv: make(map[string][]byte), + kvInt: make(map[string]uint64), + } + return i +} + +// FirstIndex implements the LogStore interface. +func (i *InmemStore) FirstIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.lowIndex, nil +} + +// LastIndex implements the LogStore interface. +func (i *InmemStore) LastIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.highIndex, nil +} + +// GetLog implements the LogStore interface. +func (i *InmemStore) GetLog(index uint64, log *Log) error { + i.l.RLock() + defer i.l.RUnlock() + l, ok := i.logs[index] + if !ok { + return ErrLogNotFound + } + *log = *l + return nil +} + +// StoreLog implements the LogStore interface. +func (i *InmemStore) StoreLog(log *Log) error { + return i.StoreLogs([]*Log{log}) +} + +// StoreLogs implements the LogStore interface. 
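+// Besides storing the entries, it advances the cached lowIndex/highIndex
+// bounds so FirstIndex and LastIndex stay consistent with the map contents.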
+func (i *InmemStore) StoreLogs(logs []*Log) error {
+	i.l.Lock()
+	defer i.l.Unlock()
+	for _, l := range logs {
+		i.logs[l.Index] = l
+		if i.lowIndex == 0 {
+			i.lowIndex = l.Index
+		}
+		if l.Index > i.highIndex {
+			i.highIndex = l.Index
+		}
+	}
+	return nil
+}
+
+// DeleteRange implements the LogStore interface.
+func (i *InmemStore) DeleteRange(min, max uint64) error {
+	i.l.Lock()
+	defer i.l.Unlock()
+	for j := min; j <= max; j++ {
+		delete(i.logs, j)
+	}
+	if min <= i.lowIndex {
+		i.lowIndex = max + 1
+	}
+	if max >= i.highIndex {
+		i.highIndex = min - 1
+	}
+	if i.lowIndex > i.highIndex {
+		i.lowIndex = 0
+		i.highIndex = 0
+	}
+	return nil
+}
+
+// Set implements the StableStore interface.
+func (i *InmemStore) Set(key []byte, val []byte) error {
+	i.l.Lock()
+	defer i.l.Unlock()
+	i.kv[string(key)] = val
+	return nil
+}
+
+// Get implements the StableStore interface.
+func (i *InmemStore) Get(key []byte) ([]byte, error) {
+	i.l.RLock()
+	defer i.l.RUnlock()
+	val := i.kv[string(key)]
+	if val == nil {
+		return nil, errors.New("not found")
+	}
+	return val, nil
+}
+
+// SetUint64 implements the StableStore interface.
+func (i *InmemStore) SetUint64(key []byte, val uint64) error {
+	i.l.Lock()
+	defer i.l.Unlock()
+	i.kvInt[string(key)] = val
+	return nil
+}
+
+// GetUint64 implements the StableStore interface.
+func (i *InmemStore) GetUint64(key []byte) (uint64, error) {
+	i.l.RLock()
+	defer i.l.RUnlock()
+	return i.kvInt[string(key)], nil
+}
diff --git a/vendor/github.com/hashicorp/raft/inmem_transport.go b/vendor/github.com/hashicorp/raft/inmem_transport.go
new file mode 100644
index 00000000..7f493f48
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/inmem_transport.go
@@ -0,0 +1,348 @@
+package raft
+
+import (
+	"fmt"
+	"io"
+	"sync"
+	"time"
+)
+
+// NewInmemAddr returns a new in-memory addr with
+// a randomly generated UUID as the ID.
+func NewInmemAddr() ServerAddress {
+	return ServerAddress(generateUUID())
+}
+
+// inmemPipeline is used to pipeline requests for the in-mem transport.
+type inmemPipeline struct {
+	trans    *InmemTransport
+	peer     *InmemTransport
+	peerAddr ServerAddress
+
+	doneCh       chan AppendFuture
+	inprogressCh chan *inmemPipelineInflight
+
+	shutdown     bool
+	shutdownCh   chan struct{}
+	shutdownLock sync.Mutex
+}
+
+type inmemPipelineInflight struct {
+	future *appendFuture
+	respCh <-chan RPCResponse
+}
+
+// InmemTransport implements the Transport interface, to allow Raft to be
+// tested in-memory without going over a network.
+type InmemTransport struct {
+	sync.RWMutex
+	consumerCh chan RPC
+	localAddr  ServerAddress
+	peers      map[ServerAddress]*InmemTransport
+	pipelines  []*inmemPipeline
+	timeout    time.Duration
+}
+
+// NewInmemTransportWithTimeout is used to initialize a new transport and
+// generates a random local address if none is specified. The given timeout
+// will be used to decide how long to wait for a connected peer to process the
+// RPCs that we're sending it. See also Connect() and Consumer().
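+//
+// A typical test wiring (sketch) connects two transports both ways so RPCs
+// can be routed in memory:
+//
+//	_, t1 := NewInmemTransport("a")
+//	_, t2 := NewInmemTransport("b")
+//	t1.Connect("b", t2)
+//	t2.Connect("a", t1)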
+func NewInmemTransportWithTimeout(addr ServerAddress, timeout time.Duration) (ServerAddress, *InmemTransport) { + if string(addr) == "" { + addr = NewInmemAddr() + } + trans := &InmemTransport{ + consumerCh: make(chan RPC, 16), + localAddr: addr, + peers: make(map[ServerAddress]*InmemTransport), + timeout: timeout, + } + return addr, trans +} + +// NewInmemTransport is used to initialize a new transport +// and generates a random local address if none is specified +func NewInmemTransport(addr ServerAddress) (ServerAddress, *InmemTransport) { + return NewInmemTransportWithTimeout(addr, 50*time.Millisecond) +} + +// SetHeartbeatHandler is used to set optional fast-path for +// heartbeats, not supported for this transport. +func (i *InmemTransport) SetHeartbeatHandler(cb func(RPC)) { +} + +// Consumer implements the Transport interface. +func (i *InmemTransport) Consumer() <-chan RPC { + return i.consumerCh +} + +// LocalAddr implements the Transport interface. +func (i *InmemTransport) LocalAddr() ServerAddress { + return i.localAddr +} + +// AppendEntriesPipeline returns an interface that can be used to pipeline +// AppendEntries requests. +func (i *InmemTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) { + i.Lock() + defer i.Unlock() + + peer, ok := i.peers[target] + if !ok { + return nil, fmt.Errorf("failed to connect to peer: %v", target) + } + pipeline := newInmemPipeline(i, peer, target) + i.pipelines = append(i.pipelines, pipeline) + return pipeline, nil +} + +// AppendEntries implements the Transport interface. +func (i *InmemTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*AppendEntriesResponse) + *resp = *out + return nil +} + +// RequestVote implements the Transport interface. +func (i *InmemTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*RequestVoteResponse) + *resp = *out + return nil +} + +// InstallSnapshot implements the Transport interface. +func (i *InmemTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { + rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*InstallSnapshotResponse) + *resp = *out + return nil +} + +// TimeoutNow implements the Transport interface. 
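+// Like InstallSnapshot, it allows a longer window (10x the configured
+// timeout) for the peer to process the request.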
+func (i *InmemTransport) TimeoutNow(id ServerID, target ServerAddress, args *TimeoutNowRequest, resp *TimeoutNowResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, 10*i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*TimeoutNowResponse) + *resp = *out + return nil +} + +func (i *InmemTransport) makeRPC(target ServerAddress, args interface{}, r io.Reader, timeout time.Duration) (rpcResp RPCResponse, err error) { + i.RLock() + peer, ok := i.peers[target] + i.RUnlock() + + if !ok { + err = fmt.Errorf("failed to connect to peer: %v", target) + return + } + + // Send the RPC over + respCh := make(chan RPCResponse) + req := RPC{ + Command: args, + Reader: r, + RespChan: respCh, + } + select { + case peer.consumerCh <- req: + case <-time.After(timeout): + err = fmt.Errorf("send timed out") + return + } + + // Wait for a response + select { + case rpcResp = <-respCh: + if rpcResp.Error != nil { + err = rpcResp.Error + } + case <-time.After(timeout): + err = fmt.Errorf("command timed out") + } + return +} + +// EncodePeer implements the Transport interface. +func (i *InmemTransport) EncodePeer(id ServerID, p ServerAddress) []byte { + return []byte(p) +} + +// DecodePeer implements the Transport interface. +func (i *InmemTransport) DecodePeer(buf []byte) ServerAddress { + return ServerAddress(buf) +} + +// Connect is used to connect this transport to another transport for +// a given peer name. This allows for local routing. +func (i *InmemTransport) Connect(peer ServerAddress, t Transport) { + trans := t.(*InmemTransport) + i.Lock() + defer i.Unlock() + i.peers[peer] = trans +} + +// Disconnect is used to remove the ability to route to a given peer. +func (i *InmemTransport) Disconnect(peer ServerAddress) { + i.Lock() + defer i.Unlock() + delete(i.peers, peer) + + // Disconnect any pipelines + n := len(i.pipelines) + for idx := 0; idx < n; idx++ { + if i.pipelines[idx].peerAddr == peer { + i.pipelines[idx].Close() + i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil + idx-- + n-- + } + } + i.pipelines = i.pipelines[:n] +} + +// DisconnectAll is used to remove all routes to peers. 
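+// Any pipelines that were created for those peers are closed as well.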
+func (i *InmemTransport) DisconnectAll() { + i.Lock() + defer i.Unlock() + i.peers = make(map[ServerAddress]*InmemTransport) + + // Handle pipelines + for _, pipeline := range i.pipelines { + pipeline.Close() + } + i.pipelines = nil +} + +// Close is used to permanently disable the transport +func (i *InmemTransport) Close() error { + i.DisconnectAll() + return nil +} + +func newInmemPipeline(trans *InmemTransport, peer *InmemTransport, addr ServerAddress) *inmemPipeline { + i := &inmemPipeline{ + trans: trans, + peer: peer, + peerAddr: addr, + doneCh: make(chan AppendFuture, 16), + inprogressCh: make(chan *inmemPipelineInflight, 16), + shutdownCh: make(chan struct{}), + } + go i.decodeResponses() + return i +} + +func (i *inmemPipeline) decodeResponses() { + timeout := i.trans.timeout + for { + select { + case inp := <-i.inprogressCh: + var timeoutCh <-chan time.Time + if timeout > 0 { + timeoutCh = time.After(timeout) + } + + select { + case rpcResp := <-inp.respCh: + // Copy the result back + *inp.future.resp = *rpcResp.Response.(*AppendEntriesResponse) + inp.future.respond(rpcResp.Error) + + select { + case i.doneCh <- inp.future: + case <-i.shutdownCh: + return + } + + case <-timeoutCh: + inp.future.respond(fmt.Errorf("command timed out")) + select { + case i.doneCh <- inp.future: + case <-i.shutdownCh: + return + } + + case <-i.shutdownCh: + return + } + case <-i.shutdownCh: + return + } + } +} + +func (i *inmemPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { + // Create a new future + future := &appendFuture{ + start: time.Now(), + args: args, + resp: resp, + } + future.init() + + // Handle a timeout + var timeout <-chan time.Time + if i.trans.timeout > 0 { + timeout = time.After(i.trans.timeout) + } + + // Send the RPC over + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + Command: args, + RespChan: respCh, + } + select { + case i.peer.consumerCh <- rpc: + case <-timeout: + return nil, fmt.Errorf("command enqueue timeout") + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } + + // Send to be decoded + select { + case i.inprogressCh <- &inmemPipelineInflight{future, respCh}: + return future, nil + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } +} + +func (i *inmemPipeline) Consumer() <-chan AppendFuture { + return i.doneCh +} + +func (i *inmemPipeline) Close() error { + i.shutdownLock.Lock() + defer i.shutdownLock.Unlock() + if i.shutdown { + return nil + } + + i.shutdown = true + close(i.shutdownCh) + return nil +} diff --git a/vendor/github.com/hashicorp/raft/log.go b/vendor/github.com/hashicorp/raft/log.go new file mode 100644 index 00000000..ad3bf0f0 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/log.go @@ -0,0 +1,87 @@ +package raft + +// LogType describes various types of log entries. +type LogType uint8 + +const ( + // LogCommand is applied to a user FSM. + LogCommand LogType = iota + + // LogNoop is used to assert leadership. + LogNoop + + // LogAddPeerDeprecated is used to add a new peer. This should only be used with + // older protocol versions designed to be compatible with unversioned + // Raft servers. See comments in config.go for details. + LogAddPeerDeprecated + + // LogRemovePeerDeprecated is used to remove an existing peer. This should only be + // used with older protocol versions designed to be compatible with + // unversioned Raft servers. See comments in config.go for details. 
+ LogRemovePeerDeprecated + + // LogBarrier is used to ensure all preceding operations have been + // applied to the FSM. It is similar to LogNoop, but instead of returning + // once committed, it only returns once the FSM manager acks it. Otherwise + // it is possible there are operations committed but not yet applied to + // the FSM. + LogBarrier + + // LogConfiguration establishes a membership change configuration. It is + // created when a server is added, removed, promoted, etc. Only used + // when protocol version 1 or greater is in use. + LogConfiguration +) + +// Log entries are replicated to all members of the Raft cluster +// and form the heart of the replicated state machine. +type Log struct { + // Index holds the index of the log entry. + Index uint64 + + // Term holds the election term of the log entry. + Term uint64 + + // Type holds the type of the log entry. + Type LogType + + // Data holds the log entry's type-specific data. + Data []byte + + // Extensions holds an opaque byte slice of information for middleware. It + // is up to the client of the library to properly modify this as it adds + // layers and remove those layers when appropriate. This value is a part of + // the log, so very large values could cause timing issues. + // + // N.B. It is _up to the client_ to handle upgrade paths. For instance if + // using this with go-raftchunking, the client should ensure that all Raft + // peers are using a version that can handle that extension before ever + // actually triggering chunking behavior. It is sometimes sufficient to + // ensure that non-leaders are upgraded first, then the current leader is + // upgraded, but a leader changeover during this process could lead to + // trouble, so gating extension behavior via some flag in the client + // program is also a good idea. + Extensions []byte +} + +// LogStore is used to provide an interface for storing +// and retrieving logs in a durable fashion. +type LogStore interface { + // FirstIndex returns the first index written. 0 for no entries. + FirstIndex() (uint64, error) + + // LastIndex returns the last index written. 0 for no entries. + LastIndex() (uint64, error) + + // GetLog gets a log entry at a given index. + GetLog(index uint64, log *Log) error + + // StoreLog stores a log entry. + StoreLog(log *Log) error + + // StoreLogs stores multiple log entries. + StoreLogs(logs []*Log) error + + // DeleteRange deletes a range of log entries. The range is inclusive. + DeleteRange(min, max uint64) error +} diff --git a/vendor/github.com/hashicorp/raft/log_cache.go b/vendor/github.com/hashicorp/raft/log_cache.go new file mode 100644 index 00000000..952e98c2 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/log_cache.go @@ -0,0 +1,79 @@ +package raft + +import ( + "fmt" + "sync" +) + +// LogCache wraps any LogStore implementation to provide an +// in-memory ring buffer. This is used to cache access to +// the recently written entries. For implementations that do not +// cache themselves, this can provide a substantial boost by +// avoiding disk I/O on recent entries. +type LogCache struct { + store LogStore + + cache []*Log + l sync.RWMutex +} + +// NewLogCache is used to create a new LogCache with the +// given capacity and backend store. 
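+//
+// Entries are mapped to slots by index modulo capacity, so a cached entry
+// is only a hit when its Index matches the requested index exactly.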
+func NewLogCache(capacity int, store LogStore) (*LogCache, error) {
+	if capacity <= 0 {
+		return nil, fmt.Errorf("capacity must be positive")
+	}
+	c := &LogCache{
+		store: store,
+		cache: make([]*Log, capacity),
+	}
+	return c, nil
+}
+
+func (c *LogCache) GetLog(idx uint64, log *Log) error {
+	// Check the buffer for an entry
+	c.l.RLock()
+	cached := c.cache[idx%uint64(len(c.cache))]
+	c.l.RUnlock()
+
+	// Check if entry is valid
+	if cached != nil && cached.Index == idx {
+		*log = *cached
+		return nil
+	}
+
+	// Forward request on cache miss
+	return c.store.GetLog(idx, log)
+}
+
+func (c *LogCache) StoreLog(log *Log) error {
+	return c.StoreLogs([]*Log{log})
+}
+
+func (c *LogCache) StoreLogs(logs []*Log) error {
+	// Insert the logs into the ring buffer
+	c.l.Lock()
+	for _, l := range logs {
+		c.cache[l.Index%uint64(len(c.cache))] = l
+	}
+	c.l.Unlock()
+
+	return c.store.StoreLogs(logs)
+}
+
+func (c *LogCache) FirstIndex() (uint64, error) {
+	return c.store.FirstIndex()
+}
+
+func (c *LogCache) LastIndex() (uint64, error) {
+	return c.store.LastIndex()
+}
+
+func (c *LogCache) DeleteRange(min, max uint64) error {
+	// Invalidate the cache on deletes
+	c.l.Lock()
+	c.cache = make([]*Log, len(c.cache))
+	c.l.Unlock()
+
+	return c.store.DeleteRange(min, max)
+}
diff --git a/vendor/github.com/hashicorp/raft/membership.md b/vendor/github.com/hashicorp/raft/membership.md
new file mode 100644
index 00000000..df1f83e2
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/membership.md
@@ -0,0 +1,83 @@
+Simon (@superfell) and I (@ongardie) talked through reworking this library's cluster membership changes last Friday. We don't see a way to split this into independent patches, so we're taking the next best approach: submitting the plan here for review, then working on an enormous PR. Your feedback would be appreciated. (@superfell is out this week, however, so don't expect him to respond quickly.)
+
+These are the main goals:
+ - Bringing things in line with the description in my PhD dissertation;
+ - Catching up new servers prior to granting them a vote, as well as allowing permanent non-voting members; and
+ - Eliminating the `peers.json` file, to avoid issues of consistency between that and the log/snapshot.
+
+## Data-centric view
+
+We propose to re-define a *configuration* as a set of servers, where each server includes an address (as it does today) and a mode that is either:
+ - *Voter*: a server whose vote is counted in elections and whose match index is used in advancing the leader's commit index.
+ - *Nonvoter*: a server that receives log entries but is not considered for elections or commitment purposes.
+ - *Staging*: a server that acts like a nonvoter with one exception: once a staging server receives enough log entries to catch up sufficiently to the leader's log, the leader will invoke a membership change to change the staging server to a voter.
+
+All changes to the configuration will be done by writing a new configuration to the log. The new configuration will be in effect as soon as it is appended to the log (not when it is committed like a normal state machine command). Note that, per my dissertation, there can be at most one uncommitted configuration at a time (the next configuration may not be created until the prior one has been committed). It's not strictly necessary to follow these same rules for the nonvoter/staging servers, but we think it's best to treat all changes uniformly.
+
+Each server will track two configurations:
+ 1.
its *committed configuration*: the latest configuration in the log/snapshot that has been committed, along with its index. + 2. its *latest configuration*: the latest configuration in the log/snapshot (may be committed or uncommitted), along with its index. + +When there's no membership change happening, these two will be the same. The latest configuration is almost always the one used, except: + - When followers truncate the suffix of their logs, they may need to fall back to the committed configuration. + - When snapshotting, the committed configuration is written, to correspond with the committed log prefix that is being snapshotted. + + +## Application API + +We propose the following operations for clients to manipulate the cluster configuration: + - AddVoter: server becomes staging unless voter, + - AddNonvoter: server becomes nonvoter unless staging or voter, + - DemoteVoter: server becomes nonvoter unless absent, + - RemovePeer: server removed from configuration, + - GetConfiguration: waits for latest config to commit, returns committed config. + +This diagram, of which I'm quite proud, shows the possible transitions: +``` ++-----------------------------------------------------------------------------+ +| | +| Start -> +--------+ | +| ,------<------------| | | +| / | absent | | +| / RemovePeer--> | | <---RemovePeer | +| / | +--------+ \ | +| / | | \ | +| AddNonvoter | AddVoter \ | +| | ,->---' `--<-. | \ | +| v / \ v \ | +| +----------+ +----------+ +----------+ | +| | | ---AddVoter--> | | -log caught up --> | | | +| | nonvoter | | staging | | voter | | +| | | <-DemoteVoter- | | ,- | | | +| +----------+ \ +----------+ / +----------+ | +| \ / | +| `--------------<---------------' | +| | ++-----------------------------------------------------------------------------+ +``` + +While these operations aren't quite symmetric, we think they're a good set to capture +the possible intent of the user. For example, if I want to make sure a server doesn't have a vote, but the server isn't part of the configuration at all, it probably shouldn't be added as a nonvoting server. + +Each of these application-level operations will be interpreted by the leader and, if it has an effect, will cause the leader to write a new configuration entry to its log. Which particular application-level operation caused the log entry to be written need not be part of the log entry. + +## Code implications + +This is a non-exhaustive list, but we came up with a few things: +- Remove the PeerStore: the `peers.json` file introduces the possibility of getting out of sync with the log and snapshot, and it's hard to maintain this atomically as the log changes. It's not clear whether it's meant to track the committed or latest configuration, either. +- Servers will have to search their snapshot and log to find the committed configuration and the latest configuration on startup. +- Bootstrap will no longer use `peers.json` but should initialize the log or snapshot with an application-provided configuration entry. +- Snapshots should store the index of their configuration along with the configuration itself. In my experience with LogCabin, the original log index of the configuration is very useful to include in debug log messages. +- As noted in hashicorp/raft#84, configuration change requests should come in via a separate channel, and one may not proceed until the last has been committed. 
+- As to deciding when a log is sufficiently caught up, implementing a sophisticated algorithm *is* something that can be done in a separate PR. An easy and decent placeholder is: once the staging server has reached 95% of the leader's commit index, promote it. + +## Feedback + +Again, we're looking for feedback here before we start working on this. Here are some questions to think about: + - Does this seem like where we want things to go? + - Is there anything here that should be left out? + - Is there anything else we're forgetting about? + - Is there a good way to break this up? + - What do we need to worry about in terms of backwards compatibility? + - What implication will this have on current tests? + - What's the best way to test this code, in particular the small changes that will be sprinkled all over the library? diff --git a/vendor/github.com/hashicorp/raft/net_transport.go b/vendor/github.com/hashicorp/raft/net_transport.go new file mode 100644 index 00000000..6092cafb --- /dev/null +++ b/vendor/github.com/hashicorp/raft/net_transport.go @@ -0,0 +1,779 @@ +package raft + +import ( + "bufio" + "context" + "errors" + "fmt" + "github.com/hashicorp/go-hclog" + "io" + "net" + "os" + "sync" + "time" + + "github.com/hashicorp/go-msgpack/codec" +) + +const ( + rpcAppendEntries uint8 = iota + rpcRequestVote + rpcInstallSnapshot + rpcTimeoutNow + + // DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport. + DefaultTimeoutScale = 256 * 1024 // 256KB + + // rpcMaxPipeline controls the maximum number of outstanding + // AppendEntries RPC calls. + rpcMaxPipeline = 128 +) + +var ( + // ErrTransportShutdown is returned when operations on a transport are + // invoked after it's been terminated. + ErrTransportShutdown = errors.New("transport shutdown") + + // ErrPipelineShutdown is returned when the pipeline is closed. + ErrPipelineShutdown = errors.New("append pipeline closed") +) + +/* + +NetworkTransport provides a network based transport that can be +used to communicate with Raft on remote machines. It requires +an underlying stream layer to provide a stream abstraction, which can +be simple TCP, TLS, etc. + +This transport is very simple and lightweight. Each RPC request is +framed by sending a byte that indicates the message type, followed +by the MsgPack encoded request. + +The response is an error string followed by the response object, +both are encoded using MsgPack. + +InstallSnapshot is special, in that after the RPC request we stream +the entire state. That socket is not re-used as the connection state +is not known if there is an error. + +*/ +type NetworkTransport struct { + connPool map[ServerAddress][]*netConn + connPoolLock sync.Mutex + + consumeCh chan RPC + + heartbeatFn func(RPC) + heartbeatFnLock sync.Mutex + + logger hclog.Logger + + maxPool int + + serverAddressProvider ServerAddressProvider + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + stream StreamLayer + + // streamCtx is used to cancel existing connection handlers. + streamCtx context.Context + streamCancel context.CancelFunc + streamCtxLock sync.RWMutex + + timeout time.Duration + TimeoutScale int +} + +// NetworkTransportConfig encapsulates configuration for the network transport layer. 
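+//
+// A minimal setup might look like this (sketch; `layer` stands in for any
+// concrete StreamLayer implementation):
+//
+//	trans := NewNetworkTransportWithConfig(&NetworkTransportConfig{
+//		Stream:  layer,
+//		MaxPool: 3,
+//		Timeout: 10 * time.Second,
+//	})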
+type NetworkTransportConfig struct { + // ServerAddressProvider is used to override the target address when establishing a connection to invoke an RPC + ServerAddressProvider ServerAddressProvider + + Logger hclog.Logger + + // Dialer + Stream StreamLayer + + // MaxPool controls how many connections we will pool + MaxPool int + + // Timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply + // the timeout by (SnapshotSize / TimeoutScale). + Timeout time.Duration +} + +// ServerAddressProvider is a target address to which we invoke an RPC when establishing a connection +type ServerAddressProvider interface { + ServerAddr(id ServerID) (ServerAddress, error) +} + +// StreamLayer is used with the NetworkTransport to provide +// the low level stream abstraction. +type StreamLayer interface { + net.Listener + + // Dial is used to create a new outgoing connection + Dial(address ServerAddress, timeout time.Duration) (net.Conn, error) +} + +type netConn struct { + target ServerAddress + conn net.Conn + r *bufio.Reader + w *bufio.Writer + dec *codec.Decoder + enc *codec.Encoder +} + +func (n *netConn) Release() error { + return n.conn.Close() +} + +type netPipeline struct { + conn *netConn + trans *NetworkTransport + + doneCh chan AppendFuture + inprogressCh chan *appendFuture + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// NewNetworkTransportWithConfig creates a new network transport with the given config struct +func NewNetworkTransportWithConfig( + config *NetworkTransportConfig, +) *NetworkTransport { + if config.Logger == nil { + config.Logger = hclog.New(&hclog.LoggerOptions{ + Name: "raft-net", + Output: hclog.DefaultOutput, + Level: hclog.DefaultLevel, + }) + } + trans := &NetworkTransport{ + connPool: make(map[ServerAddress][]*netConn), + consumeCh: make(chan RPC), + logger: config.Logger, + maxPool: config.MaxPool, + shutdownCh: make(chan struct{}), + stream: config.Stream, + timeout: config.Timeout, + TimeoutScale: DefaultTimeoutScale, + serverAddressProvider: config.ServerAddressProvider, + } + + // Create the connection context and then start our listener. + trans.setupStreamContext() + go trans.listen() + + return trans +} + +// NewNetworkTransport creates a new network transport with the given dialer +// and listener. The maxPool controls how many connections we will pool. The +// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply +// the timeout by (SnapshotSize / TimeoutScale). +func NewNetworkTransport( + stream StreamLayer, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) *NetworkTransport { + if logOutput == nil { + logOutput = os.Stderr + } + logger := hclog.New(&hclog.LoggerOptions{ + Name: "raft-net", + Output: logOutput, + Level: hclog.DefaultLevel, + }) + config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger} + return NewNetworkTransportWithConfig(config) +} + +// NewNetworkTransportWithLogger creates a new network transport with the given logger, dialer +// and listener. The maxPool controls how many connections we will pool. The +// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply +// the timeout by (SnapshotSize / TimeoutScale). 
+func NewNetworkTransportWithLogger(
+	stream StreamLayer,
+	maxPool int,
+	timeout time.Duration,
+	logger hclog.Logger,
+) *NetworkTransport {
+	config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger}
+	return NewNetworkTransportWithConfig(config)
+}
+
+// setupStreamContext is used to create a new stream context. This should be
+// called with the stream lock held.
+func (n *NetworkTransport) setupStreamContext() {
+	ctx, cancel := context.WithCancel(context.Background())
+	n.streamCtx = ctx
+	n.streamCancel = cancel
+}
+
+// getStreamContext is used to retrieve the current stream context.
+func (n *NetworkTransport) getStreamContext() context.Context {
+	n.streamCtxLock.RLock()
+	defer n.streamCtxLock.RUnlock()
+	return n.streamCtx
+}
+
+// SetHeartbeatHandler is used to set up a heartbeat handler
+// as a fast path. This is to avoid head-of-line blocking from
+// disk IO.
+func (n *NetworkTransport) SetHeartbeatHandler(cb func(rpc RPC)) {
+	n.heartbeatFnLock.Lock()
+	defer n.heartbeatFnLock.Unlock()
+	n.heartbeatFn = cb
+}
+
+// CloseStreams closes the current streams.
+func (n *NetworkTransport) CloseStreams() {
+	n.connPoolLock.Lock()
+	defer n.connPoolLock.Unlock()
+
+	// Close all the connections in the connection pool and then remove their
+	// entry.
+	for k, e := range n.connPool {
+		for _, conn := range e {
+			conn.Release()
+		}
+
+		delete(n.connPool, k)
+	}
+
+	// Cancel the existing connections and create a new context. Both these
+	// operations must always be done with the lock held otherwise we can create
+	// connection handlers that are holding a context that will never be
+	// cancelable.
+	n.streamCtxLock.Lock()
+	n.streamCancel()
+	n.setupStreamContext()
+	n.streamCtxLock.Unlock()
+}
+
+// Close is used to stop the network transport.
+func (n *NetworkTransport) Close() error {
+	n.shutdownLock.Lock()
+	defer n.shutdownLock.Unlock()
+
+	if !n.shutdown {
+		close(n.shutdownCh)
+		n.stream.Close()
+		n.shutdown = true
+	}
+	return nil
+}
+
+// Consumer implements the Transport interface.
+func (n *NetworkTransport) Consumer() <-chan RPC {
+	return n.consumeCh
+}
+
+// LocalAddr implements the Transport interface.
+func (n *NetworkTransport) LocalAddr() ServerAddress {
+	return ServerAddress(n.stream.Addr().String())
+}
+
+// IsShutdown is used to check if the transport has been shut down.
+func (n *NetworkTransport) IsShutdown() bool {
+	select {
+	case <-n.shutdownCh:
+		return true
+	default:
+		return false
+	}
+}
+
+// getPooledConn is used to grab a pooled connection.
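+// It pops the most recently returned connection for the target, or returns
+// nil when the pool is empty so the caller dials a fresh one.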
+func (n *NetworkTransport) getPooledConn(target ServerAddress) *netConn {
+	n.connPoolLock.Lock()
+	defer n.connPoolLock.Unlock()
+
+	conns, ok := n.connPool[target]
+	if !ok || len(conns) == 0 {
+		return nil
+	}
+
+	var conn *netConn
+	num := len(conns)
+	conn, conns[num-1] = conns[num-1], nil
+	n.connPool[target] = conns[:num-1]
+	return conn
+}
+
+// getConnFromAddressProvider returns a connection from the server address provider if available, or defaults to a connection using the target server address.
+func (n *NetworkTransport) getConnFromAddressProvider(id ServerID, target ServerAddress) (*netConn, error) {
+	address := n.getProviderAddressOrFallback(id, target)
+	return n.getConn(address)
+}
+
+func (n *NetworkTransport) getProviderAddressOrFallback(id ServerID, target ServerAddress) ServerAddress {
+	if n.serverAddressProvider != nil {
+		serverAddressOverride, err := n.serverAddressProvider.ServerAddr(id)
+		if err != nil {
+			n.logger.Warn("unable to get address for server, using fallback address", "id", id, "fallback", target, "error", err)
+		} else {
+			return serverAddressOverride
+		}
+	}
+	return target
+}
+
+// getConn is used to get a connection from the pool.
+func (n *NetworkTransport) getConn(target ServerAddress) (*netConn, error) {
+	// Check for a pooled conn
+	if conn := n.getPooledConn(target); conn != nil {
+		return conn, nil
+	}
+
+	// Dial a new connection
+	conn, err := n.stream.Dial(target, n.timeout)
+	if err != nil {
+		return nil, err
+	}
+
+	// Wrap the conn
+	netConn := &netConn{
+		target: target,
+		conn:   conn,
+		r:      bufio.NewReader(conn),
+		w:      bufio.NewWriter(conn),
+	}
+
+	// Setup encoder/decoders
+	netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{})
+	netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{})
+
+	// Done
+	return netConn, nil
+}
+
+// returnConn returns a connection back to the pool.
+func (n *NetworkTransport) returnConn(conn *netConn) {
+	n.connPoolLock.Lock()
+	defer n.connPoolLock.Unlock()
+
+	key := conn.target
+	conns := n.connPool[key]
+
+	if !n.IsShutdown() && len(conns) < n.maxPool {
+		n.connPool[key] = append(conns, conn)
+	} else {
+		conn.Release()
+	}
+}
+
+// AppendEntriesPipeline returns an interface that can be used to pipeline
+// AppendEntries requests.
+func (n *NetworkTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) {
+	// Get a connection
+	conn, err := n.getConnFromAddressProvider(id, target)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the pipeline
+	return newNetPipeline(n, conn), nil
+}
+
+// AppendEntries implements the Transport interface.
+func (n *NetworkTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error {
+	return n.genericRPC(id, target, rpcAppendEntries, args, resp)
+}
+
+// RequestVote implements the Transport interface.
+func (n *NetworkTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error {
+	return n.genericRPC(id, target, rpcRequestVote, args, resp)
+}
+
+// genericRPC handles a simple request/response RPC.
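+// On the wire this is a single rpcType byte followed by the MsgPack-encoded
+// request; the reply is an error string followed by the response object
+// (see sendRPC and decodeResponse).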
+func (n *NetworkTransport) genericRPC(id ServerID, target ServerAddress, rpcType uint8, args interface{}, resp interface{}) error {
+	// Get a conn
+	conn, err := n.getConnFromAddressProvider(id, target)
+	if err != nil {
+		return err
+	}
+
+	// Set a deadline
+	if n.timeout > 0 {
+		conn.conn.SetDeadline(time.Now().Add(n.timeout))
+	}
+
+	// Send the RPC
+	if err = sendRPC(conn, rpcType, args); err != nil {
+		return err
+	}
+
+	// Decode the response
+	canReturn, err := decodeResponse(conn, resp)
+	if canReturn {
+		n.returnConn(conn)
+	}
+	return err
+}
+
+// InstallSnapshot implements the Transport interface.
+func (n *NetworkTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error {
+	// Get a conn, always close for InstallSnapshot
+	conn, err := n.getConnFromAddressProvider(id, target)
+	if err != nil {
+		return err
+	}
+	defer conn.Release()
+
+	// Set a deadline, scaled by request size
+	if n.timeout > 0 {
+		timeout := n.timeout * time.Duration(args.Size/int64(n.TimeoutScale))
+		if timeout < n.timeout {
+			timeout = n.timeout
+		}
+		conn.conn.SetDeadline(time.Now().Add(timeout))
+	}
+
+	// Send the RPC
+	if err = sendRPC(conn, rpcInstallSnapshot, args); err != nil {
+		return err
+	}
+
+	// Stream the state
+	if _, err = io.Copy(conn.w, data); err != nil {
+		return err
+	}
+
+	// Flush
+	if err = conn.w.Flush(); err != nil {
+		return err
+	}
+
+	// Decode the response, do not return conn
+	_, err = decodeResponse(conn, resp)
+	return err
+}
+
+// EncodePeer implements the Transport interface.
+func (n *NetworkTransport) EncodePeer(id ServerID, p ServerAddress) []byte {
+	address := n.getProviderAddressOrFallback(id, p)
+	return []byte(address)
+}
+
+// DecodePeer implements the Transport interface.
+func (n *NetworkTransport) DecodePeer(buf []byte) ServerAddress {
+	return ServerAddress(buf)
+}
+
+// TimeoutNow implements the Transport interface.
+func (n *NetworkTransport) TimeoutNow(id ServerID, target ServerAddress, args *TimeoutNowRequest, resp *TimeoutNowResponse) error {
+	return n.genericRPC(id, target, rpcTimeoutNow, args, resp)
+}
+
+// listen is used to handle incoming connections.
+func (n *NetworkTransport) listen() {
+	const baseDelay = 5 * time.Millisecond
+	const maxDelay = 1 * time.Second
+
+	var loopDelay time.Duration
+	for {
+		// Accept incoming connections
+		conn, err := n.stream.Accept()
+		if err != nil {
+			if loopDelay == 0 {
+				loopDelay = baseDelay
+			} else {
+				loopDelay *= 2
+			}
+
+			if loopDelay > maxDelay {
+				loopDelay = maxDelay
+			}
+
+			if !n.IsShutdown() {
+				n.logger.Error("failed to accept connection", "error", err)
+			}
+
+			select {
+			case <-n.shutdownCh:
+				return
+			case <-time.After(loopDelay):
+				continue
+			}
+		}
+		// No error, reset loop delay
+		loopDelay = 0
+
+		n.logger.Debug("accepted connection", "local-address", n.LocalAddr(), "remote-address", conn.RemoteAddr().String())
+
+		// Handle the connection in a dedicated routine
+		go n.handleConn(n.getStreamContext(), conn)
+	}
+}
+
+// handleConn is used to handle an inbound connection for its lifespan. The
+// handler will exit when the passed context is cancelled or the connection is
+// closed.
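+//
+// Each connection gets its own buffered reader/writer and codec pair, and
+// commands on a connection are handled strictly in order.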
+func (n *NetworkTransport) handleConn(connCtx context.Context, conn net.Conn) { + defer conn.Close() + r := bufio.NewReader(conn) + w := bufio.NewWriter(conn) + dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) + enc := codec.NewEncoder(w, &codec.MsgpackHandle{}) + + for { + select { + case <-connCtx.Done(): + n.logger.Debug("stream layer is closed") + return + default: + } + + if err := n.handleCommand(r, dec, enc); err != nil { + if err != io.EOF { + n.logger.Error("failed to decode incoming command", "error", err) + } + return + } + if err := w.Flush(); err != nil { + n.logger.Error("failed to flush response", "error", err) + return + } + } +} + +// handleCommand is used to decode and dispatch a single command. +func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, enc *codec.Encoder) error { + // Get the rpc type + rpcType, err := r.ReadByte() + if err != nil { + return err + } + + // Create the RPC object + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + RespChan: respCh, + } + + // Decode the command + isHeartbeat := false + switch rpcType { + case rpcAppendEntries: + var req AppendEntriesRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + // Check if this is a heartbeat + if req.Term != 0 && req.Leader != nil && + req.PrevLogEntry == 0 && req.PrevLogTerm == 0 && + len(req.Entries) == 0 && req.LeaderCommitIndex == 0 { + isHeartbeat = true + } + + case rpcRequestVote: + var req RequestVoteRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + case rpcInstallSnapshot: + var req InstallSnapshotRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + rpc.Reader = io.LimitReader(r, req.Size) + + case rpcTimeoutNow: + var req TimeoutNowRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + default: + return fmt.Errorf("unknown rpc type %d", rpcType) + } + + // Check for heartbeat fast-path + if isHeartbeat { + n.heartbeatFnLock.Lock() + fn := n.heartbeatFn + n.heartbeatFnLock.Unlock() + if fn != nil { + fn(rpc) + goto RESP + } + } + + // Dispatch the RPC + select { + case n.consumeCh <- rpc: + case <-n.shutdownCh: + return ErrTransportShutdown + } + + // Wait for response +RESP: + select { + case resp := <-respCh: + // Send the error first + respErr := "" + if resp.Error != nil { + respErr = resp.Error.Error() + } + if err := enc.Encode(respErr); err != nil { + return err + } + + // Send the response + if err := enc.Encode(resp.Response); err != nil { + return err + } + case <-n.shutdownCh: + return ErrTransportShutdown + } + return nil +} + +// decodeResponse is used to decode an RPC response and reports whether +// the connection can be reused. +func decodeResponse(conn *netConn, resp interface{}) (bool, error) { + // Decode the error if any + var rpcError string + if err := conn.dec.Decode(&rpcError); err != nil { + conn.Release() + return false, err + } + + // Decode the response + if err := conn.dec.Decode(resp); err != nil { + conn.Release() + return false, err + } + + // Format an error if any + if rpcError != "" { + return true, fmt.Errorf(rpcError) + } + return true, nil +} + +// sendRPC is used to encode and send the RPC. 
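+// Any failure releases the connection, since a partially written frame
+// would leave the stream in an unusable state.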
+func sendRPC(conn *netConn, rpcType uint8, args interface{}) error {
+	// Write the request type
+	if err := conn.w.WriteByte(rpcType); err != nil {
+		conn.Release()
+		return err
+	}
+
+	// Send the request
+	if err := conn.enc.Encode(args); err != nil {
+		conn.Release()
+		return err
+	}
+
+	// Flush
+	if err := conn.w.Flush(); err != nil {
+		conn.Release()
+		return err
+	}
+	return nil
+}
+
+// newNetPipeline is used to construct a netPipeline from a given
+// transport and connection.
+func newNetPipeline(trans *NetworkTransport, conn *netConn) *netPipeline {
+	n := &netPipeline{
+		conn:         conn,
+		trans:        trans,
+		doneCh:       make(chan AppendFuture, rpcMaxPipeline),
+		inprogressCh: make(chan *appendFuture, rpcMaxPipeline),
+		shutdownCh:   make(chan struct{}),
+	}
+	go n.decodeResponses()
+	return n
+}
+
+// decodeResponses is a long-running routine that decodes the responses
+// sent on the connection.
+func (n *netPipeline) decodeResponses() {
+	timeout := n.trans.timeout
+	for {
+		select {
+		case future := <-n.inprogressCh:
+			if timeout > 0 {
+				n.conn.conn.SetReadDeadline(time.Now().Add(timeout))
+			}
+
+			_, err := decodeResponse(n.conn, future.resp)
+			future.respond(err)
+			select {
+			case n.doneCh <- future:
+			case <-n.shutdownCh:
+				return
+			}
+		case <-n.shutdownCh:
+			return
+		}
+	}
+}
+
+// AppendEntries is used to pipeline a new append entries request.
+func (n *netPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) {
+	// Create a new future
+	future := &appendFuture{
+		start: time.Now(),
+		args:  args,
+		resp:  resp,
+	}
+	future.init()
+
+	// Add a send timeout
+	if timeout := n.trans.timeout; timeout > 0 {
+		n.conn.conn.SetWriteDeadline(time.Now().Add(timeout))
+	}
+
+	// Send the RPC
+	if err := sendRPC(n.conn, rpcAppendEntries, future.args); err != nil {
+		return nil, err
+	}
+
+	// Hand-off for decoding, this can also cause back-pressure
+	// to prevent too many inflight requests
+	select {
+	case n.inprogressCh <- future:
+		return future, nil
+	case <-n.shutdownCh:
+		return nil, ErrPipelineShutdown
+	}
+}
+
+// Consumer returns a channel that can be used to consume complete futures.
+func (n *netPipeline) Consumer() <-chan AppendFuture {
+	return n.doneCh
+}
+
+// Close is used to shut down the pipeline connection.
+func (n *netPipeline) Close() error {
+	n.shutdownLock.Lock()
+	defer n.shutdownLock.Unlock()
+	if n.shutdown {
+		return nil
+	}
+
+	// Release the connection
+	n.conn.Release()
+
+	n.shutdown = true
+	close(n.shutdownCh)
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft/observer.go b/vendor/github.com/hashicorp/raft/observer.go
new file mode 100644
index 00000000..1611d6b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/observer.go
@@ -0,0 +1,131 @@
+package raft
+
+import (
+	"sync/atomic"
+)
+
+// Observation is sent along the given channel to observers when an event occurs.
+type Observation struct {
+	// Raft holds the Raft instance generating the observation.
+	Raft *Raft
+	// Data holds observation-specific data. Possible types are
+	// *RequestVoteRequest
+	// RaftState
+	// PeerObservation
+	// LeaderObservation
+	Data interface{}
+}
+
+// LeaderObservation is used for the data when leadership changes.
+type LeaderObservation struct {
+	Leader ServerAddress
+}
+
+// PeerObservation is sent to observers when peers change.
+type PeerObservation struct {
+	Removed bool
+	Peer    Server
+}
+
+// nextObserverID is used to provide a unique ID for each observer to aid in
+// deregistration.
+var nextObserverID uint64 + +// FilterFn is a function that can be registered in order to filter observations. +// The function reports whether the observation should be included - if +// it returns false, the observation will be filtered out. +type FilterFn func(o *Observation) bool + +// Observer describes what to do with a given observation. +type Observer struct { + // numObserved and numDropped are performance counters for this observer. + // 64 bit types must be 64 bit aligned to use with atomic operations on + // 32 bit platforms, so keep them at the top of the struct. + numObserved uint64 + numDropped uint64 + + // channel receives observations. + channel chan Observation + + // blocking, if true, will cause Raft to block when sending an observation + // to this observer. This should generally be set to false. + blocking bool + + // filter will be called to determine if an observation should be sent to + // the channel. + filter FilterFn + + // id is the ID of this observer in the Raft map. + id uint64 +} + +// NewObserver creates a new observer that can be registered +// to make observations on a Raft instance. Observations +// will be sent on the given channel if they satisfy the +// given filter. +// +// If blocking is true, the observer will block when it can't +// send on the channel, otherwise it may discard events. +func NewObserver(channel chan Observation, blocking bool, filter FilterFn) *Observer { + return &Observer{ + channel: channel, + blocking: blocking, + filter: filter, + id: atomic.AddUint64(&nextObserverID, 1), + } +} + +// GetNumObserved returns the number of observations. +func (or *Observer) GetNumObserved() uint64 { + return atomic.LoadUint64(&or.numObserved) +} + +// GetNumDropped returns the number of dropped observations due to blocking. +func (or *Observer) GetNumDropped() uint64 { + return atomic.LoadUint64(&or.numDropped) +} + +// RegisterObserver registers a new observer. +func (r *Raft) RegisterObserver(or *Observer) { + r.observersLock.Lock() + defer r.observersLock.Unlock() + r.observers[or.id] = or +} + +// DeregisterObserver deregisters an observer. +func (r *Raft) DeregisterObserver(or *Observer) { + r.observersLock.Lock() + defer r.observersLock.Unlock() + delete(r.observers, or.id) +} + +// observe sends an observation to every observer. +func (r *Raft) observe(o interface{}) { + // In general observers should not block. But in any case this isn't + // disastrous as we only hold a read lock, which merely prevents + // registration / deregistration of observers. + r.observersLock.RLock() + defer r.observersLock.RUnlock() + for _, or := range r.observers { + // It's wasteful to do this in the loop, but for the common case + // where there are no observers we won't create any objects. 
+ ob := Observation{Raft: r, Data: o} + if or.filter != nil && !or.filter(&ob) { + continue + } + if or.channel == nil { + continue + } + if or.blocking { + or.channel <- ob + atomic.AddUint64(&or.numObserved, 1) + } else { + select { + case or.channel <- ob: + atomic.AddUint64(&or.numObserved, 1) + default: + atomic.AddUint64(&or.numDropped, 1) + } + } + } +} diff --git a/vendor/github.com/hashicorp/raft/peersjson.go b/vendor/github.com/hashicorp/raft/peersjson.go new file mode 100644 index 00000000..38ca2a8b --- /dev/null +++ b/vendor/github.com/hashicorp/raft/peersjson.go @@ -0,0 +1,98 @@ +package raft + +import ( + "bytes" + "encoding/json" + "io/ioutil" +) + +// ReadPeersJSON consumes a legacy peers.json file in the format of the old JSON +// peer store and creates a new-style configuration structure. This can be used +// to migrate this data or perform manual recovery when running protocol versions +// that can interoperate with older, unversioned Raft servers. This should not be +// used once server IDs are in use, because the old peers.json file didn't have +// support for these, nor non-voter suffrage types. +func ReadPeersJSON(path string) (Configuration, error) { + // Read in the file. + buf, err := ioutil.ReadFile(path) + if err != nil { + return Configuration{}, err + } + + // Parse it as JSON. + var peers []string + dec := json.NewDecoder(bytes.NewReader(buf)) + if err := dec.Decode(&peers); err != nil { + return Configuration{}, err + } + + // Map it into the new-style configuration structure. We can only specify + // voter roles here, and the ID has to be the same as the address. + var configuration Configuration + for _, peer := range peers { + server := Server{ + Suffrage: Voter, + ID: ServerID(peer), + Address: ServerAddress(peer), + } + configuration.Servers = append(configuration.Servers, server) + } + + // We should only ingest valid configurations. + if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + return configuration, nil +} + +// configEntry is used when decoding a new-style peers.json. +type configEntry struct { + // ID is the ID of the server (a UUID, usually). + ID ServerID `json:"id"` + + // Address is the host:port of the server. + Address ServerAddress `json:"address"` + + // NonVoter controls the suffrage. We choose this sense so people + // can leave this out and get a Voter by default. + NonVoter bool `json:"non_voter"` +} + +// ReadConfigJSON reads a new-style peers.json and returns a configuration +// structure. This can be used to perform manual recovery when running protocol +// versions that use server IDs. +func ReadConfigJSON(path string) (Configuration, error) { + // Read in the file. + buf, err := ioutil.ReadFile(path) + if err != nil { + return Configuration{}, err + } + + // Parse it as JSON. + var peers []configEntry + dec := json.NewDecoder(bytes.NewReader(buf)) + if err := dec.Decode(&peers); err != nil { + return Configuration{}, err + } + + // Map it into the new-style configuration structure. + var configuration Configuration + for _, peer := range peers { + suffrage := Voter + if peer.NonVoter { + suffrage = Nonvoter + } + server := Server{ + Suffrage: suffrage, + ID: peer.ID, + Address: peer.Address, + } + configuration.Servers = append(configuration.Servers, server) + } + + // We should only ingest valid configurations. 
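+	// (checkConfiguration rejects malformed configurations, e.g. ones
+	// without any voters or with duplicate servers.)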
+ if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + return configuration, nil +} diff --git a/vendor/github.com/hashicorp/raft/raft.go b/vendor/github.com/hashicorp/raft/raft.go new file mode 100644 index 00000000..af60c759 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/raft.go @@ -0,0 +1,1798 @@ +package raft + +import ( + "bytes" + "container/list" + "fmt" + "io" + "io/ioutil" + "sync/atomic" + "time" + + "github.com/hashicorp/go-hclog" + + "github.com/armon/go-metrics" +) + +const ( + minCheckInterval = 10 * time.Millisecond +) + +var ( + keyCurrentTerm = []byte("CurrentTerm") + keyLastVoteTerm = []byte("LastVoteTerm") + keyLastVoteCand = []byte("LastVoteCand") +) + +// getRPCHeader returns an initialized RPCHeader struct for the given +// Raft instance. This structure is sent along with RPC requests and +// responses. +func (r *Raft) getRPCHeader() RPCHeader { + return RPCHeader{ + ProtocolVersion: r.conf.ProtocolVersion, + } +} + +// checkRPCHeader houses logic about whether this instance of Raft can process +// the given RPC message. +func (r *Raft) checkRPCHeader(rpc RPC) error { + // Get the header off the RPC message. + wh, ok := rpc.Command.(WithRPCHeader) + if !ok { + return fmt.Errorf("RPC does not have a header") + } + header := wh.GetRPCHeader() + + // First check is to just make sure the code can understand the + // protocol at all. + if header.ProtocolVersion < ProtocolVersionMin || + header.ProtocolVersion > ProtocolVersionMax { + return ErrUnsupportedProtocol + } + + // Second check is whether we should support this message, given the + // current protocol we are configured to run. This will drop support + // for protocol version 0 starting at protocol version 2, which is + // currently what we want, and in general support one version back. We + // may need to revisit this policy depending on how future protocol + // changes evolve. + if header.ProtocolVersion < r.conf.ProtocolVersion-1 { + return ErrUnsupportedProtocol + } + + return nil +} + +// getSnapshotVersion returns the snapshot version that should be used when +// creating snapshots, given the protocol version in use. +func getSnapshotVersion(protocolVersion ProtocolVersion) SnapshotVersion { + // Right now we only have two versions and they are backwards compatible + // so we don't need to look at the protocol version. + return 1 +} + +// commitTuple is used to send an index that was committed, +// with an optional associated future that should be invoked. +type commitTuple struct { + log *Log + future *logFuture +} + +// leaderState is state that is used while we are a leader. +type leaderState struct { + leadershipTransferInProgress int32 // indicates that a leadership transfer is in progress. + commitCh chan struct{} + commitment *commitment + inflight *list.List // list of logFuture in log index order + replState map[ServerID]*followerReplication + notify map[*verifyFuture]struct{} + stepDown chan struct{} +} + +// setLeader is used to modify the current leader of the cluster +func (r *Raft) setLeader(leader ServerAddress) { + r.leaderLock.Lock() + oldLeader := r.leader + r.leader = leader + r.leaderLock.Unlock() + if oldLeader != leader { + r.observe(LeaderObservation{Leader: leader}) + } +} + +// requestConfigChange is a helper for the above functions that make +// configuration change requests. 'req' describes the change. For timeout, +// see AddVoter. 
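+//
+// The returned IndexFuture completes once the change has been applied (or
+// rejected); if the request cannot even be enqueued before the timeout, an
+// errorFuture wrapping ErrEnqueueTimeout is returned instead.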
+func (r *Raft) requestConfigChange(req configurationChangeRequest, timeout time.Duration) IndexFuture { + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + future := &configurationChangeFuture{ + req: req, + } + future.init() + select { + case <-timer: + return errorFuture{ErrEnqueueTimeout} + case r.configurationChangeCh <- future: + return future + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + } +} + +// run is a long running goroutine that runs the Raft FSM. +func (r *Raft) run() { + for { + // Check if we are doing a shutdown + select { + case <-r.shutdownCh: + // Clear the leader to prevent forwarding + r.setLeader("") + return + default: + } + + // Enter into a sub-FSM + switch r.getState() { + case Follower: + r.runFollower() + case Candidate: + r.runCandidate() + case Leader: + r.runLeader() + } + } +} + +// runFollower runs the FSM for a follower. +func (r *Raft) runFollower() { + didWarn := false + r.logger.Info("entering follower state", "follower", r, "leader", r.Leader()) + metrics.IncrCounter([]string{"raft", "state", "follower"}, 1) + heartbeatTimer := randomTimeout(r.conf.HeartbeatTimeout) + + for r.getState() == Follower { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case c := <-r.configurationChangeCh: + // Reject any operations since we are not the leader + c.respond(ErrNotLeader) + + case a := <-r.applyCh: + // Reject any operations since we are not the leader + a.respond(ErrNotLeader) + + case v := <-r.verifyCh: + // Reject any operations since we are not the leader + v.respond(ErrNotLeader) + + case r := <-r.userRestoreCh: + // Reject any restores since we are not the leader + r.respond(ErrNotLeader) + + case r := <-r.leadershipTransferCh: + // Reject any operations since we are not the leader + r.respond(ErrNotLeader) + + case c := <-r.configurationsCh: + c.configurations = r.configurations.Clone() + c.respond(nil) + + case b := <-r.bootstrapCh: + b.respond(r.liveBootstrap(b.configuration)) + + case <-heartbeatTimer: + // Restart the heartbeat timer + heartbeatTimer = randomTimeout(r.conf.HeartbeatTimeout) + + // Check if we have had a successful contact + lastContact := r.LastContact() + if time.Now().Sub(lastContact) < r.conf.HeartbeatTimeout { + continue + } + + // Heartbeat failed! Transition to the candidate state + lastLeader := r.Leader() + r.setLeader("") + + if r.configurations.latestIndex == 0 { + if !didWarn { + r.logger.Warn("no known peers, aborting election") + didWarn = true + } + } else if r.configurations.latestIndex == r.configurations.committedIndex && + !hasVote(r.configurations.latest, r.localID) { + if !didWarn { + r.logger.Warn("not part of stable configuration, aborting election") + didWarn = true + } + } else { + r.logger.Warn("heartbeat timeout reached, starting election", "last-leader", lastLeader) + metrics.IncrCounter([]string{"raft", "transition", "heartbeat_timeout"}, 1) + r.setState(Candidate) + return + } + + case <-r.shutdownCh: + return + } + } +} + +// liveBootstrap attempts to seed an initial configuration for the cluster. See +// the Raft object's member BootstrapCluster for more details. This must only be +// called on the main thread, and only makes sense in the follower state. +func (r *Raft) liveBootstrap(configuration Configuration) error { + // Use the pre-init API to make the static updates. + err := BootstrapCluster(&r.conf, r.logs, r.stable, r.snapshots, + r.trans, configuration) + if err != nil { + return err + } + + // Make the configuration live. 
+	var entry Log
+	if err := r.logs.GetLog(1, &entry); err != nil {
+		panic(err)
+	}
+	r.setCurrentTerm(1)
+	r.setLastLog(entry.Index, entry.Term)
+	r.processConfigurationLogEntry(&entry)
+	return nil
+}
+
+// runCandidate runs the FSM for a candidate.
+func (r *Raft) runCandidate() {
+	r.logger.Info("entering candidate state", "node", r, "term", r.getCurrentTerm()+1)
+	metrics.IncrCounter([]string{"raft", "state", "candidate"}, 1)
+
+	// Start a vote for ourselves, and set a timeout
+	voteCh := r.electSelf()
+
+	// Make sure the leadership transfer flag is reset after each run. Having this
+	// flag set will set the field LeadershipTransfer in a RequestVoteRequest to true,
+	// which will make other servers vote even though they have a leader already.
+	// It is important to reset that flag, because this privilege could be abused
+	// otherwise.
+	defer func() { r.candidateFromLeadershipTransfer = false }()
+
+	electionTimer := randomTimeout(r.conf.ElectionTimeout)
+
+	// Tally the votes, need a simple majority
+	grantedVotes := 0
+	votesNeeded := r.quorumSize()
+	r.logger.Debug("votes", "needed", votesNeeded)
+
+	for r.getState() == Candidate {
+		select {
+		case rpc := <-r.rpcCh:
+			r.processRPC(rpc)
+
+		case vote := <-voteCh:
+			// Check if the term is greater than ours, bail
+			if vote.Term > r.getCurrentTerm() {
+				r.logger.Debug("newer term discovered, fallback to follower")
+				r.setState(Follower)
+				r.setCurrentTerm(vote.Term)
+				return
+			}
+
+			// Check if the vote is granted
+			if vote.Granted {
+				grantedVotes++
+				r.logger.Debug("vote granted", "from", vote.voterID, "term", vote.Term, "tally", grantedVotes)
+			}
+
+			// Check if we've become the leader
+			if grantedVotes >= votesNeeded {
+				r.logger.Info("election won", "tally", grantedVotes)
+				r.setState(Leader)
+				r.setLeader(r.localAddr)
+				return
+			}
+
+		case c := <-r.configurationChangeCh:
+			// Reject any operations since we are not the leader
+			c.respond(ErrNotLeader)
+
+		case a := <-r.applyCh:
+			// Reject any operations since we are not the leader
+			a.respond(ErrNotLeader)
+
+		case v := <-r.verifyCh:
+			// Reject any operations since we are not the leader
+			v.respond(ErrNotLeader)
+
+		case r := <-r.userRestoreCh:
+			// Reject any restores since we are not the leader
+			r.respond(ErrNotLeader)
+
+		case c := <-r.configurationsCh:
+			c.configurations = r.configurations.Clone()
+			c.respond(nil)
+
+		case b := <-r.bootstrapCh:
+			b.respond(ErrCantBootstrap)
+
+		case <-electionTimer:
+			// Election failed! Restart the election.
We simply return, + // which will kick us back into runCandidate + r.logger.Warn("Election timeout reached, restarting election") + return + + case <-r.shutdownCh: + return + } + } +} + +func (r *Raft) setLeadershipTransferInProgress(v bool) { + if v { + atomic.StoreInt32(&r.leaderState.leadershipTransferInProgress, 1) + } else { + atomic.StoreInt32(&r.leaderState.leadershipTransferInProgress, 0) + } +} + +func (r *Raft) getLeadershipTransferInProgress() bool { + v := atomic.LoadInt32(&r.leaderState.leadershipTransferInProgress) + if v == 1 { + return true + } + return false +} + +func (r *Raft) setupLeaderState() { + r.leaderState.commitCh = make(chan struct{}, 1) + r.leaderState.commitment = newCommitment(r.leaderState.commitCh, + r.configurations.latest, + r.getLastIndex()+1 /* first index that may be committed in this term */) + r.leaderState.inflight = list.New() + r.leaderState.replState = make(map[ServerID]*followerReplication) + r.leaderState.notify = make(map[*verifyFuture]struct{}) + r.leaderState.stepDown = make(chan struct{}, 1) +} + +// runLeader runs the FSM for a leader. Do the setup here and drop into +// the leaderLoop for the hot loop. +func (r *Raft) runLeader() { + r.logger.Info("entering leader state", "leader", r) + metrics.IncrCounter([]string{"raft", "state", "leader"}, 1) + + // Notify that we are the leader + asyncNotifyBool(r.leaderCh, true) + + // Push to the notify channel if given + if notify := r.conf.NotifyCh; notify != nil { + select { + case notify <- true: + case <-r.shutdownCh: + } + } + + // setup leader state. This is only supposed to be accessed within the + // leaderloop. + r.setupLeaderState() + + // Cleanup state on step down + defer func() { + // Since we were the leader previously, we update our + // last contact time when we step down, so that we are not + // reporting a last contact time from before we were the + // leader. Otherwise, to a client it would seem our data + // is extremely stale. + r.setLastContact() + + // Stop replication + for _, p := range r.leaderState.replState { + close(p.stopCh) + } + + // Respond to all inflight operations + for e := r.leaderState.inflight.Front(); e != nil; e = e.Next() { + e.Value.(*logFuture).respond(ErrLeadershipLost) + } + + // Respond to any pending verify requests + for future := range r.leaderState.notify { + future.respond(ErrLeadershipLost) + } + + // Clear all the state + r.leaderState.commitCh = nil + r.leaderState.commitment = nil + r.leaderState.inflight = nil + r.leaderState.replState = nil + r.leaderState.notify = nil + r.leaderState.stepDown = nil + + // If we are stepping down for some reason, no known leader. + // We may have stepped down due to an RPC call, which would + // provide the leader, so we cannot always blank this out. + r.leaderLock.Lock() + if r.leader == r.localAddr { + r.leader = "" + } + r.leaderLock.Unlock() + + // Notify that we are not the leader + asyncNotifyBool(r.leaderCh, false) + + // Push to the notify channel if given + if notify := r.conf.NotifyCh; notify != nil { + select { + case notify <- false: + case <-r.shutdownCh: + // On shutdown, make a best effort but do not block + select { + case notify <- false: + default: + } + } + } + }() + + // Start a replication routine for each peer + r.startStopReplication() + + // Dispatch a no-op log entry first. This gets this leader up to the latest + // possible commit index, even in the absence of client commands. This used + // to append a configuration entry instead of a noop. 
However, that permits + // an unbounded number of uncommitted configurations in the log. We now + // maintain that there exists at most one uncommitted configuration entry in + // any log, so we have to do proper no-ops here. + noop := &logFuture{ + log: Log{ + Type: LogNoop, + }, + } + r.dispatchLogs([]*logFuture{noop}) + + // Sit in the leader loop until we step down + r.leaderLoop() +} + +// startStopReplication will set up state and start asynchronous replication to +// new peers, and stop replication to removed peers. Before removing a peer, +// it'll instruct the replication routines to try to replicate to the current +// index. This must only be called from the main thread. +func (r *Raft) startStopReplication() { + inConfig := make(map[ServerID]bool, len(r.configurations.latest.Servers)) + lastIdx := r.getLastIndex() + + // Start replication goroutines that need starting + for _, server := range r.configurations.latest.Servers { + if server.ID == r.localID { + continue + } + inConfig[server.ID] = true + if _, ok := r.leaderState.replState[server.ID]; !ok { + r.logger.Info("added peer, starting replication", "peer", server.ID) + s := &followerReplication{ + peer: server, + commitment: r.leaderState.commitment, + stopCh: make(chan uint64, 1), + triggerCh: make(chan struct{}, 1), + triggerDeferErrorCh: make(chan *deferError, 1), + currentTerm: r.getCurrentTerm(), + nextIndex: lastIdx + 1, + lastContact: time.Now(), + notify: make(map[*verifyFuture]struct{}), + notifyCh: make(chan struct{}, 1), + stepDown: r.leaderState.stepDown, + } + r.leaderState.replState[server.ID] = s + r.goFunc(func() { r.replicate(s) }) + asyncNotifyCh(s.triggerCh) + r.observe(PeerObservation{Peer: server, Removed: false}) + } + } + + // Stop replication goroutines that need stopping + for serverID, repl := range r.leaderState.replState { + if inConfig[serverID] { + continue + } + // Replicate up to lastIdx and stop + r.logger.Info("removed peer, stopping replication", "peer", serverID, "last-index", lastIdx) + repl.stopCh <- lastIdx + close(repl.stopCh) + delete(r.leaderState.replState, serverID) + r.observe(PeerObservation{Peer: repl.peer, Removed: true}) + } +} + +// configurationChangeChIfStable returns r.configurationChangeCh if it's safe +// to process requests from it, or nil otherwise. This must only be called +// from the main thread. +// +// Note that if the conditions here were to change outside of leaderLoop to take +// this from nil to non-nil, we would need leaderLoop to be kicked. +func (r *Raft) configurationChangeChIfStable() chan *configurationChangeFuture { + // Have to wait until: + // 1. The latest configuration is committed, and + // 2. This leader has committed some entry (the noop) in this term + // https://groups.google.com/forum/#!msg/raft-dev/t4xj6dJTP6E/d2D9LrWRza8J + if r.configurations.latestIndex == r.configurations.committedIndex && + r.getCommitIndex() >= r.leaderState.commitment.startIndex { + return r.configurationChangeCh + } + return nil +} + +// leaderLoop is the hot loop for a leader. It is invoked +// after all the various leader setup is done. +func (r *Raft) leaderLoop() { + // stepDown is used to track if there is an inflight log that + // would cause us to lose leadership (specifically a RemovePeer of + // ourselves). If this is the case, we must not allow any logs to + // be processed in parallel, otherwise we are basing commit on + // only a single peer (ourself) and replicating to an undefined set + // of peers. 
+	stepDown := false
+	lease := time.After(r.conf.LeaderLeaseTimeout)
+
+	for r.getState() == Leader {
+		select {
+		case rpc := <-r.rpcCh:
+			r.processRPC(rpc)
+
+		case <-r.leaderState.stepDown:
+			r.setState(Follower)
+
+		case future := <-r.leadershipTransferCh:
+			if r.getLeadershipTransferInProgress() {
+				r.logger.Debug(ErrLeadershipTransferInProgress.Error())
+				future.respond(ErrLeadershipTransferInProgress)
+				continue
+			}
+
+			r.logger.Debug("starting leadership transfer", "id", future.ID, "address", future.Address)
+
+			// When we are leaving leaderLoop, we are no longer
+			// leader, so we should stop transferring.
+			leftLeaderLoop := make(chan struct{})
+			defer func() { close(leftLeaderLoop) }()
+
+			stopCh := make(chan struct{})
+			doneCh := make(chan error, 1)
+
+			// This is intentionally set up outside of the
+			// leadershipTransfer function, because the TimeoutNow
+			// call is blocking and there is no way to abort it if,
+			// for example, the timer expires.
+			// The leadershipTransfer function is controlled with
+			// the stopCh and doneCh.
+			go func() {
+				select {
+				case <-time.After(r.conf.ElectionTimeout):
+					close(stopCh)
+					err := fmt.Errorf("leadership transfer timeout")
+					r.logger.Debug(err.Error())
+					future.respond(err)
+					<-doneCh
+				case <-leftLeaderLoop:
+					close(stopCh)
+					err := fmt.Errorf("lost leadership during transfer (expected)")
+					r.logger.Debug(err.Error())
+					future.respond(nil)
+					<-doneCh
+				case err := <-doneCh:
+					if err != nil {
+						r.logger.Debug(err.Error())
+					}
+					future.respond(err)
+				}
+			}()
+
+			// leaderState.replState is accessed here before
+			// starting leadership transfer asynchronously because
+			// leaderState is only supposed to be accessed in the
+			// leaderLoop.
+			id := future.ID
+			address := future.Address
+			if id == nil {
+				s := r.pickServer()
+				if s != nil {
+					id = &s.ID
+					address = &s.Address
+				} else {
+					doneCh <- fmt.Errorf("cannot find peer")
+					continue
+				}
+			}
+			state, ok := r.leaderState.replState[*id]
+			if !ok {
+				doneCh <- fmt.Errorf("cannot find replication state for %v", id)
+				continue
+			}
+
+			go r.leadershipTransfer(*id, *address, state, stopCh, doneCh)
+
+		case <-r.leaderState.commitCh:
+			// Process the newly committed entries
+			oldCommitIndex := r.getCommitIndex()
+			commitIndex := r.leaderState.commitment.getCommitIndex()
+			r.setCommitIndex(commitIndex)
+
+			// A new configuration has been committed, set it as the
+			// committed value.
+			if r.configurations.latestIndex > oldCommitIndex &&
+				r.configurations.latestIndex <= commitIndex {
+				r.configurations.committed = r.configurations.latest
+				r.configurations.committedIndex = r.configurations.latestIndex
+				if !hasVote(r.configurations.committed, r.localID) {
+					stepDown = true
+				}
+			}
+
+			start := time.Now()
+			var groupReady []*list.Element
+			var groupFutures = make(map[uint64]*logFuture)
+			var lastIdxInGroup uint64
+
+			// Pull all inflight logs that are committed off the queue.
+ for e := r.leaderState.inflight.Front(); e != nil; e = e.Next() { + commitLog := e.Value.(*logFuture) + idx := commitLog.log.Index + if idx > commitIndex { + // Don't go past the committed index + break + } + + // Measure the commit time + metrics.MeasureSince([]string{"raft", "commitTime"}, commitLog.dispatch) + groupReady = append(groupReady, e) + groupFutures[idx] = commitLog + lastIdxInGroup = idx + } + + // Process the group + if len(groupReady) != 0 { + r.processLogs(lastIdxInGroup, groupFutures) + + for _, e := range groupReady { + r.leaderState.inflight.Remove(e) + } + } + + // Measure the time to enqueue batch of logs for FSM to apply + metrics.MeasureSince([]string{"raft", "fsm", "enqueue"}, start) + + // Count the number of logs enqueued + metrics.SetGauge([]string{"raft", "commitNumLogs"}, float32(len(groupReady))) + + if stepDown { + if r.conf.ShutdownOnRemove { + r.logger.Info("removed ourself, shutting down") + r.Shutdown() + } else { + r.logger.Info("removed ourself, transitioning to follower") + r.setState(Follower) + } + } + + case v := <-r.verifyCh: + if v.quorumSize == 0 { + // Just dispatched, start the verification + r.verifyLeader(v) + + } else if v.votes < v.quorumSize { + // Early return, means there must be a new leader + r.logger.Warn("new leader elected, stepping down") + r.setState(Follower) + delete(r.leaderState.notify, v) + for _, repl := range r.leaderState.replState { + repl.cleanNotify(v) + } + v.respond(ErrNotLeader) + + } else { + // Quorum of members agree, we are still leader + delete(r.leaderState.notify, v) + for _, repl := range r.leaderState.replState { + repl.cleanNotify(v) + } + v.respond(nil) + } + + case future := <-r.userRestoreCh: + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } + err := r.restoreUserSnapshot(future.meta, future.reader) + future.respond(err) + + case future := <-r.configurationsCh: + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } + future.configurations = r.configurations.Clone() + future.respond(nil) + + case future := <-r.configurationChangeChIfStable(): + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } + r.appendConfigurationEntry(future) + + case b := <-r.bootstrapCh: + b.respond(ErrCantBootstrap) + + case newLog := <-r.applyCh: + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + newLog.respond(ErrLeadershipTransferInProgress) + continue + } + // Group commit, gather all the ready commits + ready := []*logFuture{newLog} + GROUP_COMMIT_LOOP: + for i := 0; i < r.conf.MaxAppendEntries; i++ { + select { + case newLog := <-r.applyCh: + ready = append(ready, newLog) + default: + break GROUP_COMMIT_LOOP + } + } + + // Dispatch the logs + if stepDown { + // we're in the process of stepping down as leader, don't process anything new + for i := range ready { + ready[i].respond(ErrNotLeader) + } + } else { + r.dispatchLogs(ready) + } + + case <-lease: + // Check if we've exceeded the lease, potentially stepping down + maxDiff := r.checkLeaderLease() + + // Next check interval should adjust for the last node we've + // contacted, without going negative + checkInterval := r.conf.LeaderLeaseTimeout - maxDiff + if checkInterval < 
minCheckInterval {
+				checkInterval = minCheckInterval
+			}
+
+			// Renew the lease timer
+			lease = time.After(checkInterval)
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// verifyLeader must be called from the main thread for safety.
+// Causes the followers to attempt an immediate heartbeat.
+func (r *Raft) verifyLeader(v *verifyFuture) {
+	// Current leader always votes for self
+	v.votes = 1
+
+	// Set the quorum size, hot-path for single node
+	v.quorumSize = r.quorumSize()
+	if v.quorumSize == 1 {
+		v.respond(nil)
+		return
+	}
+
+	// Track this request
+	v.notifyCh = r.verifyCh
+	r.leaderState.notify[v] = struct{}{}
+
+	// Trigger immediate heartbeats
+	for _, repl := range r.leaderState.replState {
+		repl.notifyLock.Lock()
+		repl.notify[v] = struct{}{}
+		repl.notifyLock.Unlock()
+		asyncNotifyCh(repl.notifyCh)
+	}
+}
+
+// leadershipTransfer performs the heavy lifting for the leadership transfer.
+func (r *Raft) leadershipTransfer(id ServerID, address ServerAddress, repl *followerReplication, stopCh chan struct{}, doneCh chan error) {
+
+	// make sure we are not already stopped
+	select {
+	case <-stopCh:
+		doneCh <- nil
+		return
+	default:
+	}
+
+	// Step 1: set this field, which stops this leader from responding to any client requests.
+	r.setLeadershipTransferInProgress(true)
+	defer func() { r.setLeadershipTransferInProgress(false) }()
+
+	for atomic.LoadUint64(&repl.nextIndex) <= r.getLastIndex() {
+		err := &deferError{}
+		err.init()
+		repl.triggerDeferErrorCh <- err
+		select {
+		case err := <-err.errCh:
+			if err != nil {
+				doneCh <- err
+				return
+			}
+		case <-stopCh:
+			doneCh <- nil
+			return
+		}
+	}
+
+	// Step 2 (not needed here): the thesis describes in chapter 6.4.1 using
+	// clocks to reduce messaging for read-only queries. If that were
+	// implemented, the lease would also have to be reset when leadership is
+	// transferred. This implementation has a lease too, but it serves another
+	// purpose and doesn't need to be reset: it is set up in a similar way to
+	// the one in the thesis, but in practice it's a timer that just tells the
+	// leader how often to check that heartbeats are still coming in.
+
+	// Step 3: send TimeoutNow message to target server.
+	err := r.trans.TimeoutNow(id, address, &TimeoutNowRequest{RPCHeader: r.getRPCHeader()}, &TimeoutNowResponse{})
+	if err != nil {
+		err = fmt.Errorf("failed to make TimeoutNow RPC to %v: %v", id, err)
+	}
+	doneCh <- err
+}
+
+// checkLeaderLease is used to check if we can contact a quorum of nodes
+// within the last leader lease interval. If not, we need to step down,
+// as we may have lost connectivity. Returns the maximum duration without
+// contact. This must only be called from the main thread.
+func (r *Raft) checkLeaderLease() time.Duration {
+	// Track contacted nodes, we can always contact ourself
+	contacted := 0
+
+	// Check each follower
+	var maxDiff time.Duration
+	now := time.Now()
+	for _, server := range r.configurations.latest.Servers {
+		if server.Suffrage == Voter {
+			if server.ID == r.localID {
+				contacted++
+				continue
+			}
+			f := r.leaderState.replState[server.ID]
+			diff := now.Sub(f.LastContact())
+			if diff <= r.conf.LeaderLeaseTimeout {
+				contacted++
+				if diff > maxDiff {
+					maxDiff = diff
+				}
+			} else {
+				// Log at least once at high value, then debug. Otherwise it gets very verbose.
+ if diff <= 3*r.conf.LeaderLeaseTimeout { + r.logger.Warn("failed to contact", "server-id", server.ID, "time", diff) + } else { + r.logger.Debug("failed to contact", "server-id", server.ID, "time", diff) + } + } + metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond)) + } + } + + // Verify we can contact a quorum + quorum := r.quorumSize() + if contacted < quorum { + r.logger.Warn("failed to contact quorum of nodes, stepping down") + r.setState(Follower) + metrics.IncrCounter([]string{"raft", "transition", "leader_lease_timeout"}, 1) + } + return maxDiff +} + +// quorumSize is used to return the quorum size. This must only be called on +// the main thread. +// TODO: revisit usage +func (r *Raft) quorumSize() int { + voters := 0 + for _, server := range r.configurations.latest.Servers { + if server.Suffrage == Voter { + voters++ + } + } + return voters/2 + 1 +} + +// restoreUserSnapshot is used to manually consume an external snapshot, such +// as if restoring from a backup. We will use the current Raft configuration, +// not the one from the snapshot, so that we can restore into a new cluster. We +// will also use the higher of the index of the snapshot, or the current index, +// and then add 1 to that, so we force a new state with a hole in the Raft log, +// so that the snapshot will be sent to followers and used for any new joiners. +// This can only be run on the leader, and returns a future that can be used to +// block until complete. +func (r *Raft) restoreUserSnapshot(meta *SnapshotMeta, reader io.Reader) error { + defer metrics.MeasureSince([]string{"raft", "restoreUserSnapshot"}, time.Now()) + + // Sanity check the version. + version := meta.Version + if version < SnapshotVersionMin || version > SnapshotVersionMax { + return fmt.Errorf("unsupported snapshot version %d", version) + } + + // We don't support snapshots while there's a config change + // outstanding since the snapshot doesn't have a means to + // represent this state. + committedIndex := r.configurations.committedIndex + latestIndex := r.configurations.latestIndex + if committedIndex != latestIndex { + return fmt.Errorf("cannot restore snapshot now, wait until the configuration entry at %v has been applied (have applied %v)", + latestIndex, committedIndex) + } + + // Cancel any inflight requests. + for { + e := r.leaderState.inflight.Front() + if e == nil { + break + } + e.Value.(*logFuture).respond(ErrAbortedByRestore) + r.leaderState.inflight.Remove(e) + } + + // We will overwrite the snapshot metadata with the current term, + // an index that's greater than the current index, or the last + // index in the snapshot. It's important that we leave a hole in + // the index so we know there's nothing in the Raft log there and + // replication will fault and send the snapshot. + term := r.getCurrentTerm() + lastIndex := r.getLastIndex() + if meta.Index > lastIndex { + lastIndex = meta.Index + } + lastIndex++ + + // Dump the snapshot. Note that we use the latest configuration, + // not the one that came with the snapshot. 
+	sink, err := r.snapshots.Create(version, lastIndex, term,
+		r.configurations.latest, r.configurations.latestIndex, r.trans)
+	if err != nil {
+		return fmt.Errorf("failed to create snapshot: %v", err)
+	}
+	n, err := io.Copy(sink, reader)
+	if err != nil {
+		sink.Cancel()
+		return fmt.Errorf("failed to write snapshot: %v", err)
+	}
+	if n != meta.Size {
+		sink.Cancel()
+		return fmt.Errorf("failed to write snapshot, size didn't match (%d != %d)", n, meta.Size)
+	}
+	if err := sink.Close(); err != nil {
+		return fmt.Errorf("failed to close snapshot: %v", err)
+	}
+	r.logger.Info("copied to local snapshot", "bytes", n)
+
+	// Restore the snapshot into the FSM. If this fails we are in a
+	// bad state so we panic to take ourselves out.
+	fsm := &restoreFuture{ID: sink.ID()}
+	fsm.init()
+	select {
+	case r.fsmMutateCh <- fsm:
+	case <-r.shutdownCh:
+		return ErrRaftShutdown
+	}
+	if err := fsm.Error(); err != nil {
+		panic(fmt.Errorf("failed to restore snapshot: %v", err))
+	}
+
+	// We set the last log so it looks like we've stored the empty
+	// index we burned. The last applied is set because we made the
+	// FSM take the snapshot state, and we store the last snapshot
+	// in the stable store since we created a snapshot as part of
+	// this process.
+	r.setLastLog(lastIndex, term)
+	r.setLastApplied(lastIndex)
+	r.setLastSnapshot(lastIndex, term)
+
+	r.logger.Info("restored user snapshot", "index", lastIndex)
+	return nil
+}
+
+// appendConfigurationEntry changes the configuration and adds a new
+// configuration entry to the log. This must only be called from the
+// main thread.
+func (r *Raft) appendConfigurationEntry(future *configurationChangeFuture) {
+	configuration, err := nextConfiguration(r.configurations.latest, r.configurations.latestIndex, future.req)
+	if err != nil {
+		future.respond(err)
+		return
+	}
+
+	r.logger.Info("updating configuration",
+		"command", future.req.command,
+		"server-id", future.req.serverID,
+		"server-addr", future.req.serverAddress,
+		"servers", hclog.Fmt("%+v", configuration.Servers))
+
+	// In pre-ID compatibility mode we translate all configuration changes
+	// into an old remove-peer message, which can handle all supported
+	// cases for peer changes in the pre-ID world (adding and removing
+	// voters). Both add peer and remove peer log entries are handled
+	// similarly on old Raft servers, but remove peer does extra checks to
+	// see if a leader needs to step down. Since they both assert the full
+	// configuration, we can safely call remove peer for everything.
+	if r.protocolVersion < 2 {
+		future.log = Log{
+			Type: LogRemovePeerDeprecated,
+			Data: encodePeers(configuration, r.trans),
+		}
+	} else {
+		future.log = Log{
+			Type: LogConfiguration,
+			Data: EncodeConfiguration(configuration),
+		}
+	}
+
+	r.dispatchLogs([]*logFuture{&future.logFuture})
+	index := future.Index()
+	r.configurations.latest = configuration
+	r.configurations.latestIndex = index
+	r.leaderState.commitment.setConfiguration(configuration)
+	r.startStopReplication()
+}
+
+// dispatchLogs is called on the leader to push logs to disk, mark them
+// as inflight, and begin replication of them.
+func (r *Raft) dispatchLogs(applyLogs []*logFuture) {
+	now := time.Now()
+	defer metrics.MeasureSince([]string{"raft", "leader", "dispatchLog"}, now)
+
+	term := r.getCurrentTerm()
+	lastIndex := r.getLastIndex()
+
+	n := len(applyLogs)
+	logs := make([]*Log, n)
+	metrics.SetGauge([]string{"raft", "leader", "dispatchNumLogs"}, float32(n))
+
+	for idx, applyLog := range applyLogs {
+		applyLog.dispatch = now
+		lastIndex++
+		applyLog.log.Index = lastIndex
+		applyLog.log.Term = term
+		logs[idx] = &applyLog.log
+		r.leaderState.inflight.PushBack(applyLog)
+	}
+
+	// Write the log entry locally
+	if err := r.logs.StoreLogs(logs); err != nil {
+		r.logger.Error("failed to commit logs", "error", err)
+		for _, applyLog := range applyLogs {
+			applyLog.respond(err)
+		}
+		r.setState(Follower)
+		return
+	}
+	r.leaderState.commitment.match(r.localID, lastIndex)
+
+	// Update the last log since it's on disk now
+	r.setLastLog(lastIndex, term)
+
+	// Notify the replicators of the new log
+	for _, f := range r.leaderState.replState {
+		asyncNotifyCh(f.triggerCh)
+	}
+}
+
+// processLogs is used to apply all the committed entries that haven't been
+// applied up to the given index limit.
+// This can be called from both leaders and followers.
+// Followers call this from AppendEntries, for n entries at a time, and always
+// pass futures=nil.
+// Leaders call this when entries are committed. They pass the futures from any
+// inflight logs.
+func (r *Raft) processLogs(index uint64, futures map[uint64]*logFuture) {
+	// Reject logs we've applied already
+	lastApplied := r.getLastApplied()
+	if index <= lastApplied {
+		r.logger.Warn("skipping application of old log", "index", index)
+		return
+	}
+
+	applyBatch := func(batch []*commitTuple) {
+		select {
+		case r.fsmMutateCh <- batch:
+		case <-r.shutdownCh:
+			for _, cl := range batch {
+				if cl.future != nil {
+					cl.future.respond(ErrRaftShutdown)
+				}
+			}
+		}
+	}
+
+	batch := make([]*commitTuple, 0, r.conf.MaxAppendEntries)
+
+	// Apply all the preceding logs
+	for idx := lastApplied + 1; idx <= index; idx++ {
+		var preparedLog *commitTuple
+		// Get the log, either from the future or from our log store
+		future, futureOk := futures[idx]
+		if futureOk {
+			preparedLog = r.prepareLog(&future.log, future)
+		} else {
+			l := new(Log)
+			if err := r.logs.GetLog(idx, l); err != nil {
+				r.logger.Error("failed to get log", "index", idx, "error", err)
+				panic(err)
+			}
+			preparedLog = r.prepareLog(l, nil)
+		}
+
+		switch {
+		case preparedLog != nil:
+			// If we have a log ready to send to the FSM add it to the batch.
+			// The FSM thread will respond to the future.
+			batch = append(batch, preparedLog)
+
+			// If we have filled up a batch, send it to the FSM
+			if len(batch) >= r.conf.MaxAppendEntries {
+				applyBatch(batch)
+				batch = make([]*commitTuple, 0, r.conf.MaxAppendEntries)
+			}
+
+		case futureOk:
+			// Invoke the future if given.
+			future.respond(nil)
+		}
+	}
+
+	// If there are any remaining logs in the batch apply them
+	if len(batch) != 0 {
+		applyBatch(batch)
+	}
+
+	// Update the lastApplied index and term
+	r.setLastApplied(index)
+}
+
+// prepareLog decides how a single committed log entry should be applied,
+// returning a commitTuple for entries the FSM must see, or nil otherwise.
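+// As a rough map of the cases handled below (LogConfiguration handling
+// depends on the configured protocol version):
+//
+//	LogCommand, LogBarrier -> &commitTuple{l, future} // handed to the FSM
+//	LogNoop                -> nil                     // leader no-op, skipped
+//	LogAddPeerDeprecated, LogRemovePeerDeprecated -> nil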
+func (r *Raft) prepareLog(l *Log, future *logFuture) *commitTuple { + switch l.Type { + case LogBarrier: + // Barrier is handled by the FSM + fallthrough + + case LogCommand: + return &commitTuple{l, future} + + case LogConfiguration: + // Only support this with the v2 configuration format + if r.protocolVersion > 2 { + return &commitTuple{l, future} + } + case LogAddPeerDeprecated: + case LogRemovePeerDeprecated: + case LogNoop: + // Ignore the no-op + + default: + panic(fmt.Errorf("unrecognized log type: %#v", l)) + } + + return nil +} + +// processRPC is called to handle an incoming RPC request. This must only be +// called from the main thread. +func (r *Raft) processRPC(rpc RPC) { + if err := r.checkRPCHeader(rpc); err != nil { + rpc.Respond(nil, err) + return + } + + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + case *RequestVoteRequest: + r.requestVote(rpc, cmd) + case *InstallSnapshotRequest: + r.installSnapshot(rpc, cmd) + case *TimeoutNowRequest: + r.timeoutNow(rpc, cmd) + default: + r.logger.Error("got unexpected command", + "command", hclog.Fmt("%#v", rpc.Command)) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// processHeartbeat is a special handler used just for heartbeat requests +// so that they can be fast-pathed if a transport supports it. This must only +// be called from the main thread. +func (r *Raft) processHeartbeat(rpc RPC) { + defer metrics.MeasureSince([]string{"raft", "rpc", "processHeartbeat"}, time.Now()) + + // Check if we are shutdown, just ignore the RPC + select { + case <-r.shutdownCh: + return + default: + } + + // Ensure we are only handling a heartbeat + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + default: + r.logger.Error("expected heartbeat, got", "command", hclog.Fmt("%#v", rpc.Command)) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// appendEntries is invoked when we get an append entries RPC call. This must +// only be called from the main thread. 
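+// Note that heartbeats are ordinary AppendEntries RPCs with an empty Entries
+// slice; they may be fast-pathed through processHeartbeat above, but land in
+// this handler as well. A sketch of a heartbeat request, as built by the
+// replication code in this package:
+//
+//	req := AppendEntriesRequest{
+//		RPCHeader: r.getRPCHeader(),
+//		Term:      s.currentTerm,
+//		Leader:    r.trans.EncodePeer(r.localID, r.localAddr),
+//	}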
+func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "appendEntries"}, time.Now()) + // Setup a response + resp := &AppendEntriesResponse{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + LastLog: r.getLastIndex(), + Success: false, + NoRetryBackoff: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Ignore an older term + if a.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one, also transition to follower + // if we ever get an appendEntries call + if a.Term > r.getCurrentTerm() || r.getState() != Follower { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(a.Term) + resp.Term = a.Term + } + + // Save the current leader + r.setLeader(ServerAddress(r.trans.DecodePeer(a.Leader))) + + // Verify the last log entry + if a.PrevLogEntry > 0 { + lastIdx, lastTerm := r.getLastEntry() + + var prevLogTerm uint64 + if a.PrevLogEntry == lastIdx { + prevLogTerm = lastTerm + + } else { + var prevLog Log + if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil { + r.logger.Warn("failed to get previous log", + "previous-index", a.PrevLogEntry, + "last-index", lastIdx, + "error", err) + resp.NoRetryBackoff = true + return + } + prevLogTerm = prevLog.Term + } + + if a.PrevLogTerm != prevLogTerm { + r.logger.Warn("previous log term mis-match", + "ours", prevLogTerm, + "remote", a.PrevLogTerm) + resp.NoRetryBackoff = true + return + } + } + + // Process any new entries + if len(a.Entries) > 0 { + start := time.Now() + + // Delete any conflicting entries, skip any duplicates + lastLogIdx, _ := r.getLastLog() + var newEntries []*Log + for i, entry := range a.Entries { + if entry.Index > lastLogIdx { + newEntries = a.Entries[i:] + break + } + var storeEntry Log + if err := r.logs.GetLog(entry.Index, &storeEntry); err != nil { + r.logger.Warn("failed to get log entry", + "index", entry.Index, + "error", err) + return + } + if entry.Term != storeEntry.Term { + r.logger.Warn("clearing log suffix", + "from", entry.Index, + "to", lastLogIdx) + if err := r.logs.DeleteRange(entry.Index, lastLogIdx); err != nil { + r.logger.Error("failed to clear log suffix", "error", err) + return + } + if entry.Index <= r.configurations.latestIndex { + r.configurations.latest = r.configurations.committed + r.configurations.latestIndex = r.configurations.committedIndex + } + newEntries = a.Entries[i:] + break + } + } + + if n := len(newEntries); n > 0 { + // Append the new entries + if err := r.logs.StoreLogs(newEntries); err != nil { + r.logger.Error("failed to append to logs", "error", err) + // TODO: leaving r.getLastLog() in the wrong + // state if there was a truncation above + return + } + + // Handle any new configuration changes + for _, newEntry := range newEntries { + r.processConfigurationLogEntry(newEntry) + } + + // Update the lastLog + last := newEntries[n-1] + r.setLastLog(last.Index, last.Term) + } + + metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "storeLogs"}, start) + } + + // Update the commit index + if a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() { + start := time.Now() + idx := min(a.LeaderCommitIndex, r.getLastIndex()) + r.setCommitIndex(idx) + if r.configurations.latestIndex <= idx { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + } + r.processLogs(idx, nil) + metrics.MeasureSince([]string{"raft", "rpc", 
"appendEntries", "processLogs"}, start) + } + + // Everything went well, set success + resp.Success = true + r.setLastContact() + return +} + +// processConfigurationLogEntry takes a log entry and updates the latest +// configuration if the entry results in a new configuration. This must only be +// called from the main thread, or from NewRaft() before any threads have begun. +func (r *Raft) processConfigurationLogEntry(entry *Log) { + if entry.Type == LogConfiguration { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + r.configurations.latest = DecodeConfiguration(entry.Data) + r.configurations.latestIndex = entry.Index + } else if entry.Type == LogAddPeerDeprecated || entry.Type == LogRemovePeerDeprecated { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + r.configurations.latest = decodePeers(entry.Data, r.trans) + r.configurations.latestIndex = entry.Index + } +} + +// requestVote is invoked when we get an request vote RPC call. +func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now()) + r.observe(*req) + + // Setup a response + resp := &RequestVoteResponse{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + Granted: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Version 0 servers will panic unless the peers is present. It's only + // used on them to produce a warning message. + if r.protocolVersion < 2 { + resp.Peers = encodePeers(r.configurations.latest, r.trans) + } + + // Check if we have an existing leader [who's not the candidate] and also + // check the LeadershipTransfer flag is set. Usually votes are rejected if + // there is a known leader. But if the leader initiated a leadership transfer, + // vote! 
+ candidate := r.trans.DecodePeer(req.Candidate) + if leader := r.Leader(); leader != "" && leader != candidate && !req.LeadershipTransfer { + r.logger.Warn("rejecting vote request since we have a leader", + "from", candidate, + "leader", leader) + return + } + + // Ignore an older term + if req.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one + if req.Term > r.getCurrentTerm() { + // Ensure transition to follower + r.logger.Debug("lost leadership because received a requestVote with a newer term") + r.setState(Follower) + r.setCurrentTerm(req.Term) + resp.Term = req.Term + } + + // Check if we have voted yet + lastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm) + if err != nil && err.Error() != "not found" { + r.logger.Error("failed to get last vote term", "error", err) + return + } + lastVoteCandBytes, err := r.stable.Get(keyLastVoteCand) + if err != nil && err.Error() != "not found" { + r.logger.Error("failed to get last vote candidate", "error", err) + return + } + + // Check if we've voted in this election before + if lastVoteTerm == req.Term && lastVoteCandBytes != nil { + r.logger.Info("duplicate requestVote for same term", "term", req.Term) + if bytes.Compare(lastVoteCandBytes, req.Candidate) == 0 { + r.logger.Warn("duplicate requestVote from", "candidate", req.Candidate) + resp.Granted = true + } + return + } + + // Reject if their term is older + lastIdx, lastTerm := r.getLastEntry() + if lastTerm > req.LastLogTerm { + r.logger.Warn("rejecting vote request since our last term is greater", + "candidate", candidate, + "last-term", lastTerm, + "last-candidate-term", req.LastLogTerm) + return + } + + if lastTerm == req.LastLogTerm && lastIdx > req.LastLogIndex { + r.logger.Warn("rejecting vote request since our last index is greater", + "candidate", candidate, + "last-index", lastIdx, + "last-candidate-index", req.LastLogIndex) + return + } + + // Persist a vote for safety + if err := r.persistVote(req.Term, req.Candidate); err != nil { + r.logger.Error("failed to persist vote", "error", err) + return + } + + resp.Granted = true + r.setLastContact() + return +} + +// installSnapshot is invoked when we get a InstallSnapshot RPC call. +// We must be in the follower state for this, since it means we are +// too far behind a leader for log replay. This must only be called +// from the main thread. 
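+// For orientation: the sending side lives in replication.go; the leader's
+// sendLatestSnapshot opens its most recent snapshot and streams it to this
+// handler, which spills it into the local snapshot store and then asks the
+// FSM to restore from it.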
+func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "installSnapshot"}, time.Now()) + // Setup a response + resp := &InstallSnapshotResponse{ + Term: r.getCurrentTerm(), + Success: false, + } + var rpcErr error + defer func() { + io.Copy(ioutil.Discard, rpc.Reader) // ensure we always consume all the snapshot data from the stream [see issue #212] + rpc.Respond(resp, rpcErr) + }() + + // Sanity check the version + if req.SnapshotVersion < SnapshotVersionMin || + req.SnapshotVersion > SnapshotVersionMax { + rpcErr = fmt.Errorf("unsupported snapshot version %d", req.SnapshotVersion) + return + } + + // Ignore an older term + if req.Term < r.getCurrentTerm() { + r.logger.Info("ignoring installSnapshot request with older term than current term", + "request-term", req.Term, + "current-term", r.getCurrentTerm()) + return + } + + // Increase the term if we see a newer one + if req.Term > r.getCurrentTerm() { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(req.Term) + resp.Term = req.Term + } + + // Save the current leader + r.setLeader(ServerAddress(r.trans.DecodePeer(req.Leader))) + + // Create a new snapshot + var reqConfiguration Configuration + var reqConfigurationIndex uint64 + if req.SnapshotVersion > 0 { + reqConfiguration = DecodeConfiguration(req.Configuration) + reqConfigurationIndex = req.ConfigurationIndex + } else { + reqConfiguration = decodePeers(req.Peers, r.trans) + reqConfigurationIndex = req.LastLogIndex + } + version := getSnapshotVersion(r.protocolVersion) + sink, err := r.snapshots.Create(version, req.LastLogIndex, req.LastLogTerm, + reqConfiguration, reqConfigurationIndex, r.trans) + if err != nil { + r.logger.Error("failed to create snapshot to install", "error", err) + rpcErr = fmt.Errorf("failed to create snapshot: %v", err) + return + } + + // Spill the remote snapshot to disk + n, err := io.Copy(sink, rpc.Reader) + if err != nil { + sink.Cancel() + r.logger.Error("failed to copy snapshot", "error", err) + rpcErr = err + return + } + + // Check that we received it all + if n != req.Size { + sink.Cancel() + r.logger.Error("failed to receive whole snapshot", + "received", hclog.Fmt("%d / %d", n, req.Size)) + rpcErr = fmt.Errorf("short read") + return + } + + // Finalize the snapshot + if err := sink.Close(); err != nil { + r.logger.Error("failed to finalize snapshot", "error", err) + rpcErr = err + return + } + r.logger.Info("copied to local snapshot", "bytes", n) + + // Restore snapshot + future := &restoreFuture{ID: sink.ID()} + future.init() + select { + case r.fsmMutateCh <- future: + case <-r.shutdownCh: + future.respond(ErrRaftShutdown) + return + } + + // Wait for the restore to happen + if err := future.Error(); err != nil { + r.logger.Error("failed to restore snapshot", "error", err) + rpcErr = err + return + } + + // Update the lastApplied so we don't replay old logs + r.setLastApplied(req.LastLogIndex) + + // Update the last stable snapshot info + r.setLastSnapshot(req.LastLogIndex, req.LastLogTerm) + + // Restore the peer set + r.configurations.latest = reqConfiguration + r.configurations.latestIndex = reqConfigurationIndex + r.configurations.committed = reqConfiguration + r.configurations.committedIndex = reqConfigurationIndex + + // Compact logs, continue even if this fails + if err := r.compactLogs(req.LastLogIndex); err != nil { + r.logger.Error("failed to compact logs", "error", err) + } + + r.logger.Info("Installed remote snapshot") + resp.Success = true 
+	r.setLastContact()
+	return
+}
+
+// setLastContact is used to set the last contact time to now
+func (r *Raft) setLastContact() {
+	r.lastContactLock.Lock()
+	r.lastContact = time.Now()
+	r.lastContactLock.Unlock()
+}
+
+type voteResult struct {
+	RequestVoteResponse
+	voterID ServerID
+}
+
+// electSelf is used to send a RequestVote RPC to all peers, and vote for
+// ourself. This has the side effect of incrementing the current term. The
+// response channel returned is used to wait for all the responses (including a
+// vote for ourself). This must only be called from the main thread.
+func (r *Raft) electSelf() <-chan *voteResult {
+	// Create a response channel
+	respCh := make(chan *voteResult, len(r.configurations.latest.Servers))
+
+	// Increment the term
+	r.setCurrentTerm(r.getCurrentTerm() + 1)
+
+	// Construct the request
+	lastIdx, lastTerm := r.getLastEntry()
+	req := &RequestVoteRequest{
+		RPCHeader:          r.getRPCHeader(),
+		Term:               r.getCurrentTerm(),
+		Candidate:          r.trans.EncodePeer(r.localID, r.localAddr),
+		LastLogIndex:       lastIdx,
+		LastLogTerm:        lastTerm,
+		LeadershipTransfer: r.candidateFromLeadershipTransfer,
+	}
+
+	// Construct a function to ask for a vote
+	askPeer := func(peer Server) {
+		r.goFunc(func() {
+			defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now())
+			resp := &voteResult{voterID: peer.ID}
+			err := r.trans.RequestVote(peer.ID, peer.Address, req, &resp.RequestVoteResponse)
+			if err != nil {
+				r.logger.Error("failed to make requestVote RPC",
+					"target", peer,
+					"error", err)
+				resp.Term = req.Term
+				resp.Granted = false
+			}
+			respCh <- resp
+		})
+	}
+
+	// For each peer, request a vote
+	for _, server := range r.configurations.latest.Servers {
+		if server.Suffrage == Voter {
+			if server.ID == r.localID {
+				// Persist a vote for ourselves
+				if err := r.persistVote(req.Term, req.Candidate); err != nil {
+					r.logger.Error("failed to persist vote", "error", err)
+					return nil
+				}
+				// Include our own vote
+				respCh <- &voteResult{
+					RequestVoteResponse: RequestVoteResponse{
+						RPCHeader: r.getRPCHeader(),
+						Term:      req.Term,
+						Granted:   true,
+					},
+					voterID: r.localID,
+				}
+			} else {
+				askPeer(server)
+			}
+		}
+	}
+
+	return respCh
+}
+
+// persistVote is used to persist our vote for safety.
+func (r *Raft) persistVote(term uint64, candidate []byte) error {
+	if err := r.stable.SetUint64(keyLastVoteTerm, term); err != nil {
+		return err
+	}
+	if err := r.stable.Set(keyLastVoteCand, candidate); err != nil {
+		return err
+	}
+	return nil
+}
+
+// setCurrentTerm is used to set the current term in a durable manner.
+func (r *Raft) setCurrentTerm(t uint64) {
+	// Persist to disk first
+	if err := r.stable.SetUint64(keyCurrentTerm, t); err != nil {
+		panic(fmt.Errorf("failed to save current term: %v", err))
+	}
+	r.raftState.setCurrentTerm(t)
+}
+
+// setState is used to update the current state. Any state
+// transition causes the known leader to be cleared. This means
+// that leader should be set only after updating the state.
+func (r *Raft) setState(state RaftState) {
+	r.setLeader("")
+	oldState := r.raftState.getState()
+	r.raftState.setState(state)
+	if oldState != state {
+		r.observe(state)
+	}
+}
+
+// lookupServer looks up a server by ServerID in the latest configuration.
+func (r *Raft) lookupServer(id ServerID) *Server {
+	for _, server := range r.configurations.latest.Servers {
+		if server.ID == id {
+			s := server
+			return &s
+		}
+	}
+	return nil
+}
+
+// pickServer returns the follower that is most up to date. Because it accesses
+// leaderState, it must only be called from the leaderLoop.
+func (r *Raft) pickServer() *Server {
+	var pick *Server
+	var current uint64
+	for _, server := range r.configurations.latest.Servers {
+		if server.ID == r.localID {
+			continue
+		}
+		state, ok := r.leaderState.replState[server.ID]
+		if !ok {
+			continue
+		}
+		nextIdx := atomic.LoadUint64(&state.nextIndex)
+		if nextIdx > current {
+			current = nextIdx
+			tmp := server
+			pick = &tmp
+		}
+	}
+	return pick
+}
+
+// initiateLeadershipTransfer starts a leadership transfer on the leader side,
+// by sending a message to the leadershipTransferCh, to make sure it runs in
+// the main loop.
+func (r *Raft) initiateLeadershipTransfer(id *ServerID, address *ServerAddress) LeadershipTransferFuture {
+	future := &leadershipTransferFuture{ID: id, Address: address}
+	future.init()
+
+	if id != nil && *id == r.localID {
+		err := fmt.Errorf("cannot transfer leadership to itself")
+		r.logger.Info(err.Error())
+		future.respond(err)
+		return future
+	}
+
+	select {
+	case r.leadershipTransferCh <- future:
+		return future
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	default:
+		return errorFuture{ErrEnqueueTimeout}
+	}
+}
+
+// timeoutNow is what happens when a server receives a TimeoutNowRequest.
+func (r *Raft) timeoutNow(rpc RPC, req *TimeoutNowRequest) {
+	r.setLeader("")
+	r.setState(Candidate)
+	r.candidateFromLeadershipTransfer = true
+	rpc.Respond(&TimeoutNowResponse{}, nil)
+}
diff --git a/vendor/github.com/hashicorp/raft/replication.go b/vendor/github.com/hashicorp/raft/replication.go
new file mode 100644
index 00000000..1e2f2db7
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/replication.go
@@ -0,0 +1,599 @@
+package raft
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/armon/go-metrics"
+)
+
+const (
+	maxFailureScale = 12
+	failureWait     = 10 * time.Millisecond
+)
+
+var (
+	// ErrLogNotFound indicates a given log entry is not available.
+	ErrLogNotFound = errors.New("log not found")
+
+	// ErrPipelineReplicationNotSupported can be returned by the transport to
+	// signal that pipeline replication is not supported in general, and that
+	// no error message should be produced.
+	ErrPipelineReplicationNotSupported = errors.New("pipeline replication not supported")
+)
+
+// followerReplication is in charge of sending snapshots and log entries from
+// this leader during this particular term to a remote follower.
+type followerReplication struct {
+	// currentTerm and nextIndex must be kept at the top of the struct so
+	// they're 64 bit aligned which is a requirement for atomic ops on 32 bit
+	// platforms.
+
+	// currentTerm is the term of this leader, to be included in AppendEntries
+	// requests.
+	currentTerm uint64
+
+	// nextIndex is the index of the next log entry to send to the follower,
+	// which may fall past the end of the log.
+	nextIndex uint64
+
+	// peer contains the network address and ID of the remote follower.
+	peer Server
+
+	// commitment tracks the entries acknowledged by followers so that the
+	// leader's commit index can advance. It is updated on successful
+	// AppendEntries responses.
+	commitment *commitment
+
+	// stopCh is notified/closed when this leader steps down or the follower is
+	// removed from the cluster. In the follower removed case, it carries a log
+	// index; replication should be attempted with a best effort up through that
+	// index, before exiting.
+	stopCh chan uint64
+
+	// triggerCh is notified every time new entries are appended to the log.
+	triggerCh chan struct{}
+
+	// triggerDeferErrorCh is used to provide a backchannel. By sending a
+	// deferErr, the sender can be notified when the replication is done.
+	triggerDeferErrorCh chan *deferError
+
+	// lastContact is updated to the current time whenever any response is
+	// received from the follower (successful or not). This is used to check
+	// whether the leader should step down (Raft.checkLeaderLease()).
+	lastContact time.Time
+	// lastContactLock protects 'lastContact'.
+	lastContactLock sync.RWMutex
+
+	// failures counts the number of failed RPCs since the last success, which is
+	// used to apply backoff.
+	failures uint64
+
+	// notifyCh is notified to send out a heartbeat, which is used to check that
+	// this server is still leader.
+	notifyCh chan struct{}
+	// notify is a map of futures to be resolved upon receipt of an
+	// acknowledgement, then cleared from this map.
+	notify map[*verifyFuture]struct{}
+	// notifyLock protects 'notify'.
+	notifyLock sync.Mutex
+
+	// stepDown is used to indicate to the leader that we
+	// should step down based on information from a follower.
+	stepDown chan struct{}
+
+	// allowPipeline is used to determine when to pipeline the AppendEntries RPCs.
+	// It is private to this replication goroutine.
+	allowPipeline bool
+}
+
+// notifyAll is used to notify all the waiting verify futures
+// if the follower believes we are still the leader.
+func (s *followerReplication) notifyAll(leader bool) {
+	// Clear the waiting notifies minimizing lock time
+	s.notifyLock.Lock()
+	n := s.notify
+	s.notify = make(map[*verifyFuture]struct{})
+	s.notifyLock.Unlock()
+
+	// Submit our votes
+	for v := range n {
+		v.vote(leader)
+	}
+}
+
+// cleanNotify is used to remove a pending verify future from the notify map.
+func (s *followerReplication) cleanNotify(v *verifyFuture) {
+	s.notifyLock.Lock()
+	delete(s.notify, v)
+	s.notifyLock.Unlock()
+}
+
+// LastContact returns the time of last contact.
+func (s *followerReplication) LastContact() time.Time {
+	s.lastContactLock.RLock()
+	last := s.lastContact
+	s.lastContactLock.RUnlock()
+	return last
+}
+
+// setLastContact sets the last contact to the current time.
+func (s *followerReplication) setLastContact() {
+	s.lastContactLock.Lock()
+	s.lastContact = time.Now()
+	s.lastContactLock.Unlock()
+}
+
+// replicate is a long running routine that replicates log entries to a single
+// follower.
+func (r *Raft) replicate(s *followerReplication) {
+	// Start an async heartbeating routine
+	stopHeartbeat := make(chan struct{})
+	defer close(stopHeartbeat)
+	r.goFunc(func() { r.heartbeat(s, stopHeartbeat) })
+
+RPC:
+	shouldStop := false
+	for !shouldStop {
+		select {
+		case maxIndex := <-s.stopCh:
+			// Make a best effort to replicate up to this index
+			if maxIndex > 0 {
+				r.replicateTo(s, maxIndex)
+			}
+			return
+		case deferErr := <-s.triggerDeferErrorCh:
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.replicateTo(s, lastLogIdx)
+			if !shouldStop {
+				deferErr.respond(nil)
+			} else {
+				deferErr.respond(fmt.Errorf("replication failed"))
+			}
+		case <-s.triggerCh:
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.replicateTo(s, lastLogIdx)
+		// This is _not_ our heartbeat mechanism but is to ensure
+		// followers quickly learn the leader's commit index when
+		// raft commits stop flowing naturally. The actual heartbeats
+		// can't do this to keep them unblocked by disk IO on the
+		// follower. See https://github.com/hashicorp/raft/issues/282.
+		case <-randomTimeout(r.conf.CommitTimeout):
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.replicateTo(s, lastLogIdx)
+		}
+
+		// If things look healthy, switch to pipeline mode
+		if !shouldStop && s.allowPipeline {
+			goto PIPELINE
+		}
+	}
+	return
+
+PIPELINE:
+	// Disable until re-enabled
+	s.allowPipeline = false
+
+	// Replicates using a pipeline for high performance. This method
+	// is not able to gracefully recover from errors, and so we fall back
+	// to standard mode on failure.
+	if err := r.pipelineReplicate(s); err != nil {
+		if err != ErrPipelineReplicationNotSupported {
+			r.logger.Error("failed to start pipeline replication to", "peer", s.peer, "error", err)
+		}
+	}
+	goto RPC
+}
+
+// replicateTo is a helper to replicate(), used to replicate the logs up to a
+// given last index.
+// If the follower log is behind, we take care to bring them up to date.
+func (r *Raft) replicateTo(s *followerReplication, lastIndex uint64) (shouldStop bool) {
+	// Create the base request
+	var req AppendEntriesRequest
+	var resp AppendEntriesResponse
+	var start time.Time
+START:
+	// Prevent an excessive retry rate on errors
+	if s.failures > 0 {
+		select {
+		case <-time.After(backoff(failureWait, s.failures, maxFailureScale)):
+		case <-r.shutdownCh:
+		}
+	}
+
+	// Setup the request
+	if err := r.setupAppendEntries(s, &req, atomic.LoadUint64(&s.nextIndex), lastIndex); err == ErrLogNotFound {
+		goto SEND_SNAP
+	} else if err != nil {
+		return
+	}
+
+	// Make the RPC call
+	start = time.Now()
+	if err := r.trans.AppendEntries(s.peer.ID, s.peer.Address, &req, &resp); err != nil {
+		r.logger.Error("failed to appendEntries to", "peer", s.peer, "error", err)
+		s.failures++
+		return
+	}
+	appendStats(string(s.peer.ID), start, float32(len(req.Entries)))
+
+	// Check for a newer term, stop running
+	if resp.Term > req.Term {
+		r.handleStaleTerm(s)
+		return true
+	}
+
+	// Update the last contact
+	s.setLastContact()
+
+	// Update s based on success
+	if resp.Success {
+		// Update our replication state
+		updateLastAppended(s, &req)
+
+		// Clear any failures, allow pipelining
+		s.failures = 0
+		s.allowPipeline = true
+	} else {
+		atomic.StoreUint64(&s.nextIndex, max(min(s.nextIndex-1, resp.LastLog+1), 1))
+		if resp.NoRetryBackoff {
+			s.failures = 0
+		} else {
+			s.failures++
+		}
+		r.logger.Warn("appendEntries rejected, sending older logs", "peer", s.peer, "next", atomic.LoadUint64(&s.nextIndex))
+	}
+
+CHECK_MORE:
+	// Poll the stop channel here in case we are looping and have been asked
+	// to stop, or have stepped down as leader. Even for the best effort case
+	// where we are asked to replicate to a given index and then shutdown,
+	// it's better to not loop in here to send lots of entries to a straggler
+	// that's leaving the cluster anyway.
+	select {
+	case <-s.stopCh:
+		return true
+	default:
+	}
+
+	// Check if there are more logs to replicate
+	if atomic.LoadUint64(&s.nextIndex) <= lastIndex {
+		goto START
+	}
+	return
+
+	// SEND_SNAP is used when we fail to get a log, usually because the follower
+	// is too far behind, and we must ship a snapshot down instead
+SEND_SNAP:
+	if stop, err := r.sendLatestSnapshot(s); stop {
+		return true
+	} else if err != nil {
+		r.logger.Error("failed to send snapshot to", "peer", s.peer, "error", err)
+		return
+	}
+
+	// Check if there is more to replicate
+	goto CHECK_MORE
+}
+
+// sendLatestSnapshot is used to send the latest snapshot we have
+// down to our follower.
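+// The SnapshotStore behind r.snapshots is pluggable. As a wiring sketch (the
+// path and retain count are made-up values), the file-based store shipped
+// with this package would be created by the application like:
+//
+//	snaps, err := NewFileSnapshotStore("/var/lib/raft", 2, os.Stderr)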
+func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { + // Get the snapshots + snapshots, err := r.snapshots.List() + if err != nil { + r.logger.Error("failed to list snapshots", "error", err) + return false, err + } + + // Check we have at least a single snapshot + if len(snapshots) == 0 { + return false, fmt.Errorf("no snapshots found") + } + + // Open the most recent snapshot + snapID := snapshots[0].ID + meta, snapshot, err := r.snapshots.Open(snapID) + if err != nil { + r.logger.Error("failed to open snapshot", "id", snapID, "error", err) + return false, err + } + defer snapshot.Close() + + // Setup the request + req := InstallSnapshotRequest{ + RPCHeader: r.getRPCHeader(), + SnapshotVersion: meta.Version, + Term: s.currentTerm, + Leader: r.trans.EncodePeer(r.localID, r.localAddr), + LastLogIndex: meta.Index, + LastLogTerm: meta.Term, + Peers: meta.Peers, + Size: meta.Size, + Configuration: EncodeConfiguration(meta.Configuration), + ConfigurationIndex: meta.ConfigurationIndex, + } + + // Make the call + start := time.Now() + var resp InstallSnapshotResponse + if err := r.trans.InstallSnapshot(s.peer.ID, s.peer.Address, &req, &resp, snapshot); err != nil { + r.logger.Error("failed to install snapshot", "id", snapID, "error", err) + s.failures++ + return false, err + } + metrics.MeasureSince([]string{"raft", "replication", "installSnapshot", string(s.peer.ID)}, start) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return true, nil + } + + // Update the last contact + s.setLastContact() + + // Check for success + if resp.Success { + // Update the indexes + atomic.StoreUint64(&s.nextIndex, meta.Index+1) + s.commitment.match(s.peer.ID, meta.Index) + + // Clear any failures + s.failures = 0 + + // Notify we are still leader + s.notifyAll(true) + } else { + s.failures++ + r.logger.Warn("installSnapshot rejected to", "peer", s.peer) + } + return false, nil +} + +// heartbeat is used to periodically invoke AppendEntries on a peer +// to ensure they don't time out. This is done async of replicate(), +// since that routine could potentially be blocked on disk IO. +func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) { + var failures uint64 + req := AppendEntriesRequest{ + RPCHeader: r.getRPCHeader(), + Term: s.currentTerm, + Leader: r.trans.EncodePeer(r.localID, r.localAddr), + } + var resp AppendEntriesResponse + for { + // Wait for the next heartbeat interval or forced notify + select { + case <-s.notifyCh: + case <-randomTimeout(r.conf.HeartbeatTimeout / 10): + case <-stopCh: + return + } + + start := time.Now() + if err := r.trans.AppendEntries(s.peer.ID, s.peer.Address, &req, &resp); err != nil { + r.logger.Error("failed to heartbeat to", "peer", s.peer.Address, "error", err) + failures++ + select { + case <-time.After(backoff(failureWait, failures, maxFailureScale)): + case <-stopCh: + } + } else { + s.setLastContact() + failures = 0 + metrics.MeasureSince([]string{"raft", "replication", "heartbeat", string(s.peer.ID)}, start) + s.notifyAll(resp.Success) + } + } +} + +// pipelineReplicate is used when we have synchronized our state with the follower, +// and want to switch to a higher performance pipeline mode of replication. +// We only pipeline AppendEntries commands, and if we ever hit an error, we fall +// back to the standard replication which can handle more complex situations. 
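Reviewer note: heartbeat above runs on its own goroutine precisely so that disk-bound work in replicate() can never delay leadership keep-alives, and it backs off on consecutive failures. A rough, runnable sketch of the same shape follows, assuming a capped doubling backoff like the vendored helper; all names and intervals are illustrative, not the vendored API.

```go
package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// backoff doubles base for each failure beyond the second, capped at limit
// doublings, matching the shape of the helper the heartbeat loop uses.
func backoff(base time.Duration, round, limit uint64) time.Duration {
	if round > limit {
		round = limit
	}
	for round > 2 {
		base *= 2
		round--
	}
	return base
}

// heartbeat fires on a randomized interval or immediately when notifyCh is
// poked; send is a stand-in for the real AppendEntries RPC.
func heartbeat(notifyCh, stopCh chan struct{}, send func() error) {
	var failures uint64
	interval := 100 * time.Millisecond // stand-in for HeartbeatTimeout / 10
	for {
		select {
		case <-notifyCh:
		case <-time.After(interval + time.Duration(rand.Int63n(int64(interval)))):
		case <-stopCh:
			return
		}
		if err := send(); err != nil {
			failures++
			select { // back off, but stay responsive to shutdown
			case <-time.After(backoff(10*time.Millisecond, failures, 12)):
			case <-stopCh:
				return
			}
		} else {
			failures = 0
			fmt.Println("heartbeat ok")
		}
	}
}

func main() {
	stopCh := make(chan struct{})
	notifyCh := make(chan struct{}, 1)
	calls := 0
	go heartbeat(notifyCh, stopCh, func() error {
		calls++
		if calls%3 == 0 {
			return errors.New("flaky follower")
		}
		return nil
	})
	notifyCh <- struct{}{} // force an immediate heartbeat
	time.Sleep(300 * time.Millisecond)
	close(stopCh)
}
```

Decoupling heartbeats from log shipping is the same concern the SetHeartbeatHandler fast path later in this diff addresses.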
+func (r *Raft) pipelineReplicate(s *followerReplication) error { + // Create a new pipeline + pipeline, err := r.trans.AppendEntriesPipeline(s.peer.ID, s.peer.Address) + if err != nil { + return err + } + defer pipeline.Close() + + // Log start and stop of pipeline + r.logger.Info("pipelining replication", "peer", s.peer) + defer r.logger.Info("aborting pipeline replication", "peer", s.peer) + + // Create a shutdown and finish channel + stopCh := make(chan struct{}) + finishCh := make(chan struct{}) + + // Start a dedicated decoder + r.goFunc(func() { r.pipelineDecode(s, pipeline, stopCh, finishCh) }) + + // Start pipeline sends at the last good nextIndex + nextIndex := atomic.LoadUint64(&s.nextIndex) + + shouldStop := false +SEND: + for !shouldStop { + select { + case <-finishCh: + break SEND + case maxIndex := <-s.stopCh: + // Make a best effort to replicate up to this index + if maxIndex > 0 { + r.pipelineSend(s, pipeline, &nextIndex, maxIndex) + } + break SEND + case deferErr := <-s.triggerDeferErrorCh: + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + if !shouldStop { + deferErr.respond(nil) + } else { + deferErr.respond(fmt.Errorf("replication failed")) + } + case <-s.triggerCh: + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + case <-randomTimeout(r.conf.CommitTimeout): + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + } + } + + // Stop our decoder, and wait for it to finish + close(stopCh) + select { + case <-finishCh: + case <-r.shutdownCh: + } + return nil +} + +// pipelineSend is used to send data over a pipeline. It is a helper to +// pipelineReplicate. +func (r *Raft) pipelineSend(s *followerReplication, p AppendPipeline, nextIdx *uint64, lastIndex uint64) (shouldStop bool) { + // Create a new append request + req := new(AppendEntriesRequest) + if err := r.setupAppendEntries(s, req, *nextIdx, lastIndex); err != nil { + return true + } + + // Pipeline the append entries + if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil { + r.logger.Error("failed to pipeline appendEntries", "peer", s.peer, "error", err) + return true + } + + // Increase the next send log to avoid re-sending old logs + if n := len(req.Entries); n > 0 { + last := req.Entries[n-1] + atomic.StoreUint64(nextIdx, last.Index+1) + } + return false +} + +// pipelineDecode is used to decode the responses of pipelined requests. +func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh, finishCh chan struct{}) { + defer close(finishCh) + respCh := p.Consumer() + for { + select { + case ready := <-respCh: + req, resp := ready.Request(), ready.Response() + appendStats(string(s.peer.ID), ready.Start(), float32(len(req.Entries))) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return + } + + // Update the last contact + s.setLastContact() + + // Abort pipeline if not successful + if !resp.Success { + return + } + + // Update our replication state + updateLastAppended(s, req) + case <-stopCh: + return + } + } +} + +// setupAppendEntries is used to setup an append entries request. 
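For reviewers unfamiliar with pipeline mode: the point is to keep several AppendEntries RPCs in flight at once and let a separate goroutine consume the responses in order, trading error-handling simplicity for throughput. A toy sketch of that producer/decoder split is below; the names and the in-flight window of 8 are made up for illustration.

```go
package main

import (
	"fmt"
	"time"
)

// inflight is a toy stand-in for AppendFuture: the request's last index and
// a channel the "decoder" waits on for the response.
type inflight struct {
	lastIndex uint64
	done      chan bool // response: success or not
}

func main() {
	pipe := make(chan *inflight, 8) // up to 8 RPCs in flight
	finished := make(chan struct{})

	// Decoder goroutine: consumes responses in send order and advances
	// replication state, mirroring pipelineDecode.
	go func() {
		defer close(finished)
		for f := range pipe {
			if ok := <-f.done; !ok {
				fmt.Println("response failed; abort pipeline at", f.lastIndex)
				return
			}
			fmt.Println("acked up to", f.lastIndex)
		}
	}()

	// Sender: issues RPCs without waiting for the previous response and
	// advances nextIndex optimistically, mirroring pipelineSend.
	var nextIndex uint64 = 1
	for batch := 0; batch < 3; batch++ {
		f := &inflight{lastIndex: nextIndex + 4, done: make(chan bool, 1)}
		nextIndex = f.lastIndex + 1 // optimistic advance
		pipe <- f
		go func(f *inflight) { // fake follower acking after some latency
			time.Sleep(10 * time.Millisecond)
			f.done <- true
		}(f)
	}
	close(pipe)
	<-finished
}
```

Falling back to the plain loop on any error, as pipelineReplicate does, keeps the hard recovery cases out of the fast path.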
+func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { + req.RPCHeader = r.getRPCHeader() + req.Term = s.currentTerm + req.Leader = r.trans.EncodePeer(r.localID, r.localAddr) + req.LeaderCommitIndex = r.getCommitIndex() + if err := r.setPreviousLog(req, nextIndex); err != nil { + return err + } + if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil { + return err + } + return nil +} + +// setPreviousLog is used to setup the PrevLogEntry and PrevLogTerm for an +// AppendEntriesRequest given the next index to replicate. +func (r *Raft) setPreviousLog(req *AppendEntriesRequest, nextIndex uint64) error { + // Guard for the first index, since there is no 0 log entry + // Guard against the previous index being a snapshot as well + lastSnapIdx, lastSnapTerm := r.getLastSnapshot() + if nextIndex == 1 { + req.PrevLogEntry = 0 + req.PrevLogTerm = 0 + + } else if (nextIndex - 1) == lastSnapIdx { + req.PrevLogEntry = lastSnapIdx + req.PrevLogTerm = lastSnapTerm + + } else { + var l Log + if err := r.logs.GetLog(nextIndex-1, &l); err != nil { + r.logger.Error("failed to get log", "index", nextIndex-1, "error", err) + return err + } + + // Set the previous index and term (0 if nextIndex is 1) + req.PrevLogEntry = l.Index + req.PrevLogTerm = l.Term + } + return nil +} + +// setNewLogs is used to setup the logs which should be appended for a request. +func (r *Raft) setNewLogs(req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { + // Append up to MaxAppendEntries or up to the lastIndex + req.Entries = make([]*Log, 0, r.conf.MaxAppendEntries) + maxIndex := min(nextIndex+uint64(r.conf.MaxAppendEntries)-1, lastIndex) + for i := nextIndex; i <= maxIndex; i++ { + oldLog := new(Log) + if err := r.logs.GetLog(i, oldLog); err != nil { + r.logger.Error("failed to get log", "index", i, "error", err) + return err + } + req.Entries = append(req.Entries, oldLog) + } + return nil +} + +// appendStats is used to emit stats about an AppendEntries invocation. +func appendStats(peer string, start time.Time, logs float32) { + metrics.MeasureSince([]string{"raft", "replication", "appendEntries", "rpc", peer}, start) + metrics.IncrCounter([]string{"raft", "replication", "appendEntries", "logs", peer}, logs) +} + +// handleStaleTerm is used when a follower indicates that we have a stale term. +func (r *Raft) handleStaleTerm(s *followerReplication) { + r.logger.Error("peer has newer term, stopping replication", "peer", s.peer) + s.notifyAll(false) // No longer leader + asyncNotifyCh(s.stepDown) +} + +// updateLastAppended is used to update follower replication state after a +// successful AppendEntries RPC. +// TODO: This isn't used during InstallSnapshot, but the code there is similar. +func updateLastAppended(s *followerReplication, req *AppendEntriesRequest) { + // Mark any inflight logs as committed + if logs := req.Entries; len(logs) > 0 { + last := logs[len(logs)-1] + atomic.StoreUint64(&s.nextIndex, last.Index+1) + s.commitment.match(s.peer.ID, last.Index) + } + + // Notify still leader + s.notifyAll(true) +} diff --git a/vendor/github.com/hashicorp/raft/snapshot.go b/vendor/github.com/hashicorp/raft/snapshot.go new file mode 100644 index 00000000..f4c39451 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/snapshot.go @@ -0,0 +1,243 @@ +package raft + +import ( + "fmt" + "io" + "time" + + "github.com/armon/go-metrics" +) + +// SnapshotMeta is for metadata of a snapshot. 
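The batching rule in setNewLogs above caps each RPC at MaxAppendEntries entries, so a lagging follower is caught up in windows of [nextIndex, min(nextIndex+MaxAppendEntries-1, lastIndex)]. A small illustration of that arithmetic, using 64 purely as an example batch size (appendWindow is a hypothetical helper):

```go
package main

import "fmt"

func min(a, b uint64) uint64 {
	if a <= b {
		return a
	}
	return b
}

// appendWindow returns the inclusive range of log indexes a single
// AppendEntries request would carry under the batching rule above.
func appendWindow(nextIndex, lastIndex uint64, maxAppendEntries int) (lo, hi uint64) {
	return nextIndex, min(nextIndex+uint64(maxAppendEntries)-1, lastIndex)
}

func main() {
	// A follower at nextIndex=101 catching up to lastIndex=500,
	// 64 entries per request:
	for next := uint64(101); next <= 500; {
		lo, hi := appendWindow(next, 500, 64)
		fmt.Printf("send entries [%d, %d]\n", lo, hi)
		next = hi + 1
	}
}
```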
+type SnapshotMeta struct {
+	// Version is the version number of the snapshot metadata. This does not cover
+	// the application's data in the snapshot, that should be versioned
+	// separately.
+	Version SnapshotVersion
+
+	// ID is opaque to the store, and is used for opening.
+	ID string
+
+	// Index and Term store when the snapshot was taken.
+	Index uint64
+	Term uint64
+
+	// Peers is deprecated and used to support version 0 snapshots, but will
+	// be populated in version 1 snapshots as well to help with upgrades.
+	Peers []byte
+
+	// Configuration and ConfigurationIndex are present in version 1
+	// snapshots and later.
+	Configuration Configuration
+	ConfigurationIndex uint64
+
+	// Size is the size of the snapshot in bytes.
+	Size int64
+}
+
+// SnapshotStore interface is used to allow for flexible implementations
+// of snapshot storage and retrieval. For example, a client could implement
+// a shared state store such as S3, allowing new nodes to restore snapshots
+// without streaming from the leader.
+type SnapshotStore interface {
+	// Create is used to begin a snapshot at a given index and term, and with
+	// the given committed configuration. The version parameter controls
+	// which snapshot version to create.
+	Create(version SnapshotVersion, index, term uint64, configuration Configuration,
+		configurationIndex uint64, trans Transport) (SnapshotSink, error)
+
+	// List is used to list the available snapshots in the store.
+	// It should return them in descending order, with the highest index first.
+	List() ([]*SnapshotMeta, error)
+
+	// Open takes a snapshot ID and provides a ReadCloser. Once Close is
+	// called it is assumed the snapshot is no longer needed.
+	Open(id string) (*SnapshotMeta, io.ReadCloser, error)
+}
+
+// SnapshotSink is returned by StartSnapshot. The FSM will Write state
+// to the sink and call Close on completion. On error, Cancel will be invoked.
+type SnapshotSink interface {
+	io.WriteCloser
+	ID() string
+	Cancel() error
+}
+
+// runSnapshots is a long running goroutine used to manage taking
+// new snapshots of the FSM. It runs in parallel to the FSM and
+// main goroutines, so that snapshots do not block normal operation.
+func (r *Raft) runSnapshots() {
+	for {
+		select {
+		case <-randomTimeout(r.conf.SnapshotInterval):
+			// Check if we should snapshot
+			if !r.shouldSnapshot() {
+				continue
+			}
+
+			// Trigger a snapshot
+			if _, err := r.takeSnapshot(); err != nil {
+				r.logger.Error("failed to take snapshot", "error", err)
+			}
+
+		case future := <-r.userSnapshotCh:
+			// User-triggered, run immediately
+			id, err := r.takeSnapshot()
+			if err != nil {
+				r.logger.Error("failed to take snapshot", "error", err)
+			} else {
+				future.opener = func() (*SnapshotMeta, io.ReadCloser, error) {
+					return r.snapshots.Open(id)
+				}
+			}
+			future.respond(err)
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// shouldSnapshot checks if we meet the conditions to take
+// a new snapshot.
+func (r *Raft) shouldSnapshot() bool {
+	// Check the last snapshot index
+	lastSnap, _ := r.getLastSnapshot()
+
+	// Check the last log index
+	lastIdx, err := r.logs.LastIndex()
+	if err != nil {
+		r.logger.Error("failed to get last log index", "error", err)
+		return false
+	}
+
+	// Compare the delta to the threshold
+	delta := lastIdx - lastSnap
+	return delta >= r.conf.SnapshotThreshold
+}
+
+// takeSnapshot is used to take a new snapshot. This must only be called from
+// the snapshot thread, never the main thread.
This returns the ID of the new +// snapshot, along with an error. +func (r *Raft) takeSnapshot() (string, error) { + defer metrics.MeasureSince([]string{"raft", "snapshot", "takeSnapshot"}, time.Now()) + + // Create a request for the FSM to perform a snapshot. + snapReq := &reqSnapshotFuture{} + snapReq.init() + + // Wait for dispatch or shutdown. + select { + case r.fsmSnapshotCh <- snapReq: + case <-r.shutdownCh: + return "", ErrRaftShutdown + } + + // Wait until we get a response + if err := snapReq.Error(); err != nil { + if err != ErrNothingNewToSnapshot { + err = fmt.Errorf("failed to start snapshot: %v", err) + } + return "", err + } + defer snapReq.snapshot.Release() + + // Make a request for the configurations and extract the committed info. + // We have to use the future here to safely get this information since + // it is owned by the main thread. + configReq := &configurationsFuture{} + configReq.init() + select { + case r.configurationsCh <- configReq: + case <-r.shutdownCh: + return "", ErrRaftShutdown + } + if err := configReq.Error(); err != nil { + return "", err + } + committed := configReq.configurations.committed + committedIndex := configReq.configurations.committedIndex + + // We don't support snapshots while there's a config change outstanding + // since the snapshot doesn't have a means to represent this state. This + // is a little weird because we need the FSM to apply an index that's + // past the configuration change, even though the FSM itself doesn't see + // the configuration changes. It should be ok in practice with normal + // application traffic flowing through the FSM. If there's none of that + // then it's not crucial that we snapshot, since there's not much going + // on Raft-wise. + if snapReq.index < committedIndex { + return "", fmt.Errorf("cannot take snapshot now, wait until the configuration entry at %v has been applied (have applied %v)", + committedIndex, snapReq.index) + } + + // Create a new snapshot. + r.logger.Info("starting snapshot up to", "index", snapReq.index) + start := time.Now() + version := getSnapshotVersion(r.protocolVersion) + sink, err := r.snapshots.Create(version, snapReq.index, snapReq.term, committed, committedIndex, r.trans) + if err != nil { + return "", fmt.Errorf("failed to create snapshot: %v", err) + } + metrics.MeasureSince([]string{"raft", "snapshot", "create"}, start) + + // Try to persist the snapshot. + start = time.Now() + if err := snapReq.snapshot.Persist(sink); err != nil { + sink.Cancel() + return "", fmt.Errorf("failed to persist snapshot: %v", err) + } + metrics.MeasureSince([]string{"raft", "snapshot", "persist"}, start) + + // Close and check for error. + if err := sink.Close(); err != nil { + return "", fmt.Errorf("failed to close snapshot: %v", err) + } + + // Update the last stable snapshot info. + r.setLastSnapshot(snapReq.index, snapReq.term) + + // Compact the logs. + if err := r.compactLogs(snapReq.index); err != nil { + return "", err + } + + r.logger.Info("snapshot complete up to", "index", snapReq.index) + return sink.ID(), nil +} + +// compactLogs takes the last inclusive index of a snapshot +// and trims the logs that are no longer needed. 
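To make the compaction bound in compactLogs concrete: entries are deleted only up to min(snapIdx, lastLogIdx-TrailingLogs), so at least TrailingLogs entries always survive and nothing newer than the snapshot is ever dropped. A minimal sketch of that arithmetic, which ignores the store's first index for brevity; truncateUpTo is a hypothetical helper and 10240 is just an example TrailingLogs value:

```go
package main

import "fmt"

func min(a, b uint64) uint64 {
	if a <= b {
		return a
	}
	return b
}

// truncateUpTo computes the highest log index deletable after a snapshot at
// snapIdx, keeping at least trailingLogs entries behind the head and never
// deleting entries newer than the snapshot. ok is false when nothing can go.
func truncateUpTo(snapIdx, lastLogIdx, trailingLogs uint64) (maxLog uint64, ok bool) {
	if lastLogIdx <= trailingLogs {
		return 0, false // not enough logs to bother truncating
	}
	return min(snapIdx, lastLogIdx-trailingLogs), true
}

func main() {
	// Snapshot at index 9000, log head at 10000, keep 10240 trailing:
	if _, ok := truncateUpTo(9000, 10000, 10240); !ok {
		fmt.Println("nothing to compact yet")
	}
	// Later: head at 25000, snapshot at 20000, keep 10240 trailing:
	if maxLog, ok := truncateUpTo(20000, 25000, 10240); ok {
		fmt.Println("delete range up to index", maxLog) // 14760, not 20000
	}
}
```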
+func (r *Raft) compactLogs(snapIdx uint64) error {
+	defer metrics.MeasureSince([]string{"raft", "compactLogs"}, time.Now())
+	// Determine log ranges to compact
+	minLog, err := r.logs.FirstIndex()
+	if err != nil {
+		return fmt.Errorf("failed to get first log index: %v", err)
+	}
+
+	// Check if we have enough logs to truncate
+	lastLogIdx, _ := r.getLastLog()
+	if lastLogIdx <= r.conf.TrailingLogs {
+		return nil
+	}
+
+	// Truncate up to the end of the snapshot, or `TrailingLogs`
+	// back from the head, whichever is further back. This ensures
+	// at least `TrailingLogs` entries, but does not allow logs
+	// after the snapshot to be removed.
+	maxLog := min(snapIdx, lastLogIdx-r.conf.TrailingLogs)
+
+	if minLog > maxLog {
+		r.logger.Info("no logs to truncate")
+		return nil
+	}
+
+	r.logger.Info("compacting logs", "from", minLog, "to", maxLog)
+
+	// Compact the logs
+	if err := r.logs.DeleteRange(minLog, maxLog); err != nil {
+		return fmt.Errorf("log compaction failed: %v", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft/stable.go b/vendor/github.com/hashicorp/raft/stable.go
new file mode 100644
index 00000000..ff59a8c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/stable.go
@@ -0,0 +1,15 @@
+package raft
+
+// StableStore is used to provide stable storage
+// of key configurations to ensure safety.
+type StableStore interface {
+	Set(key []byte, val []byte) error
+
+	// Get returns the value for key, or an empty byte slice if key was not found.
+	Get(key []byte) ([]byte, error)
+
+	SetUint64(key []byte, val uint64) error
+
+	// GetUint64 returns the uint64 value for key, or 0 if key was not found.
+	GetUint64(key []byte) (uint64, error)
+}
diff --git a/vendor/github.com/hashicorp/raft/state.go b/vendor/github.com/hashicorp/raft/state.go
new file mode 100644
index 00000000..a58cd0d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/state.go
@@ -0,0 +1,171 @@
+package raft
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// RaftState captures the state of a Raft node: Follower, Candidate, Leader,
+// or Shutdown.
+type RaftState uint32
+
+const (
+	// Follower is the initial state of a Raft node.
+	Follower RaftState = iota
+
+	// Candidate is one of the valid states of a Raft node.
+	Candidate
+
+	// Leader is one of the valid states of a Raft node.
+	Leader
+
+	// Shutdown is the terminal state of a Raft node.
+	Shutdown
+)
+
+func (s RaftState) String() string {
+	switch s {
+	case Follower:
+		return "Follower"
+	case Candidate:
+		return "Candidate"
+	case Leader:
+		return "Leader"
+	case Shutdown:
+		return "Shutdown"
+	default:
+		return "Unknown"
+	}
+}
+
+// raftState is used to maintain various state variables
+// and provides an interface to set/get the variables in a
+// thread safe manner.
+type raftState struct {
+	// currentTerm, commitIndex, and lastApplied must be kept at the top of
+	// the struct so they're 64-bit aligned, which is a requirement for
+	// atomic ops on 32-bit platforms.
+ + // The current term, cache of StableStore + currentTerm uint64 + + // Highest committed log entry + commitIndex uint64 + + // Last applied log to the FSM + lastApplied uint64 + + // protects 4 next fields + lastLock sync.Mutex + + // Cache the latest snapshot index/term + lastSnapshotIndex uint64 + lastSnapshotTerm uint64 + + // Cache the latest log from LogStore + lastLogIndex uint64 + lastLogTerm uint64 + + // Tracks running goroutines + routinesGroup sync.WaitGroup + + // The current state + state RaftState +} + +func (r *raftState) getState() RaftState { + stateAddr := (*uint32)(&r.state) + return RaftState(atomic.LoadUint32(stateAddr)) +} + +func (r *raftState) setState(s RaftState) { + stateAddr := (*uint32)(&r.state) + atomic.StoreUint32(stateAddr, uint32(s)) +} + +func (r *raftState) getCurrentTerm() uint64 { + return atomic.LoadUint64(&r.currentTerm) +} + +func (r *raftState) setCurrentTerm(term uint64) { + atomic.StoreUint64(&r.currentTerm, term) +} + +func (r *raftState) getLastLog() (index, term uint64) { + r.lastLock.Lock() + index = r.lastLogIndex + term = r.lastLogTerm + r.lastLock.Unlock() + return +} + +func (r *raftState) setLastLog(index, term uint64) { + r.lastLock.Lock() + r.lastLogIndex = index + r.lastLogTerm = term + r.lastLock.Unlock() +} + +func (r *raftState) getLastSnapshot() (index, term uint64) { + r.lastLock.Lock() + index = r.lastSnapshotIndex + term = r.lastSnapshotTerm + r.lastLock.Unlock() + return +} + +func (r *raftState) setLastSnapshot(index, term uint64) { + r.lastLock.Lock() + r.lastSnapshotIndex = index + r.lastSnapshotTerm = term + r.lastLock.Unlock() +} + +func (r *raftState) getCommitIndex() uint64 { + return atomic.LoadUint64(&r.commitIndex) +} + +func (r *raftState) setCommitIndex(index uint64) { + atomic.StoreUint64(&r.commitIndex, index) +} + +func (r *raftState) getLastApplied() uint64 { + return atomic.LoadUint64(&r.lastApplied) +} + +func (r *raftState) setLastApplied(index uint64) { + atomic.StoreUint64(&r.lastApplied, index) +} + +// Start a goroutine and properly handle the race between a routine +// starting and incrementing, and exiting and decrementing. +func (r *raftState) goFunc(f func()) { + r.routinesGroup.Add(1) + go func() { + defer r.routinesGroup.Done() + f() + }() +} + +func (r *raftState) waitShutdown() { + r.routinesGroup.Wait() +} + +// getLastIndex returns the last index in stable storage. +// Either from the last log or from the last snapshot. +func (r *raftState) getLastIndex() uint64 { + r.lastLock.Lock() + defer r.lastLock.Unlock() + return max(r.lastLogIndex, r.lastSnapshotIndex) +} + +// getLastEntry returns the last index and term in stable storage. +// Either from the last log or from the last snapshot. +func (r *raftState) getLastEntry() (uint64, uint64) { + r.lastLock.Lock() + defer r.lastLock.Unlock() + if r.lastLogIndex >= r.lastSnapshotIndex { + return r.lastLogIndex, r.lastLogTerm + } + return r.lastSnapshotIndex, r.lastSnapshotTerm +} diff --git a/vendor/github.com/hashicorp/raft/tag.sh b/vendor/github.com/hashicorp/raft/tag.sh new file mode 100644 index 00000000..cd16623a --- /dev/null +++ b/vendor/github.com/hashicorp/raft/tag.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -e + +# The version must be supplied from the environment. Do not include the +# leading "v". +if [ -z $VERSION ]; then + echo "Please specify a version." + exit 1 +fi + +# Generate the tag. +echo "==> Tagging version $VERSION..." 
+git commit --allow-empty -a --gpg-sign=348FFC4C -m "Release v$VERSION" +git tag -a -m "Version $VERSION" -s -u 348FFC4C "v${VERSION}" master + +exit 0 diff --git a/vendor/github.com/hashicorp/raft/tcp_transport.go b/vendor/github.com/hashicorp/raft/tcp_transport.go new file mode 100644 index 00000000..ff40a57b --- /dev/null +++ b/vendor/github.com/hashicorp/raft/tcp_transport.go @@ -0,0 +1,116 @@ +package raft + +import ( + "errors" + "github.com/hashicorp/go-hclog" + "io" + "net" + "time" +) + +var ( + errNotAdvertisable = errors.New("local bind address is not advertisable") + errNotTCP = errors.New("local address is not a TCP address") +) + +// TCPStreamLayer implements StreamLayer interface for plain TCP. +type TCPStreamLayer struct { + advertise net.Addr + listener *net.TCPListener +} + +// NewTCPTransport returns a NetworkTransport that is built on top of +// a TCP streaming transport layer. +func NewTCPTransport( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport { + return NewNetworkTransport(stream, maxPool, timeout, logOutput) + }) +} + +// NewTCPTransportWithLogger returns a NetworkTransport that is built on top of +// a TCP streaming transport layer, with log output going to the supplied Logger +func NewTCPTransportWithLogger( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logger hclog.Logger, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport { + return NewNetworkTransportWithLogger(stream, maxPool, timeout, logger) + }) +} + +// NewTCPTransportWithConfig returns a NetworkTransport that is built on top of +// a TCP streaming transport layer, using the given config struct. +func NewTCPTransportWithConfig( + bindAddr string, + advertise net.Addr, + config *NetworkTransportConfig, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport { + config.Stream = stream + return NewNetworkTransportWithConfig(config) + }) +} + +func newTCPTransport(bindAddr string, + advertise net.Addr, + transportCreator func(stream StreamLayer) *NetworkTransport) (*NetworkTransport, error) { + // Try to bind + list, err := net.Listen("tcp", bindAddr) + if err != nil { + return nil, err + } + + // Create stream + stream := &TCPStreamLayer{ + advertise: advertise, + listener: list.(*net.TCPListener), + } + + // Verify that we have a usable advertise address + addr, ok := stream.Addr().(*net.TCPAddr) + if !ok { + list.Close() + return nil, errNotTCP + } + if addr.IP.IsUnspecified() { + list.Close() + return nil, errNotAdvertisable + } + + // Create the network transport + trans := transportCreator(stream) + return trans, nil +} + +// Dial implements the StreamLayer interface. +func (t *TCPStreamLayer) Dial(address ServerAddress, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", string(address), timeout) +} + +// Accept implements the net.Listener interface. +func (t *TCPStreamLayer) Accept() (c net.Conn, err error) { + return t.listener.Accept() +} + +// Close implements the net.Listener interface. +func (t *TCPStreamLayer) Close() (err error) { + return t.listener.Close() +} + +// Addr implements the net.Listener interface. 
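The advertise-address guard in newTCPTransport above is worth calling out: binding to a wildcard address is fine, but the address handed to peers must be concrete and must be TCP. A self-contained sketch of the same validation; checkAdvertisable is a hypothetical helper, not part of the package:

```go
package main

import (
	"fmt"
	"net"
)

// checkAdvertisable mirrors the guard above: bind anywhere, but the address
// we would advertise to peers must be a concrete TCP address.
func checkAdvertisable(bind string, advertise net.Addr) error {
	ln, err := net.Listen("tcp", bind)
	if err != nil {
		return err
	}
	defer ln.Close()

	addr := ln.Addr()
	if advertise != nil {
		addr = advertise // an explicit advertise address takes precedence
	}
	tcp, ok := addr.(*net.TCPAddr)
	if !ok {
		return fmt.Errorf("local address is not a TCP address")
	}
	if tcp.IP.IsUnspecified() {
		return fmt.Errorf("local bind address is not advertisable")
	}
	return nil
}

func main() {
	// Binding to the wildcard address with no advertise address fails:
	fmt.Println(checkAdvertisable("0.0.0.0:0", nil))
	// Supplying a concrete advertise address makes it usable:
	adv, _ := net.ResolveTCPAddr("tcp", "10.0.0.5:8300")
	fmt.Println(checkAdvertisable("0.0.0.0:0", adv))
}
```

This is the same pair of checks that produces errNotTCP and errNotAdvertisable above.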
+func (t *TCPStreamLayer) Addr() net.Addr { + // Use an advertise addr if provided + if t.advertise != nil { + return t.advertise + } + return t.listener.Addr() +} diff --git a/vendor/github.com/hashicorp/raft/testing.go b/vendor/github.com/hashicorp/raft/testing.go new file mode 100644 index 00000000..70fd7b79 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/testing.go @@ -0,0 +1,809 @@ +package raft + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "sync" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-msgpack/codec" +) + +var ( + userSnapshotErrorsOnNoData = true +) + +// Return configurations optimized for in-memory +func inmemConfig(t *testing.T) *Config { + conf := DefaultConfig() + conf.HeartbeatTimeout = 50 * time.Millisecond + conf.ElectionTimeout = 50 * time.Millisecond + conf.LeaderLeaseTimeout = 50 * time.Millisecond + conf.CommitTimeout = 5 * time.Millisecond + conf.Logger = newTestLeveledLogger(t) + return conf +} + +// MockFSM is an implementation of the FSM interface, and just stores +// the logs sequentially. +// +// NOTE: This is exposed for middleware testing purposes and is not a stable API +type MockFSM struct { + sync.Mutex + logs [][]byte + configurations []Configuration +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +type MockFSMConfigStore struct { + FSM +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +type WrappingFSM interface { + Underlying() FSM +} + +func getMockFSM(fsm FSM) *MockFSM { + switch f := fsm.(type) { + case *MockFSM: + return f + case *MockFSMConfigStore: + return f.FSM.(*MockFSM) + case WrappingFSM: + return getMockFSM(f.Underlying()) + } + + return nil +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +type MockSnapshot struct { + logs [][]byte + maxIndex int +} + +var _ ConfigurationStore = (*MockFSMConfigStore)(nil) + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSM) Apply(log *Log) interface{} { + m.Lock() + defer m.Unlock() + m.logs = append(m.logs, log.Data) + return len(m.logs) +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSM) Snapshot() (FSMSnapshot, error) { + m.Lock() + defer m.Unlock() + return &MockSnapshot{m.logs, len(m.logs)}, nil +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSM) Restore(inp io.ReadCloser) error { + m.Lock() + defer m.Unlock() + defer inp.Close() + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(inp, &hd) + + m.logs = nil + return dec.Decode(&m.logs) +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSM) Logs() [][]byte { + m.Lock() + defer m.Unlock() + return m.logs +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSMConfigStore) StoreConfiguration(index uint64, config Configuration) { + mm := m.FSM.(*MockFSM) + mm.Lock() + defer mm.Unlock() + mm.configurations = append(mm.configurations, config) +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockSnapshot) Persist(sink SnapshotSink) error { + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(sink, &hd) + if err := enc.Encode(m.logs[:m.maxIndex]); err != nil { + sink.Cancel() + return err + } + sink.Close() + return nil +} + +// NOTE: This is exposed for middleware testing 
purposes and is not a stable API +func (m *MockSnapshot) Release() { +} + +// This can be used as the destination for a logger and it'll +// map them into calls to testing.T.Log, so that you only see +// the logging for failed tests. +type testLoggerAdapter struct { + t *testing.T + prefix string +} + +func (a *testLoggerAdapter) Write(d []byte) (int, error) { + if d[len(d)-1] == '\n' { + d = d[:len(d)-1] + } + if a.prefix != "" { + l := a.prefix + ": " + string(d) + if testing.Verbose() { + fmt.Printf("testLoggerAdapter verbose: %s\n", l) + } + a.t.Log(l) + return len(l), nil + } + + a.t.Log(string(d)) + return len(d), nil +} + +func newTestLogger(t *testing.T) hclog.Logger { + return hclog.New(&hclog.LoggerOptions{ + Output: &testLoggerAdapter{t: t}, + Level: hclog.DefaultLevel, + }) +} + +func newTestLoggerWithPrefix(t *testing.T, prefix string) hclog.Logger { + return hclog.New(&hclog.LoggerOptions{ + Output: &testLoggerAdapter{t: t, prefix: prefix}, + Level: hclog.DefaultLevel, + }) +} + +func newTestLeveledLogger(t *testing.T) hclog.Logger { + return hclog.New(&hclog.LoggerOptions{ + Name: "", + Output: &testLoggerAdapter{t: t}, + }) +} + +func newTestLeveledLoggerWithPrefix(t *testing.T, prefix string) hclog.Logger { + return hclog.New(&hclog.LoggerOptions{ + Name: prefix, + Output: &testLoggerAdapter{t: t, prefix: prefix}, + }) +} + +type cluster struct { + dirs []string + stores []*InmemStore + fsms []FSM + snaps []*FileSnapshotStore + trans []LoopbackTransport + rafts []*Raft + t *testing.T + observationCh chan Observation + conf *Config + propagateTimeout time.Duration + longstopTimeout time.Duration + logger hclog.Logger + startTime time.Time + + failedLock sync.Mutex + failedCh chan struct{} + failed bool +} + +func (c *cluster) Merge(other *cluster) { + c.dirs = append(c.dirs, other.dirs...) + c.stores = append(c.stores, other.stores...) + c.fsms = append(c.fsms, other.fsms...) + c.snaps = append(c.snaps, other.snaps...) + c.trans = append(c.trans, other.trans...) + c.rafts = append(c.rafts, other.rafts...) +} + +// notifyFailed will close the failed channel which can signal the goroutine +// running the test that another goroutine has detected a failure in order to +// terminate the test. +func (c *cluster) notifyFailed() { + c.failedLock.Lock() + defer c.failedLock.Unlock() + if !c.failed { + c.failed = true + close(c.failedCh) + } +} + +// Failf provides a logging function that fails the tests, prints the output +// with microseconds, and does not mysteriously eat the string. This can be +// safely called from goroutines but won't immediately halt the test. The +// failedCh will be closed to allow blocking functions in the main thread to +// detect the failure and react. Note that you should arrange for the main +// thread to block until all goroutines have completed in order to reliably +// fail tests using this function. +func (c *cluster) Failf(format string, args ...interface{}) { + c.logger.Error(fmt.Sprintf(format, args...)) + c.t.Fail() + c.notifyFailed() +} + +// FailNowf provides a logging function that fails the tests, prints the output +// with microseconds, and does not mysteriously eat the string. FailNowf must be +// called from the goroutine running the test or benchmark function, not from +// other goroutines created during the test. Calling FailNowf does not stop +// those other goroutines. 
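The notifyFailed pattern above, a mutex-guarded flag plus a closed channel, is the standard Go idiom for broadcasting a one-time event to any number of waiters. A minimal sketch with hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// failSignal mirrors notifyFailed above: the first failure closes a channel,
// which every blocked waiter observes as a broadcast; the mutex and flag
// make the close idempotent.
type failSignal struct {
	mu       sync.Mutex
	failed   bool
	failedCh chan struct{}
}

func newFailSignal() *failSignal {
	return &failSignal{failedCh: make(chan struct{})}
}

func (f *failSignal) notifyFailed() {
	f.mu.Lock()
	defer f.mu.Unlock()
	if !f.failed {
		f.failed = true
		close(f.failedCh)
	}
}

func main() {
	f := newFailSignal()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-f.failedCh // all waiters unblock on the single close
			fmt.Println("waiter", id, "saw failure")
		}(i)
	}
	time.Sleep(10 * time.Millisecond)
	f.notifyFailed()
	f.notifyFailed() // safe to call again
	wg.Wait()
}
```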
+func (c *cluster) FailNowf(format string, args ...interface{}) { + c.logger.Error(fmt.Sprintf(format, args...)) + c.t.FailNow() +} + +// Close shuts down the cluster and cleans up. +func (c *cluster) Close() { + var futures []Future + for _, r := range c.rafts { + futures = append(futures, r.Shutdown()) + } + + // Wait for shutdown + limit := time.AfterFunc(c.longstopTimeout, func() { + // We can't FailNowf here, and c.Failf won't do anything if we + // hang, so panic. + panic("timed out waiting for shutdown") + }) + defer limit.Stop() + + for _, f := range futures { + if err := f.Error(); err != nil { + c.FailNowf("shutdown future err: %v", err) + } + } + + for _, d := range c.dirs { + os.RemoveAll(d) + } +} + +// WaitEventChan returns a channel which will signal if an observation is made +// or a timeout occurs. It is possible to set a filter to look for specific +// observations. Setting timeout to 0 means that it will wait forever until a +// non-filtered observation is made. +func (c *cluster) WaitEventChan(filter FilterFn, timeout time.Duration) <-chan struct{} { + ch := make(chan struct{}) + go func() { + defer close(ch) + var timeoutCh <-chan time.Time + if timeout > 0 { + timeoutCh = time.After(timeout) + } + for { + select { + case <-timeoutCh: + return + + case o, ok := <-c.observationCh: + if !ok || filter == nil || filter(&o) { + return + } + } + } + }() + return ch +} + +// WaitEvent waits until an observation is made, a timeout occurs, or a test +// failure is signaled. It is possible to set a filter to look for specific +// observations. Setting timeout to 0 means that it will wait forever until a +// non-filtered observation is made or a test failure is signaled. +func (c *cluster) WaitEvent(filter FilterFn, timeout time.Duration) { + select { + case <-c.failedCh: + c.t.FailNow() + + case <-c.WaitEventChan(filter, timeout): + } +} + +// WaitForReplication blocks until every FSM in the cluster has the given +// length, or the long sanity check timeout expires. +func (c *cluster) WaitForReplication(fsmLength int) { + limitCh := time.After(c.longstopTimeout) + +CHECK: + for { + ch := c.WaitEventChan(nil, c.conf.CommitTimeout) + select { + case <-c.failedCh: + c.t.FailNow() + + case <-limitCh: + c.FailNowf("timeout waiting for replication") + + case <-ch: + for _, fsmRaw := range c.fsms { + fsm := getMockFSM(fsmRaw) + fsm.Lock() + num := len(fsm.logs) + fsm.Unlock() + if num != fsmLength { + continue CHECK + } + } + return + } + } +} + +// pollState takes a snapshot of the state of the cluster. This might not be +// stable, so use GetInState() to apply some additional checks when waiting +// for the cluster to achieve a particular state. +func (c *cluster) pollState(s RaftState) ([]*Raft, uint64) { + var highestTerm uint64 + in := make([]*Raft, 0, 1) + for _, r := range c.rafts { + if r.State() == s { + in = append(in, r) + } + term := r.getCurrentTerm() + if term > highestTerm { + highestTerm = term + } + } + return in, highestTerm +} + +// GetInState polls the state of the cluster and attempts to identify when it has +// settled into the given state. +func (c *cluster) GetInState(s RaftState) []*Raft { + c.logger.Info("starting stability test", "raft-state", s) + limitCh := time.After(c.longstopTimeout) + + // An election should complete after 2 * max(HeartbeatTimeout, ElectionTimeout) + // because of the randomised timer expiring in 1 x interval ... 2 x interval. + // We add a bit for propagation delay. If the election fails (e.g. 
because + // two elections start at once), we will have got something through our + // observer channel indicating a different state (i.e. one of the nodes + // will have moved to candidate state) which will reset the timer. + // + // Because of an implementation peculiarity, it can actually be 3 x timeout. + timeout := c.conf.HeartbeatTimeout + if timeout < c.conf.ElectionTimeout { + timeout = c.conf.ElectionTimeout + } + timeout = 2*timeout + c.conf.CommitTimeout + timer := time.NewTimer(timeout) + defer timer.Stop() + + // Wait until we have a stable instate slice. Each time we see an + // observation a state has changed, recheck it and if it has changed, + // restart the timer. + var pollStartTime = time.Now() + for { + inState, highestTerm := c.pollState(s) + inStateTime := time.Now() + + // Sometimes this routine is called very early on before the + // rafts have started up. We then timeout even though no one has + // even started an election. So if the highest term in use is + // zero, we know there are no raft processes that have yet issued + // a RequestVote, and we set a long time out. This is fixed when + // we hear the first RequestVote, at which point we reset the + // timer. + if highestTerm == 0 { + timer.Reset(c.longstopTimeout) + } else { + timer.Reset(timeout) + } + + // Filter will wake up whenever we observe a RequestVote. + filter := func(ob *Observation) bool { + switch ob.Data.(type) { + case RaftState: + return true + case RequestVoteRequest: + return true + default: + return false + } + } + + select { + case <-c.failedCh: + c.t.FailNow() + + case <-limitCh: + c.FailNowf("timeout waiting for stable %s state", s) + + case <-c.WaitEventChan(filter, 0): + c.logger.Debug("resetting stability timeout") + + case t, ok := <-timer.C: + if !ok { + c.FailNowf("timer channel errored") + } + + c.logger.Info(fmt.Sprintf("stable state for %s reached at %s (%d nodes), %s from start of poll, %s from cluster start. Timeout at %s, %s after stability", + s, inStateTime, len(inState), inStateTime.Sub(pollStartTime), inStateTime.Sub(c.startTime), t, t.Sub(inStateTime))) + return inState + } + } +} + +// Leader waits for the cluster to elect a leader and stay in a stable state. +func (c *cluster) Leader() *Raft { + leaders := c.GetInState(Leader) + if len(leaders) != 1 { + c.FailNowf("expected one leader: %v", leaders) + } + return leaders[0] +} + +// Followers waits for the cluster to have N-1 followers and stay in a stable +// state. +func (c *cluster) Followers() []*Raft { + expFollowers := len(c.rafts) - 1 + followers := c.GetInState(Follower) + if len(followers) != expFollowers { + c.FailNowf("timeout waiting for %d followers (followers are %v)", expFollowers, followers) + } + return followers +} + +// FullyConnect connects all the transports together. +func (c *cluster) FullyConnect() { + c.logger.Debug("fully connecting") + for i, t1 := range c.trans { + for j, t2 := range c.trans { + if i != j { + t1.Connect(t2.LocalAddr(), t2) + t2.Connect(t1.LocalAddr(), t1) + } + } + } +} + +// Disconnect disconnects all transports from the given address. +func (c *cluster) Disconnect(a ServerAddress) { + c.logger.Debug("disconnecting", "address", a) + for _, t := range c.trans { + if t.LocalAddr() == a { + t.DisconnectAll() + } else { + t.Disconnect(a) + } + } +} + +// Partition keeps the given list of addresses connected but isolates them +// from the other members of the cluster. 
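GetInState above relies on a resettable stability timer: every relevant observation restarts the window, and the polled state is only reported once a full window passes quietly. A stripped-down, runnable sketch of that pattern; waitStable and all names are illustrative, not the vendored API:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// waitStable polls a value and returns only once no change has been observed
// for `window`; changeCh plays the role of the observer channel that resets
// the stability timer, and `limit` bounds the whole wait.
func waitStable(load func() int64, changeCh <-chan struct{}, window, limit time.Duration) (int64, bool) {
	deadline := time.After(limit)
	timer := time.NewTimer(window)
	defer timer.Stop()
	for {
		current := load()
		select {
		case <-deadline:
			return current, false // never stabilized
		case <-changeCh:
			// Something changed; restart the stability window
			// (standard stop-drain-reset dance).
			if !timer.Stop() {
				select {
				case <-timer.C:
				default:
				}
			}
			timer.Reset(window)
		case <-timer.C:
			return current, true // value held for a full window
		}
	}
}

func main() {
	var state int64
	changeCh := make(chan struct{}, 1)
	go func() {
		for i := int64(1); i <= 3; i++ {
			time.Sleep(20 * time.Millisecond)
			atomic.StoreInt64(&state, i)
			changeCh <- struct{}{}
		}
	}()
	v, ok := waitStable(func() int64 { return atomic.LoadInt64(&state) },
		changeCh, 50*time.Millisecond, time.Second)
	fmt.Println("stable value:", v, "ok:", ok)
}
```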
+func (c *cluster) Partition(far []ServerAddress) {
+	c.logger.Debug("partitioning", "addresses", far)
+
+	// Gather the set of nodes on the "near" side of the partition (we
+	// will call the supplied list of nodes the "far" side).
+	near := make(map[ServerAddress]struct{})
+OUTER:
+	for _, t := range c.trans {
+		l := t.LocalAddr()
+		for _, a := range far {
+			if l == a {
+				continue OUTER
+			}
+		}
+		near[l] = struct{}{}
+	}
+
+	// Now fixup all the connections. The near side will be separated from
+	// the far side, and vice-versa.
+	for _, t := range c.trans {
+		l := t.LocalAddr()
+		if _, ok := near[l]; ok {
+			for _, a := range far {
+				t.Disconnect(a)
+			}
+		} else {
+			for a := range near {
+				t.Disconnect(a)
+			}
+		}
+	}
+}
+
+// IndexOf returns the index of the given raft instance.
+func (c *cluster) IndexOf(r *Raft) int {
+	for i, n := range c.rafts {
+		if n == r {
+			return i
+		}
+	}
+	return -1
+}
+
+// EnsureLeader checks that ALL the nodes think the leader is the given expected
+// leader.
+func (c *cluster) EnsureLeader(t *testing.T, expect ServerAddress) {
+	// We assume c.Leader() has been called already; now check all the rafts
+	// think the leader is correct
+	fail := false
+	for _, r := range c.rafts {
+		leader := ServerAddress(r.Leader())
+		if leader != expect {
+			if leader == "" {
+				leader = "[none]"
+			}
+			if expect == "" {
+				c.logger.Error("peer sees incorrect leader", "peer", r, "leader", leader, "expected-leader", "[none]")
+			} else {
+				c.logger.Error("peer sees incorrect leader", "peer", r, "leader", leader, "expected-leader", expect)
+			}
+			fail = true
+		}
+	}
+	if fail {
+		c.FailNowf("at least one peer has the wrong notion of leader")
+	}
+}
+
+// EnsureSame makes sure all the FSMs have the same contents.
+func (c *cluster) EnsureSame(t *testing.T) {
+	limit := time.Now().Add(c.longstopTimeout)
+	first := getMockFSM(c.fsms[0])
+
+CHECK:
+	first.Lock()
+	for i, fsmRaw := range c.fsms {
+		fsm := getMockFSM(fsmRaw)
+		if i == 0 {
+			continue
+		}
+		fsm.Lock()
+
+		if len(first.logs) != len(fsm.logs) {
+			fsm.Unlock()
+			if time.Now().After(limit) {
+				c.FailNowf("FSM log length mismatch: %d %d",
+					len(first.logs), len(fsm.logs))
+			} else {
+				goto WAIT
+			}
+		}
+
+		for idx := 0; idx < len(first.logs); idx++ {
+			if !bytes.Equal(first.logs[idx], fsm.logs[idx]) {
+				fsm.Unlock()
+				if time.Now().After(limit) {
+					c.FailNowf("FSM log mismatch at index %d", idx)
+				} else {
+					goto WAIT
+				}
+			}
+		}
+		if len(first.configurations) != len(fsm.configurations) {
+			fsm.Unlock()
+			if time.Now().After(limit) {
+				c.FailNowf("FSM configuration length mismatch: %d %d",
+					len(first.configurations), len(fsm.configurations))
+			} else {
+				goto WAIT
+			}
+		}
+
+		for idx := 0; idx < len(first.configurations); idx++ {
+			if !reflect.DeepEqual(first.configurations[idx], fsm.configurations[idx]) {
+				fsm.Unlock()
+				if time.Now().After(limit) {
+					c.FailNowf("FSM configuration mismatch at index %d: %v, %v", idx, first.configurations[idx], fsm.configurations[idx])
+				} else {
+					goto WAIT
+				}
+			}
+		}
+		fsm.Unlock()
+	}
+
+	first.Unlock()
+	return
+
+WAIT:
+	first.Unlock()
+	c.WaitEvent(nil, c.conf.CommitTimeout)
+	goto CHECK
+}
+
+// getConfiguration returns the configuration of the given Raft instance, or
+// fails the test if there's an error
+func (c *cluster) getConfiguration(r *Raft) Configuration {
+	future := r.GetConfiguration()
+	if err := future.Error(); err != nil {
+		c.FailNowf("failed to get configuration: %v", err)
+		return Configuration{}
+	}
+
+	return future.Configuration()
+}
+
+// EnsureSamePeers makes sure all the
rafts have the same set of peers. +func (c *cluster) EnsureSamePeers(t *testing.T) { + limit := time.Now().Add(c.longstopTimeout) + peerSet := c.getConfiguration(c.rafts[0]) + +CHECK: + for i, raft := range c.rafts { + if i == 0 { + continue + } + + otherSet := c.getConfiguration(raft) + if !reflect.DeepEqual(peerSet, otherSet) { + if time.Now().After(limit) { + c.FailNowf("peer mismatch: %+v %+v", peerSet, otherSet) + } else { + goto WAIT + } + } + } + return + +WAIT: + c.WaitEvent(nil, c.conf.CommitTimeout) + goto CHECK +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +type MakeClusterOpts struct { + Peers int + Bootstrap bool + Conf *Config + ConfigStoreFSM bool + MakeFSMFunc func() FSM + LongstopTimeout time.Duration +} + +// makeCluster will return a cluster with the given config and number of peers. +// If bootstrap is true, the servers will know about each other before starting, +// otherwise their transports will be wired up but they won't yet have configured +// each other. +func makeCluster(t *testing.T, opts *MakeClusterOpts) *cluster { + if opts.Conf == nil { + opts.Conf = inmemConfig(t) + } + + c := &cluster{ + observationCh: make(chan Observation, 1024), + conf: opts.Conf, + // Propagation takes a maximum of 2 heartbeat timeouts (time to + // get a new heartbeat that would cause a commit) plus a bit. + propagateTimeout: opts.Conf.HeartbeatTimeout*2 + opts.Conf.CommitTimeout, + longstopTimeout: 5 * time.Second, + logger: newTestLoggerWithPrefix(t, "cluster"), + failedCh: make(chan struct{}), + } + if opts.LongstopTimeout > 0 { + c.longstopTimeout = opts.LongstopTimeout + } + + c.t = t + var configuration Configuration + + // Setup the stores and transports + for i := 0; i < opts.Peers; i++ { + dir, err := ioutil.TempDir("", "raft") + if err != nil { + c.FailNowf("err: %v", err) + } + + store := NewInmemStore() + c.dirs = append(c.dirs, dir) + c.stores = append(c.stores, store) + if opts.ConfigStoreFSM { + c.fsms = append(c.fsms, &MockFSMConfigStore{ + FSM: &MockFSM{}, + }) + } else { + var fsm FSM + if opts.MakeFSMFunc != nil { + fsm = opts.MakeFSMFunc() + } else { + fsm = &MockFSM{} + } + c.fsms = append(c.fsms, fsm) + } + + dir2, snap := FileSnapTest(t) + c.dirs = append(c.dirs, dir2) + c.snaps = append(c.snaps, snap) + + addr, trans := NewInmemTransport("") + c.trans = append(c.trans, trans) + localID := ServerID(fmt.Sprintf("server-%s", addr)) + if opts.Conf.ProtocolVersion < 3 { + localID = ServerID(addr) + } + configuration.Servers = append(configuration.Servers, Server{ + Suffrage: Voter, + ID: localID, + Address: addr, + }) + } + + // Wire the transports together + c.FullyConnect() + + // Create all the rafts + c.startTime = time.Now() + for i := 0; i < opts.Peers; i++ { + logs := c.stores[i] + store := c.stores[i] + snap := c.snaps[i] + trans := c.trans[i] + + peerConf := opts.Conf + peerConf.LocalID = configuration.Servers[i].ID + peerConf.Logger = newTestLeveledLoggerWithPrefix(t, string(configuration.Servers[i].ID)) + + if opts.Bootstrap { + err := BootstrapCluster(peerConf, logs, store, snap, trans, configuration) + if err != nil { + c.FailNowf("BootstrapCluster failed: %v", err) + } + } + + raft, err := NewRaft(peerConf, c.fsms[i], logs, store, snap, trans) + if err != nil { + c.FailNowf("NewRaft failed: %v", err) + } + + raft.RegisterObserver(NewObserver(c.observationCh, false, nil)) + if err != nil { + c.FailNowf("RegisterObserver failed: %v", err) + } + c.rafts = append(c.rafts, raft) + } + + return c +} + +// NOTE: This 
is exposed for middleware testing purposes and is not a stable API +func MakeCluster(n int, t *testing.T, conf *Config) *cluster { + return makeCluster(t, &MakeClusterOpts{ + Peers: n, + Bootstrap: true, + Conf: conf, + }) +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func MakeClusterNoBootstrap(n int, t *testing.T, conf *Config) *cluster { + return makeCluster(t, &MakeClusterOpts{ + Peers: n, + Conf: conf, + }) +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func MakeClusterCustom(t *testing.T, opts *MakeClusterOpts) *cluster { + return makeCluster(t, opts) +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func FileSnapTest(t *testing.T) (string, *FileSnapshotStore) { + // Create a test dir + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + + snap, err := NewFileSnapshotStoreWithLogger(dir, 3, newTestLogger(t)) + if err != nil { + t.Fatalf("err: %v", err) + } + return dir, snap +} diff --git a/vendor/github.com/hashicorp/raft/testing_batch.go b/vendor/github.com/hashicorp/raft/testing_batch.go new file mode 100644 index 00000000..afb22856 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/testing_batch.go @@ -0,0 +1,29 @@ +// +build batchtest + +package raft + +func init() { + userSnapshotErrorsOnNoData = false +} + +// ApplyBatch enables MockFSM to satisfy the BatchingFSM interface. This +// function is gated by the batchtest build flag. +// +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSM) ApplyBatch(logs []*Log) []interface{} { + m.Lock() + defer m.Unlock() + + ret := make([]interface{}, len(logs)) + for i, log := range logs { + switch log.Type { + case LogCommand: + m.logs = append(m.logs, log.Data) + ret[i] = len(m.logs) + default: + ret[i] = nil + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/raft/transport.go b/vendor/github.com/hashicorp/raft/transport.go new file mode 100644 index 00000000..b18d2459 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/transport.go @@ -0,0 +1,127 @@ +package raft + +import ( + "io" + "time" +) + +// RPCResponse captures both a response and a potential error. +type RPCResponse struct { + Response interface{} + Error error +} + +// RPC has a command, and provides a response mechanism. +type RPC struct { + Command interface{} + Reader io.Reader // Set only for InstallSnapshot + RespChan chan<- RPCResponse +} + +// Respond is used to respond with a response, error or both +func (r *RPC) Respond(resp interface{}, err error) { + r.RespChan <- RPCResponse{resp, err} +} + +// Transport provides an interface for network transports +// to allow Raft to communicate with other nodes. +type Transport interface { + // Consumer returns a channel that can be used to + // consume and respond to RPC requests. + Consumer() <-chan RPC + + // LocalAddr is used to return our local address to distinguish from our peers. + LocalAddr() ServerAddress + + // AppendEntriesPipeline returns an interface that can be used to pipeline + // AppendEntries requests. + AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) + + // AppendEntries sends the appropriate RPC to the target node. + AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error + + // RequestVote sends the appropriate RPC to the target node. 
+ RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error + + // InstallSnapshot is used to push a snapshot down to a follower. The data is read from + // the ReadCloser and streamed to the client. + InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error + + // EncodePeer is used to serialize a peer's address. + EncodePeer(id ServerID, addr ServerAddress) []byte + + // DecodePeer is used to deserialize a peer's address. + DecodePeer([]byte) ServerAddress + + // SetHeartbeatHandler is used to setup a heartbeat handler + // as a fast-pass. This is to avoid head-of-line blocking from + // disk IO. If a Transport does not support this, it can simply + // ignore the call, and push the heartbeat onto the Consumer channel. + SetHeartbeatHandler(cb func(rpc RPC)) + + // TimeoutNow is used to start a leadership transfer to the target node. + TimeoutNow(id ServerID, target ServerAddress, args *TimeoutNowRequest, resp *TimeoutNowResponse) error +} + +// WithClose is an interface that a transport may provide which +// allows a transport to be shut down cleanly when a Raft instance +// shuts down. +// +// It is defined separately from Transport as unfortunately it wasn't in the +// original interface specification. +type WithClose interface { + // Close permanently closes a transport, stopping + // any associated goroutines and freeing other resources. + Close() error +} + +// LoopbackTransport is an interface that provides a loopback transport suitable for testing +// e.g. InmemTransport. It's there so we don't have to rewrite tests. +type LoopbackTransport interface { + Transport // Embedded transport reference + WithPeers // Embedded peer management + WithClose // with a close routine +} + +// WithPeers is an interface that a transport may provide which allows for connection and +// disconnection. Unless the transport is a loopback transport, the transport specified to +// "Connect" is likely to be nil. +type WithPeers interface { + Connect(peer ServerAddress, t Transport) // Connect a peer + Disconnect(peer ServerAddress) // Disconnect a given peer + DisconnectAll() // Disconnect all peers, possibly to reconnect them later +} + +// AppendPipeline is used for pipelining AppendEntries requests. It is used +// to increase the replication throughput by masking latency and better +// utilizing bandwidth. +type AppendPipeline interface { + // AppendEntries is used to add another request to the pipeline. + // The send may block which is an effective form of back-pressure. + AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) + + // Consumer returns a channel that can be used to consume + // response futures when they are ready. + Consumer() <-chan AppendFuture + + // Close closes the pipeline and cancels all inflight RPCs + Close() error +} + +// AppendFuture is used to return information about a pipelined AppendEntries request. +type AppendFuture interface { + Future + + // Start returns the time that the append request was started. + // It is always OK to call this method. + Start() time.Time + + // Request holds the parameters of the AppendEntries call. + // It is always OK to call this method. + Request() *AppendEntriesRequest + + // Response holds the results of the AppendEntries call. + // This method must only be called after the Error + // method returns, and will only be valid on success. 
+ Response() *AppendEntriesResponse +} diff --git a/vendor/github.com/hashicorp/raft/util.go b/vendor/github.com/hashicorp/raft/util.go new file mode 100644 index 00000000..90428d74 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/util.go @@ -0,0 +1,133 @@ +package raft + +import ( + "bytes" + crand "crypto/rand" + "fmt" + "math" + "math/big" + "math/rand" + "time" + + "github.com/hashicorp/go-msgpack/codec" +) + +func init() { + // Ensure we use a high-entropy seed for the psuedo-random generator + rand.Seed(newSeed()) +} + +// returns an int64 from a crypto random source +// can be used to seed a source for a math/rand. +func newSeed() int64 { + r, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + return r.Int64() +} + +// randomTimeout returns a value that is between the minVal and 2x minVal. +func randomTimeout(minVal time.Duration) <-chan time.Time { + if minVal == 0 { + return nil + } + extra := (time.Duration(rand.Int63()) % minVal) + return time.After(minVal + extra) +} + +// min returns the minimum. +func min(a, b uint64) uint64 { + if a <= b { + return a + } + return b +} + +// max returns the maximum. +func max(a, b uint64) uint64 { + if a >= b { + return a + } + return b +} + +// generateUUID is used to generate a random UUID. +func generateUUID() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} + +// asyncNotifyCh is used to do an async channel send +// to a single channel without blocking. +func asyncNotifyCh(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// drainNotifyCh empties out a single-item notification channel without +// blocking, and returns whether it received anything. +func drainNotifyCh(ch chan struct{}) bool { + select { + case <-ch: + return true + default: + return false + } +} + +// asyncNotifyBool is used to do an async notification +// on a bool channel. +func asyncNotifyBool(ch chan bool, v bool) { + select { + case ch <- v: + default: + } +} + +// Decode reverses the encode operation on a byte slice input. +func decodeMsgPack(buf []byte, out interface{}) error { + r := bytes.NewBuffer(buf) + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(r, &hd) + return dec.Decode(out) +} + +// Encode writes an encoded object to a new bytes buffer. +func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(buf, &hd) + err := enc.Encode(in) + return buf, err +} + +// backoff is used to compute an exponential backoff +// duration. Base time is scaled by the current round, +// up to some maximum scale factor. 
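asyncNotifyCh and drainNotifyCh above implement coalescing notifications over a one-slot channel: a pending wake-up absorbs later ones instead of blocking the sender. A tiny demonstration follows; the bool return on the send side is added here purely to make the behavior visible (the vendored asyncNotifyCh returns nothing):

```go
package main

import "fmt"

// asyncNotify performs a non-blocking send: if the single-slot channel
// already holds a pending notification, the new one is coalesced (dropped).
func asyncNotify(ch chan struct{}) bool {
	select {
	case ch <- struct{}{}:
		return true
	default:
		return false
	}
}

// drainNotify empties the channel without blocking and reports whether a
// notification was pending, matching drainNotifyCh above.
func drainNotify(ch chan struct{}) bool {
	select {
	case <-ch:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan struct{}, 1)
	fmt.Println(asyncNotify(ch)) // true: slot was empty
	fmt.Println(asyncNotify(ch)) // false: coalesced with the pending one
	fmt.Println(drainNotify(ch)) // true: consumed the single notification
	fmt.Println(drainNotify(ch)) // false: nothing left
}
```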
+func backoff(base time.Duration, round, limit uint64) time.Duration { + power := min(round, limit) + for power > 2 { + base *= 2 + power-- + } + return base +} + +// Needed for sorting []uint64, used to determine commitment +type uint64Slice []uint64 + +func (p uint64Slice) Len() int { return len(p) } +func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/.gitignore b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/.gitignore new file mode 100644 index 00000000..b536c482 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/.gitignore @@ -0,0 +1,58 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.cover + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Other dirs +/bin/ +/pkg/ + +# Vault-specific +example.hcl +example.vault.d + +# Vagrant +.vagrant/ +Vagrantfile + +# Configs +*.hcl + +.DS_Store +.idea +.vscode + +dist/* + +tags + +# Editor backups +*~ +*.sw[a-z] + +# IntelliJ IDEA project files +.idea +*.ipr +*.iml + diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/.travis.yml b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/.travis.yml new file mode 100644 index 00000000..447f3bbd --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - "1.12" +env: +- GO111MODULE=on +script: + - go test diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/LICENSE b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/LICENSE new file mode 100644 index 00000000..a612ad98 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/Makefile b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/Makefile
new file mode 100644
index 00000000..38e7d5dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/Makefile
@@ -0,0 +1,51 @@
+TOOL?=vault-plugin-secrets-kv
+TEST?=$$(go list ./... | grep -v /vendor/)
+VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
+EXTERNAL_TOOLS=\
+	github.com/mitchellh/gox \
+	github.com/golang/dep/cmd/dep
+BUILD_TAGS?=${TOOL}
+GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor)
+
+# bin generates the releasable binaries for this plugin
+bin: fmtcheck generate
+	@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/build.sh'"
+
+default: dev
+
+# dev creates binaries for testing Vault locally. These are put
+# into ./bin/ as well as $GOPATH/bin.
+dev: fmtcheck generate
+	@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
+
+# test runs the unit tests and vets the code
+test: fmtcheck generate
+	CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -count=1 -timeout=20m -parallel=4
+
+testcompile: fmtcheck generate
+	@for pkg in $(TEST) ; do \
+		go test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \
+	done
+
+# generate runs `go generate` to build the dynamically generated
+# source files.
+generate:
+	go generate $$(go list ./... | grep -v /vendor/)
+
+# bootstrap the build by downloading additional tools
+bootstrap:
+	@for tool in $(EXTERNAL_TOOLS) ; do \
+		echo "Installing/Updating $$tool" ; \
+		go get -u $$tool; \
+	done
+
+fmtcheck:
+	@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
+
+fmt:
+	gofmt -w $(GOFMT_FILES)
+
+proto:
+	protoc *.proto --go_out=plugins=grpc:.
+
+.PHONY: bin default generate test vet bootstrap fmt fmtcheck
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/README.md b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/README.md
new file mode 100644
index 00000000..f9274783
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/README.md
@@ -0,0 +1,125 @@
+# Vault Plugin: Key-Value Secrets Backend [![Build Status](https://travis-ci.org/hashicorp/vault-plugin-secrets-kv.svg?branch=master)](https://travis-ci.org/hashicorp/vault-plugin-secrets-kv)
+
+This is a standalone backend plugin for use with [HashiCorp Vault](https://www.github.com/hashicorp/vault).
+This plugin provides Key-Value functionality to Vault.
+
+**Please note**: We take Vault's security and our users' trust very seriously.
If you believe you have found a security issue in Vault, _please responsibly disclose_ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com).
+
+## Quick Links
+ - Vault Website: https://www.vaultproject.io
+ - KV Docs: https://www.vaultproject.io/docs/secrets/kv/index.html
+ - Main Project GitHub: https://www.github.com/hashicorp/vault
+
+## Getting Started
+
+This is a [Vault plugin](https://www.vaultproject.io/docs/internals/plugins.html)
+and is meant to work with Vault. This guide assumes you have already installed Vault
+and have a basic understanding of how Vault works.
+
+Otherwise, first read this guide on how to [get started with Vault](https://www.vaultproject.io/intro/getting-started/install.html).
+
+To learn specifically about how plugins work, see documentation on [Vault plugins](https://www.vaultproject.io/docs/internals/plugins.html).
+
+## Usage
+
+Please see [documentation for the plugin](https://www.vaultproject.io/docs/secrets/kv/index.html)
+on the Vault website.
+
+This plugin is currently built into Vault and by default is accessed
+at `kv`. To enable this in a running Vault server:
+
+```sh
+$ vault secrets enable kv
+Success! Enabled the kv secrets engine at: kv/
+```
+
+Additionally, starting with Vault 0.10, this backend is mounted by default
+at `secret/`.
+
+## Developing
+
+If you wish to work on this plugin, you'll first need
+[Go](https://www.golang.org) installed on your machine
+(version 1.10+ is *required*).
+
+For local dev, first make sure Go is properly installed, including
+setting up a [GOPATH](https://golang.org/doc/code.html#GOPATH).
+Next, clone this repository into
+`$GOPATH/src/github.com/hashicorp/vault-plugin-secrets-kv`.
+You can then download any required build tools by bootstrapping your
+environment:

+```sh
+$ make bootstrap
+```
+
+To compile a development version of this plugin, run `make` or `make dev`.
+This will put the plugin binary in the `bin` and `$GOPATH/bin` folders. `dev`
+mode will only generate the binary for your platform and is faster:
+
+```sh
+$ make
+$ make dev
+```
+
+Put the plugin binary into a location of your choice. This directory
+will be specified as the [`plugin_directory`](https://www.vaultproject.io/docs/configuration/index.html#plugin_directory)
+in the Vault config used to start the server.
+
+```hcl
+...
+plugin_directory = "path/to/plugin/directory"
+...
+```
+
+Start a Vault server with this config file:
+```sh
+$ vault server -config=path/to/config.json ...
+...
+```
+
+Once the server is started, register the plugin in the Vault server's [plugin catalog](https://www.vaultproject.io/docs/internals/plugins.html#plugin-catalog):
+
+```sh
+$ vault write sys/plugins/catalog/kv \
+    sha_256= \
+    command="vault-plugin-secrets-kv"
+...
+Success! Data written to: sys/plugins/catalog/kv
+```
+
+Note: you should generate a new SHA256 checksum if you have made changes
+to the plugin. Example using openssl:
+
+```sh
+openssl dgst -sha256 $GOPATH/bin/vault-plugin-secrets-kv
+...
+SHA256(.../go/bin/vault-plugin-secrets-kv)= 896c13c0f5305daed381952a128322e02bc28a57d0c862a78cbc2ea66e8c6fa1
+```
+
+Enable the secrets plugin backend using the secrets enable plugin command:
+
+```sh
+$ vault secrets enable -plugin-name='kv' plugin
+...
+
+Successfully enabled 'plugin' at 'kv'!
+```
+
+#### Tests
+
+If you are developing this plugin and want to verify it is still
+functioning (and you haven't broken anything else), we recommend
+running the tests.
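+(Note that the Makefile's `test` target clears `VAULT_TOKEN` and `VAULT_ACC`,
+so any acceptance tests gated on those variables are skipped by default.)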
+
+To run the tests, invoke `make test`:
+
+```sh
+$ make test
+```
+
+You can also specify a `TESTARGS` variable to filter tests like so:
+
+```sh
+$ make test TESTARGS='--run=TestConfig'
+```
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/backend.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/backend.go
new file mode 100644
index 00000000..503df8b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/backend.go
@@ -0,0 +1,465 @@
+package kv
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/timestamp"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/keysutil"
+	"github.com/hashicorp/vault/sdk/helper/locksutil"
+	"github.com/hashicorp/vault/sdk/helper/salt"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	// configPath is the location where the config is stored
+	configPath string = "config"
+
+	// metadataPrefix is the prefix where the key metadata is stored.
+	metadataPrefix string = "metadata/"
+
+	// versionPrefix is the prefix where the version data is stored.
+	versionPrefix string = "versions/"
+
+	// defaultMaxVersions is the number of versions to keep around unless set by
+	// the config or key configuration.
+	defaultMaxVersions uint32 = 10
+)
+
+// versionedKVBackend implements logical.Backend
+type versionedKVBackend struct {
+	*framework.Backend
+
+	// keyEncryptedWrapper is a cached version of the EncryptedKeyStorageWrapper
+	keyEncryptedWrapper *keysutil.EncryptedKeyStorageWrapper
+
+	// salt is the cached version of the salt used to create version data
+	// storage paths.
+	salt *salt.Salt
+
+	// l locks the keyPolicy and salt caches.
+	l sync.RWMutex
+
+	// locks is a slice of 256 locks that are used to protect key and version
+	// updates.
+	locks []*locksutil.LockEntry
+
+	// storagePrefix is the prefix given to all the data for a versioned KV
+	// store. We prefix this data so that upgrading from a passthrough backend
+	// to a versioned backend is easier. This value is passed from Vault core
+	// through the backend config.
+	storagePrefix string
+
+	// upgrading is an atomic value denoting if the backend is in the process of
+	// upgrading its data.
+	upgrading *uint32
+
+	// globalConfig is a cached value for fast lookup
+	globalConfig     *Configuration
+	globalConfigLock *sync.RWMutex
+
+	// upgradeCancelFunc is used to shut down the upgrade-checking goroutine
+	// from Cleanup
+	upgradeCancelFunc context.CancelFunc
+}
+
+// Factory will return a logical backend of type versionedKVBackend or
+// PassthroughBackend based on the config passed in.
+func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	version := conf.Config["version"]
+
+	var b logical.Backend
+	var err error
+	switch version {
+	case "1", "":
+		return LeaseSwitchedPassthroughBackend(ctx, conf, conf.Config["leased_passthrough"] == "true")
+	case "2":
+		b, err = VersionedKVFactory(ctx, conf)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+// VersionedKVFactory returns a new backend as logical.Backend.
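+//
+// A minimal sketch of how this factory is reached (illustrative only; in
+// practice Vault core constructs the BackendConfig): Factory above dispatches
+// here when conf.Config["version"] == "2", e.g.
+//
+//	b, err := Factory(ctx, &logical.BackendConfig{
+//		BackendUUID: "hypothetical-uuid",
+//		Config:      map[string]string{"version": "2"},
+//	})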
+func VersionedKVFactory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + upgradeCtx, upgradeCancelFunc := context.WithCancel(ctx) + + b := &versionedKVBackend{ + upgrading: new(uint32), + globalConfigLock: new(sync.RWMutex), + upgradeCancelFunc: upgradeCancelFunc, + } + if conf.BackendUUID == "" { + return nil, errors.New("could not initialize versioned K/V Store, no UUID was provided") + } + b.storagePrefix = conf.BackendUUID + + b.Backend = &framework.Backend{ + BackendType: logical.TypeLogical, + Help: backendHelp, + + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + // Seal wrap the versioned data + path.Join(b.storagePrefix, versionPrefix) + "/", + + // Seal wrap the key policy + path.Join(b.storagePrefix, "policy") + "/", + + // Seal wrap the archived key policy + path.Join(b.storagePrefix, "archive") + "/", + }, + }, + + Paths: framework.PathAppend( + []*framework.Path{ + pathConfig(b), + pathData(b), + pathMetadata(b), + pathDestroy(b), + }, + pathsDelete(b), + + // Make sure this stays at the end so that the valid paths are + // processed first. + pathInvalid(b), + ), + } + + b.locks = locksutil.CreateLocks() + + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + + upgradeDone, err := b.upgradeDone(ctx, conf.StorageView) + if err != nil { + return nil, err + } + if !upgradeDone { + err := b.Upgrade(upgradeCtx, conf.StorageView) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func pathInvalid(b *versionedKVBackend) []*framework.Path { + handler := func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + switch req.Path { + case "metadata", "data", "delete", "undelete", "destroy": + resp := &logical.Response{} + resp.AddWarning("Non-listing operations on the root of a K/V v2 mount are not supported.") + return logical.RespondWithStatusCode(resp, req, http.StatusNotFound) + } + + var subCommand string + switch req.Operation { + case logical.CreateOperation, logical.UpdateOperation: + subCommand = "put" + case logical.ReadOperation: + subCommand = "get" + case logical.ListOperation: + subCommand = "list" + case logical.DeleteOperation: + subCommand = "delete" + } + resp := &logical.Response{} + resp.AddWarning(fmt.Sprintf("Invalid path for a versioned K/V secrets engine. See the API docs for the appropriate API endpoints to use. If using the Vault CLI, use 'vault kv %s' for this operation.", subCommand)) + return logical.RespondWithStatusCode(resp, req, http.StatusNotFound) + } + + return []*framework.Path{ + &framework.Path{ + Pattern: ".*", + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{Callback: handler, Unpublished: true}, + logical.CreateOperation: &framework.PathOperation{Callback: handler, Unpublished: true}, + logical.ReadOperation: &framework.PathOperation{Callback: handler, Unpublished: true}, + logical.DeleteOperation: &framework.PathOperation{Callback: handler, Unpublished: true}, + logical.ListOperation: &framework.PathOperation{Callback: handler, Unpublished: true}, + }, + + HelpDescription: pathInvalidHelp, + }, + } +} + +func (b *versionedKVBackend) Cleanup(ctx context.Context) { + if b.upgradeCancelFunc != nil { + b.upgradeCancelFunc() + } +} + +// Invalidate invalidates the salt and the policy so replication secondaries can +// cache these values. 
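+// (In practice the cached copies are dropped here and rebuilt lazily on next
+// use, e.g. by Salt, getKeyEncryptor, or config below.)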
+func (b *versionedKVBackend) Invalidate(ctx context.Context, key string) {
+	switch key {
+	case path.Join(b.storagePrefix, salt.DefaultLocation):
+		b.l.Lock()
+		b.salt = nil
+		b.l.Unlock()
+	case path.Join(b.storagePrefix, "policy/metadata"):
+		b.l.Lock()
+		b.keyEncryptedWrapper = nil
+		b.l.Unlock()
+	case path.Join(b.storagePrefix, configPath):
+		b.globalConfigLock.Lock()
+		b.globalConfig = nil
+		b.globalConfigLock.Unlock()
+	}
+}
+
+// Salt will load the salt, or if one has not been created yet, it will
+// generate and store a new salt.
+func (b *versionedKVBackend) Salt(ctx context.Context, s logical.Storage) (*salt.Salt, error) {
+	b.l.RLock()
+	if b.salt != nil {
+		defer b.l.RUnlock()
+		return b.salt, nil
+	}
+	b.l.RUnlock()
+	b.l.Lock()
+	defer b.l.Unlock()
+	if b.salt != nil {
+		return b.salt, nil
+	}
+	salt, err := salt.NewSalt(ctx, s, &salt.Config{
+		HashFunc: salt.SHA256Hash,
+		Location: path.Join(b.storagePrefix, salt.DefaultLocation),
+	})
+	if err != nil {
+		return nil, err
+	}
+	b.salt = salt
+	return salt, nil
+}
+
+// policy loads the key policy for this backend; if one has not been created
+// yet, it will generate and store a new policy. The caller must hold the
+// backend lock.
+func (b *versionedKVBackend) policy(ctx context.Context, s logical.Storage) (*keysutil.Policy, error) {
+	// Try loading policy
+	policy, err := keysutil.LoadPolicy(ctx, s, path.Join(b.storagePrefix, "policy/metadata"))
+	if err != nil {
+		return nil, err
+	}
+	if policy != nil {
+		return policy, nil
+	}
+
+	// Policy didn't exist, create it.
+	policy = keysutil.NewPolicy(keysutil.PolicyConfig{
+		Name:                 "metadata",
+		Type:                 keysutil.KeyType_AES256_GCM96,
+		Derived:              true,
+		KDF:                  keysutil.Kdf_hkdf_sha256,
+		ConvergentEncryption: true,
+		StoragePrefix:        b.storagePrefix,
+		VersionTemplate:      keysutil.EncryptedKeyPolicyVersionTpl,
+	})
+
+	err = policy.Rotate(ctx, s, b.GetRandomReader())
+	if err != nil {
+		return nil, err
+	}
+
+	return policy, nil
+}
+
+func (b *versionedKVBackend) getKeyEncryptor(ctx context.Context, s logical.Storage) (*keysutil.EncryptedKeyStorageWrapper, error) {
+	b.l.RLock()
+	if b.keyEncryptedWrapper != nil {
+		defer b.l.RUnlock()
+		return b.keyEncryptedWrapper, nil
+	}
+	b.l.RUnlock()
+	b.l.Lock()
+	defer b.l.Unlock()
+
+	if b.keyEncryptedWrapper != nil {
+		return b.keyEncryptedWrapper, nil
+	}
+
+	policy, err := b.policy(ctx, s)
+	if err != nil {
+		return nil, err
+	}
+
+	e, err := keysutil.NewEncryptedKeyStorageWrapper(keysutil.EncryptedKeyStorageConfig{
+		Policy: policy,
+		Prefix: path.Join(b.storagePrefix, metadataPrefix),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Cache the value
+	b.keyEncryptedWrapper = e
+
+	return b.keyEncryptedWrapper, nil
+}
+
+// config takes a storage object and returns a configuration object
+func (b *versionedKVBackend) config(ctx context.Context, s logical.Storage) (*Configuration, error) {
+	b.globalConfigLock.RLock()
+	if b.globalConfig != nil {
+		defer b.globalConfigLock.RUnlock()
+		return &Configuration{
+			CasRequired:        b.globalConfig.CasRequired,
+			MaxVersions:        b.globalConfig.MaxVersions,
+			DeleteVersionAfter: b.globalConfig.DeleteVersionAfter,
+		}, nil
+	}
+
+	b.globalConfigLock.RUnlock()
+	b.globalConfigLock.Lock()
+	defer b.globalConfigLock.Unlock()
+
+	// Re-check under the write lock in case another goroutine loaded the
+	// config while we were waiting
+	if b.globalConfig != nil {
+		return &Configuration{
+			CasRequired:        b.globalConfig.CasRequired,
+			MaxVersions:        b.globalConfig.MaxVersions,
+			DeleteVersionAfter: b.globalConfig.DeleteVersionAfter,
+		}, nil
+	}
+
+	raw, err := s.Get(ctx, path.Join(b.storagePrefix, configPath))
+	if err != nil {
+		return nil, err
+	}
+
+	conf := &Configuration{}
+	if raw != nil {
+		if err := proto.Unmarshal(raw.Value, conf); err != nil {
+			return nil, err
+		}
+	}
+
+	b.globalConfig = conf
+
+	return conf, nil
+}
+
+// getVersionKey uses the salt to generate the version key for a specific
+// version of a key.
+func (b *versionedKVBackend) getVersionKey(ctx context.Context, key string, version uint64, s logical.Storage) (string, error) {
+	salt, err := b.Salt(ctx, s)
+	if err != nil {
+		return "", err
+	}
+
+	salted := salt.SaltID(fmt.Sprintf("%s|%d", key, version))
+
+	return path.Join(b.storagePrefix, versionPrefix, salted[0:3], salted[3:]), nil
+}
+
+// getKeyMetadata returns the metadata object for the provided key; if no
+// object exists, it returns nil.
+func (b *versionedKVBackend) getKeyMetadata(ctx context.Context, s logical.Storage, key string) (*KeyMetadata, error) {
+	wrapper, err := b.getKeyEncryptor(ctx, s)
+	if err != nil {
+		return nil, err
+	}
+
+	es := wrapper.Wrap(s)
+
+	item, err := es.Get(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+	if item == nil {
+		return nil, nil
+	}
+
+	meta := &KeyMetadata{}
+	err = proto.Unmarshal(item.Value, meta)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode key metadata from storage: %v", err)
+	}
+
+	return meta, nil
+}
+
+// writeKeyMetadata writes a metadata object to storage.
+func (b *versionedKVBackend) writeKeyMetadata(ctx context.Context, s logical.Storage, meta *KeyMetadata) error {
+	wrapper, err := b.getKeyEncryptor(ctx, s)
+	if err != nil {
+		return err
+	}
+
+	es := wrapper.Wrap(s)
+
+	bytes, err := proto.Marshal(meta)
+	if err != nil {
+		return err
+	}
+
+	err = es.Put(ctx, &logical.StorageEntry{
+		Key:   meta.Key,
+		Value: bytes,
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func ptypesTimestampToString(t *timestamp.Timestamp) string {
+	if t == nil {
+		return ""
+	}
+
+	return ptypes.TimestampString(t)
+}
+
+var backendHelp string = `
+This backend provides a versioned key-value store. The kv backend reads and
+writes arbitrary secrets to the storage backend. The secrets are
+encrypted/decrypted by Vault: they are never stored unencrypted in the backend
+and the backend never has an opportunity to see the unencrypted value. Each key
+can have a configured number of versions, and versions can be retrieved based on
+their version numbers.
+`
+
+var pathInvalidHelp string = backendHelp + `
+
+## PATHS
+
+The following paths are supported by this backend. To view help for
+any of the paths below, use the help command with any route matching
+the path pattern. Note that depending on the policy of your auth token,
+you may or may not be able to access certain paths.
+
+    ^config$
+        Configures settings for the KV store.
+
+    ^data/.*$
+        Write, Read, and Delete data in the Key-Value Store.
+
+    ^delete/.*$
+        Marks one or more versions as deleted in the KV store.
+
+    ^destroy/.*$
+        Permanently removes one or more versions in the KV store.
+
+    ^metadata/.*$
+        Configures settings and retrieves metadata for keys in the KV store.
+
+    ^undelete/.*$
+        Undeletes one or more versions from the KV store.
+`
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/delete_version_after.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/delete_version_after.go
new file mode 100644
index 00000000..aae4bc82
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/delete_version_after.go
@@ -0,0 +1,64 @@
+package kv
+
+import (
+	"time"
+
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/duration"
+)
+
+// deletionTime returns the creation time plus the smaller non-zero of the
+// mount and meta durations. If both mount and meta are zero, false is
+// returned.
+func deletionTime(creation time.Time, mount, meta time.Duration) (time.Time, bool) {
+	if mount == 0 && meta == 0 {
+		return time.Time{}, false
+	}
+	var min time.Duration
+	if meta != 0 {
+		min = meta
+	}
+	if (mount != 0 && mount < min) || min == 0 {
+		min = mount
+	}
+	return creation.Add(min), true
+}
+
+type deleteVersionAfterGetter interface {
+	GetDeleteVersionAfter() *duration.Duration
+}
+
+func deleteVersionAfter(v deleteVersionAfterGetter) time.Duration {
+	if v.GetDeleteVersionAfter() == nil {
+		return time.Duration(0)
+	}
+	dva, err := ptypes.Duration(v.GetDeleteVersionAfter())
+	if err != nil {
+		return time.Duration(0)
+	}
+	return dva
+}
+
+const (
+	disabled time.Duration = -1 * time.Second
+)
+
+// IsDeleteVersionAfterDisabled returns true if DeleteVersionAfter is
+// disabled.
+func (c *Configuration) IsDeleteVersionAfterDisabled() bool {
+	return deleteVersionAfter(c) == disabled
+}
+
+// DisableDeleteVersionAfter disables DeleteVersionAfter.
+func (c *Configuration) DisableDeleteVersionAfter() {
+	c.DeleteVersionAfter = ptypes.DurationProto(disabled)
+}
+
+// ResetDeleteVersionAfter resets the DeleteVersionAfter to the default
+// value.
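+// (A nil DeleteVersionAfter makes deleteVersionAfter report zero, so
+// deletionTime falls back to the mount-level duration alone.)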
+func (c *Configuration) ResetDeleteVersionAfter() { + c.DeleteVersionAfter = nil +} diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/go.mod b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/go.mod new file mode 100644 index 00000000..58e708ea --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/go.mod @@ -0,0 +1,14 @@ +module github.com/hashicorp/vault-plugin-secrets-kv + +go 1.12 + +require ( + github.com/golang/protobuf v1.3.2 + github.com/hashicorp/go-hclog v0.12.0 + github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820 + github.com/hashicorp/vault/sdk v0.1.14-0.20200215224050-f6547fa8e820 + github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect + github.com/mitchellh/mapstructure v1.1.2 + golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db // indirect + google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 // indirect +) diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/go.sum b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/go.sum new file mode 100644 index 00000000..25c7b717 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/go.sum @@ -0,0 +1,172 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= +github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820 h1:biZidYDDEWnuOI9mXnJre8lwHKhb5ym85aSXk3oz/dc= +github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= +github.com/hashicorp/vault/sdk v0.1.14-0.20200215195600-2ca765f0a500 h1:tiMX2ewq4ble+e2zENzBvaH2dMoFHe80NbnrF5Ir9Kk= +github.com/hashicorp/vault/sdk v0.1.14-0.20200215195600-2ca765f0a500/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/vault/sdk v0.1.14-0.20200215224050-f6547fa8e820 h1:TmDZ1sS6gU0hFeFlFuyJVUwRPEzifZIHCBeS2WF2uSc= +github.com/hashicorp/vault/sdk v0.1.14-0.20200215224050-f6547fa8e820/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= 
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/passthrough.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/passthrough.go new file mode 100644 index 00000000..aa24bb32 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/passthrough.go @@ -0,0 +1,283 @@ +package kv + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" +) + +type Passthrough interface { + handleRead() framework.OperationFunc + handleWrite() framework.OperationFunc + handleDelete() framework.OperationFunc + handleList() framework.OperationFunc + handleExistenceCheck() framework.ExistenceFunc +} + +// PassthroughBackendFactory returns a PassthroughBackend +// with leases switched off +func PassthroughBackendFactory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + return LeaseSwitchedPassthroughBackend(ctx, conf, false) +} + +// LeasedPassthroughBackendFactory returns a PassthroughBackend +// with leases switched on +func LeasedPassthroughBackendFactory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + return LeaseSwitchedPassthroughBackend(ctx, conf, true) +} + +// LeaseSwitchedPassthroughBackend returns a PassthroughBackend +// with leases switched on or off +func LeaseSwitchedPassthroughBackend(ctx context.Context, conf *logical.BackendConfig, leases bool) (logical.Backend, error) { + b := &PassthroughBackend{ + generateLeases: leases, + } + + backend := &framework.Backend{ + BackendType: logical.TypeLogical, + Help: strings.TrimSpace(passthroughHelp), + + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "*", + }, + }, + + Paths: []*framework.Path{ + &framework.Path{ + Pattern: framework.MatchAllRegex("path"), + + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: "Location of the secret.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.handleRead(), + logical.CreateOperation: b.handleWrite(), + logical.UpdateOperation: b.handleWrite(), + logical.DeleteOperation: b.handleDelete(), + logical.ListOperation: b.handleList(), + }, + + ExistenceCheck: b.handleExistenceCheck(), + + HelpSynopsis: strings.TrimSpace(passthroughHelpSynopsis), + HelpDescription: strings.TrimSpace(passthroughHelpDescription), + }, + }, + Secrets: []*framework.Secret{ + &framework.Secret{ + Type: "kv", + + Renew: b.handleRead(), + Revoke: func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // This is a no-op + return nil, nil + }, + }, + }, + } + + if conf == nil { + return nil, fmt.Errorf("configuration passed into backend is nil") + } + 
backend.Setup(ctx, conf) + b.Backend = backend + + return b, nil +} + +// PassthroughBackend is used for storing secrets directly into the physical +// backend. The secrets are encrypted in the durable storage and custom TTL +// information can be specified, but otherwise this backend doesn't do anything +// fancy. +type PassthroughBackend struct { + *framework.Backend + generateLeases bool +} + +func (b *PassthroughBackend) handleExistenceCheck() framework.ExistenceFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + key := data.Get("path").(string) + + out, err := req.Storage.Get(ctx, key) + if err != nil { + return false, fmt.Errorf("existence check failed: %v", err) + } + + return out != nil, nil + } +} + +func (b *PassthroughBackend) handleRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + + // Read the path + out, err := req.Storage.Get(ctx, key) + if err != nil { + return nil, fmt.Errorf("read failed: %v", err) + } + + // Fast-path the no data case + if out == nil { + return nil, nil + } + + // Decode the data + var rawData map[string]interface{} + + if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil { + return nil, fmt.Errorf("json decoding failed: %v", err) + } + + var resp *logical.Response + if b.generateLeases { + // Generate the response + resp = b.Secret("kv").Response(rawData, nil) + resp.Secret.Renewable = false + } else { + resp = &logical.Response{ + Secret: &logical.Secret{}, + Data: rawData, + } + } + + // Ensure seal wrapping is carried through if the response is + // response-wrapped + if out.SealWrap { + if resp.WrapInfo == nil { + resp.WrapInfo = &wrapping.ResponseWrapInfo{} + } + resp.WrapInfo.SealWrap = out.SealWrap + } + + // Check if there is a ttl key + ttlDuration := b.System().DefaultLeaseTTL() + ttlRaw, ok := rawData["ttl"] + if !ok { + ttlRaw, ok = rawData["lease"] + } + if ok { + dur, err := parseutil.ParseDurationSecond(ttlRaw) + if err == nil { + ttlDuration = dur + } + + if b.generateLeases { + resp.Secret.Renewable = true + } + } + + resp.Secret.TTL = ttlDuration + + return resp, nil + } +} + +func (b *PassthroughBackend) GeneratesLeases() bool { + return b.generateLeases +} + +func (b *PassthroughBackend) handleWrite() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + if key == "" { + return logical.ErrorResponse("missing path"), nil + } + + // Check that some fields are given + if len(req.Data) == 0 { + return logical.ErrorResponse("missing data fields"), nil + } + + // JSON encode the data + buf, err := json.Marshal(req.Data) + if err != nil { + return nil, fmt.Errorf("json encoding failed: %v", err) + } + + // Write out a new key + entry := &logical.StorageEntry{ + Key: key, + Value: buf, + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, fmt.Errorf("failed to write: %v", err) + } + + return nil, nil + } +} + +func (b *PassthroughBackend) handleDelete() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + + // Delete the key at the request path + if err := req.Storage.Delete(ctx, key); err != nil { + return nil, err + } + + return nil, nil + } +} + +func (b *PassthroughBackend) handleList() 
framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Right now we only handle directories, so ensure it ends with /; however, + // some physical backends may not handle the "/" case properly, so only add + // it if we're not listing the root + path := data.Get("path").(string) + if path != "" && !strings.HasSuffix(path, "/") { + path = path + "/" + } + + // List the keys at the prefix given by the request + keys, err := req.Storage.List(ctx, path) + if err != nil { + return nil, err + } + + // Generate the response + return logical.ListResponse(keys), nil + } +} + +const passthroughHelp = ` +The kv backend reads and writes arbitrary secrets to the backend. +The secrets are encrypted/decrypted by Vault: they are never stored +unencrypted in the backend and the backend never has an opportunity to +see the unencrypted value. + +TTLs can be set on a per-secret basis. These TTLs will be sent down +when that secret is read, and it is assumed that some outside process will +revoke and/or replace the secret at that path. +` + +const passthroughHelpSynopsis = ` +Pass-through secret storage to the storage backend, allowing you to +read/write arbitrary data into secret storage. +` + +const passthroughHelpDescription = ` +The pass-through backend reads and writes arbitrary data into secret storage, +encrypting it along the way. + +A TTL can be specified when writing with the "ttl" field. If given, the +duration of leases returned by this backend will be set to this value. This +can be used as a hint from the writer of a secret to the consumer of a secret +that the consumer should re-read the value before the TTL has expired. +However, any revocation must be handled by the user of this backend; the lease +duration does not affect the provided data in any way. +` diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_config.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_config.go new file mode 100644 index 00000000..c7155097 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_config.go @@ -0,0 +1,156 @@ +package kv + +import ( + "context" + "path" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// pathConfig returns the path configuration for CRUD operations on the backend +// configuration. +func pathConfig(b *versionedKVBackend) *framework.Path { + return &framework.Path{ + Pattern: "config$", + Fields: map[string]*framework.FieldSchema{ + "max_versions": { + Type: framework.TypeInt, + Description: "The number of versions to keep for each key. Defaults to 10", + }, + "cas_required": { + Type: framework.TypeBool, + Description: "If true, the backend will require the cas parameter to be set for each write", + }, + "delete_version_after": { + Type: framework.TypeSignedDurationSecond, + Description: ` +If set, the length of time before a version is deleted. A negative duration +disables the use of delete_version_after on all keys. A zero duration +clears the current setting. 
Accepts a Go duration format string.`, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.upgradeCheck(b.pathConfigWrite()), + Summary: "Configure backend level settings that are applied to every key in the key-value store.", + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.upgradeCheck(b.pathConfigWrite()), + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.upgradeCheck(b.pathConfigRead()), + Summary: "Read the backend level settings.", + }, + }, + + HelpSynopsis: confHelpSyn, + HelpDescription: confHelpDesc, + } +} + +// pathConfigRead handles read commands to the config +func (b *versionedKVBackend) pathConfigRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + config, err := b.config(ctx, req.Storage) + if err != nil { + return nil, err + } + + rdata := map[string]interface{}{ + "max_versions": config.MaxVersions, + "cas_required": config.CasRequired, + } + + var deleteVersionAfter time.Duration + if config.GetDeleteVersionAfter() != nil { + deleteVersionAfter, err = ptypes.Duration(config.GetDeleteVersionAfter()) + if err != nil { + return nil, err + } + rdata["delete_version_after"] = deleteVersionAfter.String() + } + + return &logical.Response{ + Data: rdata, + }, nil + } +} + +// pathConfigWrite handles create and update commands to the config +func (b *versionedKVBackend) pathConfigWrite() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + maxRaw, mOk := data.GetOk("max_versions") + casRaw, cOk := data.GetOk("cas_required") + dvaRaw, dvaOk := data.GetOk("delete_version_after") + + // Fast path validation + if !mOk && !cOk && !dvaOk { + return nil, nil + } + + config, err := b.config(ctx, req.Storage) + if err != nil { + return nil, err + } + + if mOk { + config.MaxVersions = uint32(maxRaw.(int)) + } + if cOk { + config.CasRequired = casRaw.(bool) + } + + if dvaOk { + dva := dvaRaw.(int) + switch { + case dva < 0: + config.DisableDeleteVersionAfter() + case dva == 0: + config.ResetDeleteVersionAfter() + default: + config.DeleteVersionAfter = ptypes.DurationProto(time.Duration(dva) * time.Second) + } + } + + bytes, err := proto.Marshal(config) + if err != nil { + return nil, err + } + + err = req.Storage.Put(ctx, &logical.StorageEntry{ + Key: path.Join(b.storagePrefix, configPath), + Value: bytes, + }) + if err != nil { + return nil, err + } + + b.globalConfigLock.Lock() + defer b.globalConfigLock.Unlock() + + b.globalConfig = config + + return nil, nil + } +} + +const confHelpSyn = `Configures settings for the KV store` +const confHelpDesc = ` +This path configures backend level settings that are applied to every key in the +key-value store. This endpoint accepts: + + * max_versions (int) - The number of versions to keep for each key. Defaults + to 10 + + * cas_required (bool) - If true, the backend will require the cas parameter + to be set for each write + + * delete_version_after (duration) - If set, the length of time before a + version is deleted. A negative duration disables the use of + delete_version_after on all keys. A zero duration clears the current + setting. Accepts a Go duration format string. 
+` diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_data.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_data.go new file mode 100644 index 00000000..1d41f41c --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_data.go @@ -0,0 +1,457 @@ +package kv + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +// pathData returns the path configuration for CRUD operations on kv +// entries. +func pathData(b *versionedKVBackend) *framework.Path { + return &framework.Path{ + Pattern: "data/" + framework.MatchAllRegex("path"), + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: "Location of the secret.", + }, + "version": { + Type: framework.TypeInt, + Description: "If provided during a read, the value at the version number will be returned", + }, + "options": { + Type: framework.TypeMap, + Description: `Options for writing a KV entry. + +Set the "cas" value to use a Check-And-Set operation. If not set, the write will +be allowed. If set to 0, a write will only be allowed if the key doesn’t exist. +If the index is non-zero, the write will only be allowed if the key’s current +version matches the version specified in the cas parameter.`, + }, + "data": { + Type: framework.TypeMap, + Description: "The contents of the data map will be stored and returned on read.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.upgradeCheck(b.pathDataWrite()), + logical.CreateOperation: b.upgradeCheck(b.pathDataWrite()), + logical.ReadOperation: b.upgradeCheck(b.pathDataRead()), + logical.DeleteOperation: b.upgradeCheck(b.pathDataDelete()), + }, + + ExistenceCheck: b.dataExistenceCheck(), + + HelpSynopsis: dataHelpSyn, + HelpDescription: dataHelpDesc, + } +} + +func (b *versionedKVBackend) dataExistenceCheck() framework.ExistenceFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + key := data.Get("path").(string) + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + // If we are returning a readonly error it means we are attempting + // to write the policy for the first time. This means no data exists + // yet and we can safely return false here. 
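+ // The storage layer may wrap ErrReadOnly, so the check below matches on the error string instead of comparing error values directly.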
+ if strings.Contains(err.Error(), logical.ErrReadOnly.Error()) { + return false, nil + } + + return false, err + } + + return meta != nil, nil + } +} + +// pathDataRead handles read commands to a kv entry +func (b *versionedKVBackend) pathDataRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + + lock := locksutil.LockForKey(b.locks, key) + lock.RLock() + defer lock.RUnlock() + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + return nil, err + } + if meta == nil { + return nil, nil + } + + verNum := meta.CurrentVersion + verParam := data.Get("version").(int) + if verParam > 0 { + verNum = uint64(verParam) + } + + // If there is no version with that number, return + vm := meta.Versions[verNum] + if vm == nil { + return nil, nil + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "data": nil, + "metadata": map[string]interface{}{ + "version": verNum, + "created_time": ptypesTimestampToString(vm.CreatedTime), + "deletion_time": ptypesTimestampToString(vm.DeletionTime), + "destroyed": vm.Destroyed, + }, + }, + } + + // If the version has been deleted, return metadata with a 404 + if vm.DeletionTime != nil { + deletionTime, err := ptypes.Timestamp(vm.DeletionTime) + if err != nil { + return nil, err + } + + if deletionTime.Before(time.Now()) { + return logical.RespondWithStatusCode(resp, req, http.StatusNotFound) + } + } + + // If the version has been destroyed, return metadata with a 404 + if vm.Destroyed { + return logical.RespondWithStatusCode(resp, req, http.StatusNotFound) + } + + versionKey, err := b.getVersionKey(ctx, key, verNum, req.Storage) + if err != nil { + return nil, err + } + + raw, err := req.Storage.Get(ctx, versionKey) + if err != nil { + return nil, err + } + if raw == nil { + return nil, errors.New("could not find version data") + } + + version := &Version{} + if err := proto.Unmarshal(raw.Value, version); err != nil { + return nil, err + } + + vData := map[string]interface{}{} + if err := json.Unmarshal(version.Data, &vData); err != nil { + return nil, err + } + + resp.Data["data"] = vData + + return resp, nil + } +} + +// pathDataWrite handles create and update commands to a kv entry +func (b *versionedKVBackend) pathDataWrite() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + if key == "" { + return logical.ErrorResponse("missing path"), nil + } + + config, err := b.config(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Parse data; this can happen before the lock so we can fail early if + // it is not set. + var marshaledData []byte + { + dataRaw, ok := data.GetOk("data") + if !ok { + return logical.ErrorResponse("no data provided"), logical.ErrInvalidRequest + } + marshaledData, err = json.Marshal(dataRaw.(map[string]interface{})) + if err != nil { + return nil, err + } + } + + lock := locksutil.LockForKey(b.locks, key) + lock.Lock() + defer lock.Unlock() + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + return nil, err + } + if meta == nil { + meta = &KeyMetadata{ + Key: key, + Versions: map[uint64]*VersionMetadata{}, + } + } + + // Parse options + { + var casRaw interface{} + var casOk bool + optionsRaw, ok := data.GetOk("options") + if ok { + options := optionsRaw.(map[string]interface{}) + + // Verify the CAS parameter is valid. 
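+ // For example, options = {"cas": 0} only permits creating a key that does not yet exist, while {"cas": 2} requires the key's current version to be exactly 2.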
+ casRaw, casOk = options["cas"] + } + + switch { + case casOk: + var cas int + if err := mapstructure.WeakDecode(casRaw, &cas); err != nil { + return logical.ErrorResponse("error parsing check-and-set parameter"), logical.ErrInvalidRequest + } + if uint64(cas) != meta.CurrentVersion { + return logical.ErrorResponse("check-and-set parameter did not match the current version"), logical.ErrInvalidRequest + } + case config.CasRequired, meta.CasRequired: + return logical.ErrorResponse("check-and-set parameter required for this call"), logical.ErrInvalidRequest + } + } + + // Create a version key for the new version + versionKey, err := b.getVersionKey(ctx, key, meta.CurrentVersion+1, req.Storage) + if err != nil { + return nil, err + } + version := &Version{ + Data: marshaledData, + CreatedTime: ptypes.TimestampNow(), + } + + ctime, err := ptypes.Timestamp(version.CreatedTime) + if err != nil { + return logical.ErrorResponse("unexpected error converting %T(%v) to time.Time: %v", version.CreatedTime, version.CreatedTime, err), logical.ErrInvalidRequest + } + + if !config.IsDeleteVersionAfterDisabled() { + if dtime, ok := deletionTime(ctime, deleteVersionAfter(config), deleteVersionAfter(meta)); ok { + dt, err := ptypes.TimestampProto(dtime) + if err != nil { + return logical.ErrorResponse("error setting deletion_time: converting %v to protobuf: %v", dtime, err), logical.ErrInvalidRequest + } + version.DeletionTime = dt + } + } + + buf, err := proto.Marshal(version) + if err != nil { + return nil, err + } + + // Write the new version + if err := req.Storage.Put(ctx, &logical.StorageEntry{ + Key: versionKey, + Value: buf, + }); err != nil { + return nil, err + } + + vm, versionToDelete := meta.AddVersion(version.CreatedTime, version.DeletionTime, config.MaxVersions) + err = b.writeKeyMetadata(ctx, req.Storage, meta) + if err != nil { + return nil, err + } + + // We create the response here so we can add warnings to it below. + resp := &logical.Response{ + Data: map[string]interface{}{ + "version": meta.CurrentVersion, + "created_time": ptypesTimestampToString(vm.CreatedTime), + "deletion_time": ptypesTimestampToString(vm.DeletionTime), + "destroyed": vm.Destroyed, + }, + } + + // Clean up the version data that is past the max version. + if versionToDelete > 0 { + + // Create a list of version keys to delete. We will delete from the + // back of the array so we can delete the oldest versions + // first. If there is an error deleting one of the keys we can + // ensure the rest will be deleted on the next go around. + var versionKeysToDelete []string + + for i := versionToDelete; i > 0; i-- { + versionKey, err := b.getVersionKey(ctx, key, i, req.Storage) + if err != nil { + resp.AddWarning(fmt.Sprintf("Error occurred when cleaning up old versions, these will be cleaned up on next write: %s", err)) + return resp, nil + } + + // We intentionally do not return these errors here. If the get + // or delete fails, they will be cleaned up on the next write. + v, err := req.Storage.Get(ctx, versionKey) + if err != nil { + resp.AddWarning(fmt.Sprintf("Error occurred when cleaning up old versions, these will be cleaned up on next write: %s", err)) + return resp, nil + } + + if v == nil { + break + } + + // append to the end of the list + versionKeysToDelete = append(versionKeysToDelete, versionKey) + } + + // Walk the list backwards, deleting the oldest versions first. This + // allows us to continue the cleanup on next write if an error + // occurs during one of the deletes. 
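+ // For example, with the default of 10 retained versions and a current version of 15, versions 1 through 5 are collected above and deleted here, oldest first.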
+ for i := len(versionKeysToDelete) - 1; i >= 0; i-- { + err := req.Storage.Delete(ctx, versionKeysToDelete[i]) + if err != nil { + resp.AddWarning(fmt.Sprintf("Error occurred when cleaning up old versions, these will be cleaned up on next write: %s", err)) + break + } + } + + } + + return resp, nil + } +} + +func (b *versionedKVBackend) pathDataDelete() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + + lock := locksutil.LockForKey(b.locks, key) + lock.Lock() + defer lock.Unlock() + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + return nil, err + } + if meta == nil { + return nil, nil + } + + // If there is no latest version, or the latest version is already + // deleted or destroyed, return + lv := meta.Versions[meta.CurrentVersion] + if lv == nil || lv.Destroyed { + return nil, nil + } + + if lv.DeletionTime != nil { + deletionTime, err := ptypes.Timestamp(lv.DeletionTime) + if err != nil { + return nil, err + } + + if deletionTime.Before(time.Now()) { + return nil, nil + } + } + + lv.DeletionTime = ptypes.TimestampNow() + + err = b.writeKeyMetadata(ctx, req.Storage, meta) + if err != nil { + return nil, err + } + + return nil, nil + } +} + +// AddVersion adds a version to the key metadata and moves the sliding window of +// max versions. It returns the newly added version and the version to delete +// from storage. +func (k *KeyMetadata) AddVersion(createdTime, deletionTime *timestamp.Timestamp, configMaxVersions uint32) (*VersionMetadata, uint64) { + if k.Versions == nil { + k.Versions = map[uint64]*VersionMetadata{} + } + + vm := &VersionMetadata{ + CreatedTime: createdTime, + DeletionTime: deletionTime, + } + + k.CurrentVersion++ + k.Versions[k.CurrentVersion] = vm + k.UpdatedTime = createdTime + if k.CreatedTime == nil { + k.CreatedTime = createdTime + } + + var maxVersions uint32 + switch { + case max(k.MaxVersions, configMaxVersions) > 0: + maxVersions = max(k.MaxVersions, configMaxVersions) + default: + maxVersions = defaultMaxVersions + } + + if uint32(k.CurrentVersion-k.OldestVersion) >= maxVersions { + versionToDelete := k.CurrentVersion - uint64(maxVersions) + // We need to do a loop here in the event that max versions has + // changed and we need to delete more than one entry. + for i := k.OldestVersion; i < versionToDelete+1; i++ { + delete(k.Versions, i) + } + + k.OldestVersion = versionToDelete + 1 + + return vm, versionToDelete + } + + return vm, 0 +} + +func max(a, b uint32) uint32 { + if b > a { + return b + } + + return a +} + +const dataHelpSyn = `Write, Read, and Delete data in the Key-Value Store.` +const dataHelpDesc = ` +This path takes a key name and, based on the operation, stores, retrieves, or +deletes versions of data. + +If a write operation is used, the endpoint takes an options object and a data +object. The options object is used to pass some options to the write command and +the data object is encrypted and stored in the storage backend. Each write +operation for a key creates a new version and does not overwrite the previous +data. + +A read operation will return the latest version for a key unless the "version" +parameter is set; in that case it returns the version at that number. + +Delete operations are a soft delete. They will mark the latest version as +deleted, but the underlying data will not be fully removed. Delete operations +can be undone. 
+` diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_delete.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_delete.go new file mode 100644 index 00000000..1560effb --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_delete.go @@ -0,0 +1,177 @@ +package kv + +import ( + "context" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// pathsDelete returns the path configuration for the delete and undelete paths +func pathsDelete(b *versionedKVBackend) []*framework.Path { + return []*framework.Path{ + &framework.Path{ + Pattern: "delete/" + framework.MatchAllRegex("path"), + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: "Location of the secret.", + }, + "versions": { + Type: framework.TypeCommaIntSlice, + Description: "The versions to be archived. The versioned data will not be deleted, but it will no longer be returned in normal get requests.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.upgradeCheck(b.pathDeleteWrite()), + logical.CreateOperation: b.upgradeCheck(b.pathDeleteWrite()), + }, + + HelpSynopsis: deleteHelpSyn, + HelpDescription: deleteHelpDesc, + }, + &framework.Path{ + Pattern: "undelete/" + framework.MatchAllRegex("path"), + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: "Location of the secret.", + }, + "versions": { + Type: framework.TypeCommaIntSlice, + Description: "The versions to unarchive. The versions will be restored and their data will be returned on normal get requests.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.upgradeCheck(b.pathUndeleteWrite()), + logical.CreateOperation: b.upgradeCheck(b.pathUndeleteWrite()), + }, + + HelpSynopsis: undeleteHelpSyn, + HelpDescription: undeleteHelpDesc, + }, + } +} + +// pathUndeleteWrite is used to undelete a set of versions +func (b *versionedKVBackend) pathUndeleteWrite() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + + versions := data.Get("versions").([]int) + if len(versions) == 0 { + return logical.ErrorResponse("No version number provided"), logical.ErrInvalidRequest + } + + config, err := b.config(ctx, req.Storage) + if err != nil { + return nil, err + } + + lock := locksutil.LockForKey(b.locks, key) + lock.Lock() + defer lock.Unlock() + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + return nil, err + } + if meta == nil { + return nil, nil + } + + for _, verNum := range versions { + // If there is no version or the version is destroyed continue + lv := meta.Versions[uint64(verNum)] + if lv == nil || lv.Destroyed { + continue + } + lv.DeletionTime = nil + + if !config.IsDeleteVersionAfterDisabled() { + if dtime, ok := deletionTime(time.Now(), deleteVersionAfter(config), deleteVersionAfter(meta)); ok { + dt, err := ptypes.TimestampProto(dtime) + if err != nil { + return logical.ErrorResponse("error setting deletion_time: converting %v to protobuf: %v", dtime, err), logical.ErrInvalidRequest + } + lv.DeletionTime = dt + } + } + } + err = b.writeKeyMetadata(ctx, req.Storage, meta) + if err != nil { + return nil, err + } + + return nil, nil + } +} + +// 
pathDeleteWrite is used to delete a set of versions. +func (b *versionedKVBackend) pathDeleteWrite() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + + versions := data.Get("versions").([]int) + if len(versions) == 0 { + return logical.ErrorResponse("No version number provided"), logical.ErrInvalidRequest + } + + lock := locksutil.LockForKey(b.locks, key) + lock.Lock() + defer lock.Unlock() + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + return nil, err + } + if meta == nil { + return nil, nil + } + + for _, verNum := range versions { + // If there is no latest version, or the latest version is already + // deleted or destroyed continue + lv := meta.Versions[uint64(verNum)] + if lv == nil || lv.Destroyed { + continue + } + + if lv.DeletionTime != nil { + deletionTime, err := ptypes.Timestamp(lv.DeletionTime) + if err != nil { + return nil, err + } + + if deletionTime.Before(time.Now()) { + continue + } + } + + lv.DeletionTime = ptypes.TimestampNow() + } + + err = b.writeKeyMetadata(ctx, req.Storage, meta) + if err != nil { + return nil, err + } + + return nil, nil + } +} + +const deleteHelpSyn = `Marks one or more versions as deleted in the KV store.` +const deleteHelpDesc = ` +Deletes the data for the provided version and path in the key-value store. The +versioned data will not be fully removed, but marked as deleted and will no +longer be returned in normal get requests. This operation can be undone. +` + +const undeleteHelpSyn = `Undeletes one or more versions from the KV store.` +const undeleteHelpDesc = ` +Undeletes the data for the provided version and path in the key-value store. +This restores the data, allowing it to be returned on get requests. +` diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_destroy.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_destroy.go new file mode 100644 index 00000000..121ab5b9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_destroy.go @@ -0,0 +1,94 @@ +package kv + +import ( + "context" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// pathDestroy returns the path configuration for the destroy endpoint +func pathDestroy(b *versionedKVBackend) *framework.Path { + return &framework.Path{ + Pattern: "destroy/" + framework.MatchAllRegex("path"), + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: "Location of the secret.", + }, + "versions": { + Type: framework.TypeCommaIntSlice, + Description: "The versions to destroy. 
Their data will be permanently deleted.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.upgradeCheck(b.pathDestroyWrite()), + logical.CreateOperation: b.upgradeCheck(b.pathDestroyWrite()), + }, + + HelpSynopsis: destroyHelpSyn, + HelpDescription: destroyHelpDesc, + } +} + +func (b *versionedKVBackend) pathDestroyWrite() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + + versions := data.Get("versions").([]int) + if len(versions) == 0 { + return logical.ErrorResponse("no version number provided"), logical.ErrInvalidRequest + } + + lock := locksutil.LockForKey(b.locks, key) + lock.Lock() + defer lock.Unlock() + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + return nil, err + } + if meta == nil { + return nil, nil + } + + for _, verNum := range versions { + // If there is no version, or the version is already destroyed, + // continue + lv := meta.Versions[uint64(verNum)] + if lv == nil || lv.Destroyed { + continue + } + + lv.Destroyed = true + } + + // Write the metadata key before deleting the versions + err = b.writeKeyMetadata(ctx, req.Storage, meta) + if err != nil { + return nil, err + } + + for _, verNum := range versions { + // Delete versioned data + versionKey, err := b.getVersionKey(ctx, key, uint64(verNum), req.Storage) + if err != nil { + return nil, err + } + + err = req.Storage.Delete(ctx, versionKey) + if err != nil { + return nil, err + } + } + + return nil, nil + } +} + +const destroyHelpSyn = `Permanently removes one or more versions in the KV store` +const destroyHelpDesc = ` +Permanently removes the specified version data for the provided key and version +numbers from the key-value store. +` diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_metadata.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_metadata.go new file mode 100644 index 00000000..87d6b711 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/path_metadata.go @@ -0,0 +1,251 @@ +package kv + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// pathMetadata returns the path configuration for CRUD operations on the +// metadata endpoint +func pathMetadata(b *versionedKVBackend) *framework.Path { + return &framework.Path{ + Pattern: "metadata/" + framework.MatchAllRegex("path"), + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: "Location of the secret.", + }, + "cas_required": { + Type: framework.TypeBool, + Description: ` +If true the key will require the cas parameter to be set on all write requests. +If false, the backend’s configuration will be used.`, + }, + "max_versions": { + Type: framework.TypeInt, + Description: ` +The number of versions to keep. If not set, the backend’s configured max +version is used.`, + }, + "delete_version_after": { + Type: framework.TypeDurationSecond, + Description: ` +The length of time before a version is deleted. If not set, the backend's +configured delete_version_after is used. Cannot be greater than the +backend's delete_version_after. A zero duration clears the current setting. +A negative duration will cause an error. 
+`, + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.upgradeCheck(b.pathMetadataWrite()), + logical.CreateOperation: b.upgradeCheck(b.pathMetadataWrite()), + logical.ReadOperation: b.upgradeCheck(b.pathMetadataRead()), + logical.DeleteOperation: b.upgradeCheck(b.pathMetadataDelete()), + logical.ListOperation: b.upgradeCheck(b.pathMetadataList()), + }, + + ExistenceCheck: b.metadataExistenceCheck(), + + HelpSynopsis: metadataHelpSyn, + HelpDescription: metadataHelpDesc, + } +} + +func (b *versionedKVBackend) metadataExistenceCheck() framework.ExistenceFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + key := data.Get("path").(string) + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + // If we are returning a readonly error it means we are attempting + // to write the policy for the first time. This means no data exists + // yet and we can safely return false here. + if strings.Contains(err.Error(), logical.ErrReadOnly.Error()) { + return false, nil + } + + return false, err + } + + return meta != nil, nil + } +} + +func (b *versionedKVBackend) pathMetadataList() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + + // Get an encrypted key storage object + wrapper, err := b.getKeyEncryptor(ctx, req.Storage) + if err != nil { + return nil, err + } + + es := wrapper.Wrap(req.Storage) + + // Use encrypted key storage to list the keys + keys, err := es.List(ctx, key) + return logical.ListResponse(keys), err + } +} + +func (b *versionedKVBackend) pathMetadataRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + return nil, err + } + if meta == nil { + return nil, nil + } + + versions := make(map[string]interface{}, len(meta.Versions)) + for i, v := range meta.Versions { + versions[fmt.Sprintf("%d", i)] = map[string]interface{}{ + "created_time": ptypesTimestampToString(v.CreatedTime), + "deletion_time": ptypesTimestampToString(v.DeletionTime), + "destroyed": v.Destroyed, + } + } + + var deleteVersionAfter time.Duration + if meta.GetDeleteVersionAfter() != nil { + deleteVersionAfter, err = ptypes.Duration(meta.GetDeleteVersionAfter()) + if err != nil { + return nil, err + } + } + + return &logical.Response{ + Data: map[string]interface{}{ + "versions": versions, + "current_version": meta.CurrentVersion, + "oldest_version": meta.OldestVersion, + "created_time": ptypesTimestampToString(meta.CreatedTime), + "updated_time": ptypesTimestampToString(meta.UpdatedTime), + "max_versions": meta.MaxVersions, + "cas_required": meta.CasRequired, + "delete_version_after": deleteVersionAfter.String(), + }, + }, nil + } +} + +func (b *versionedKVBackend) pathMetadataWrite() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + if key == "" { + return logical.ErrorResponse("missing path"), nil + } + + maxRaw, mOk := data.GetOk("max_versions") + casRaw, cOk := data.GetOk("cas_required") + deleteVersionAfterRaw, dvaOk := data.GetOk("delete_version_after") + + // Fast path validation + if !mOk && !cOk && !dvaOk { + return nil, nil + } + + config, err 
:= b.config(ctx, req.Storage) + if err != nil { + return nil, err + } + + var resp *logical.Response + if cOk && config.CasRequired && !casRaw.(bool) { + resp = &logical.Response{} + resp.AddWarning("\"cas_required\" set to false, but is mandated by backend config. This value will be ignored.") + } + + lock := locksutil.LockForKey(b.locks, key) + lock.Lock() + defer lock.Unlock() + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + return nil, err + } + if meta == nil { + now := ptypes.TimestampNow() + meta = &KeyMetadata{ + Key: key, + Versions: map[uint64]*VersionMetadata{}, + CreatedTime: now, + UpdatedTime: now, + } + } + + if mOk { + meta.MaxVersions = uint32(maxRaw.(int)) + } + if cOk { + meta.CasRequired = casRaw.(bool) + } + if dvaOk { + meta.DeleteVersionAfter = ptypes.DurationProto(time.Duration(deleteVersionAfterRaw.(int)) * time.Second) + } + + err = b.writeKeyMetadata(ctx, req.Storage, meta) + return resp, err + } +} + +func (b *versionedKVBackend) pathMetadataDelete() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := data.Get("path").(string) + + lock := locksutil.LockForKey(b.locks, key) + lock.Lock() + defer lock.Unlock() + + meta, err := b.getKeyMetadata(ctx, req.Storage, key) + if err != nil { + return nil, err + } + if meta == nil { + return nil, nil + } + + // Delete each version. + for id := range meta.Versions { + versionKey, err := b.getVersionKey(ctx, key, id, req.Storage) + if err != nil { + return nil, err + } + + err = req.Storage.Delete(ctx, versionKey) + if err != nil { + return nil, err + } + } + + // Get an encrypted key storage object + wrapper, err := b.getKeyEncryptor(ctx, req.Storage) + if err != nil { + return nil, err + } + + es := wrapper.Wrap(req.Storage) + + // Use encrypted key storage to delete the key + err = es.Delete(ctx, key) + return nil, err + } +} + +const metadataHelpSyn = `Allows interaction with key metadata and settings in the KV store.` +const metadataHelpDesc = ` +This endpoint allows for reading information about a key in the key-value +store, writing key settings, and permanently deleting a key and all versions. +` diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/types.pb.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/types.pb.go new file mode 100644 index 00000000..3eb8cd96 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/types.pb.go @@ -0,0 +1,416 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: types.proto + +package kv + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// If values are added to this, be sure to update the config() function +type Configuration struct { + MaxVersions uint32 `protobuf:"varint,1,opt,name=max_versions,json=maxVersions,proto3" json:"max_versions,omitempty"` + CasRequired bool `protobuf:"varint,2,opt,name=cas_required,json=casRequired,proto3" json:"cas_required,omitempty"` + DeleteVersionAfter *duration.Duration `protobuf:"bytes,3,opt,name=delete_version_after,json=deleteVersionAfter,proto3" json:"delete_version_after,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configuration) Reset() { *m = Configuration{} } +func (m *Configuration) String() string { return proto.CompactTextString(m) } +func (*Configuration) ProtoMessage() {} +func (*Configuration) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0} +} + +func (m *Configuration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configuration.Unmarshal(m, b) +} +func (m *Configuration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configuration.Marshal(b, m, deterministic) +} +func (m *Configuration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configuration.Merge(m, src) +} +func (m *Configuration) XXX_Size() int { + return xxx_messageInfo_Configuration.Size(m) +} +func (m *Configuration) XXX_DiscardUnknown() { + xxx_messageInfo_Configuration.DiscardUnknown(m) +} + +var xxx_messageInfo_Configuration proto.InternalMessageInfo + +func (m *Configuration) GetMaxVersions() uint32 { + if m != nil { + return m.MaxVersions + } + return 0 +} + +func (m *Configuration) GetCasRequired() bool { + if m != nil { + return m.CasRequired + } + return false +} + +func (m *Configuration) GetDeleteVersionAfter() *duration.Duration { + if m != nil { + return m.DeleteVersionAfter + } + return nil +} + +type VersionMetadata struct { + // CreatedTime is when the version was created. + CreatedTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` + // DeletionTime is the time this version becomes invalid. + // Set to Now() to delete the version before the configured + // delete time. + DeletionTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=deletion_time,json=deletionTime,proto3" json:"deletion_time,omitempty"` + // Destroyed is used to specify that this version + // has been removed and the underlying data deleted. 
+ Destroyed bool `protobuf:"varint,3,opt,name=destroyed,proto3" json:"destroyed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionMetadata) Reset() { *m = VersionMetadata{} } +func (m *VersionMetadata) String() string { return proto.CompactTextString(m) } +func (*VersionMetadata) ProtoMessage() {} +func (*VersionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{1} +} + +func (m *VersionMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionMetadata.Unmarshal(m, b) +} +func (m *VersionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionMetadata.Marshal(b, m, deterministic) +} +func (m *VersionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionMetadata.Merge(m, src) +} +func (m *VersionMetadata) XXX_Size() int { + return xxx_messageInfo_VersionMetadata.Size(m) +} +func (m *VersionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_VersionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionMetadata proto.InternalMessageInfo + +func (m *VersionMetadata) GetCreatedTime() *timestamp.Timestamp { + if m != nil { + return m.CreatedTime + } + return nil +} + +func (m *VersionMetadata) GetDeletionTime() *timestamp.Timestamp { + if m != nil { + return m.DeletionTime + } + return nil +} + +func (m *VersionMetadata) GetDestroyed() bool { + if m != nil { + return m.Destroyed + } + return false +} + +type KeyMetadata struct { + // Key is the key for this entry + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Versions is the map of versionID -> VersionMetadata. + // Useful when listing all versions. + Versions map[uint64]*VersionMetadata `protobuf:"bytes,2,rep,name=versions,proto3" json:"versions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CurrentVersion is the latest version of the value + CurrentVersion uint64 `protobuf:"varint,3,opt,name=current_version,json=currentVersion,proto3" json:"current_version,omitempty"` + // OldestVersion is the oldest version of the value. + OldestVersion uint64 `protobuf:"varint,4,opt,name=oldest_version,json=oldestVersion,proto3" json:"oldest_version,omitempty"` + // Created time is when the metadata was created. + CreatedTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` + // Updated time was the last time the metadata version + // was updated. + UpdatedTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=updated_time,json=updatedTime,proto3" json:"updated_time,omitempty"` + // MaxVersions specifies how many versions to keep around. + // If empty value, defaults to the configured Max + // for the mount. + MaxVersions uint32 `protobuf:"varint,7,opt,name=max_versions,json=maxVersions,proto3" json:"max_versions,omitempty"` + // CasRequired specifies if the cas parameter is + // required for this key + CasRequired bool `protobuf:"varint,8,opt,name=cas_required,json=casRequired,proto3" json:"cas_required,omitempty"` + // DeleteVersionAfter specifies how long to keep versions around. If + // empty value, defaults to the configured delete_version_after for the + // mount. 
+ DeleteVersionAfter *duration.Duration `protobuf:"bytes,9,opt,name=delete_version_after,json=deleteVersionAfter,proto3" json:"delete_version_after,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyMetadata) Reset() { *m = KeyMetadata{} } +func (m *KeyMetadata) String() string { return proto.CompactTextString(m) } +func (*KeyMetadata) ProtoMessage() {} +func (*KeyMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{2} +} + +func (m *KeyMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyMetadata.Unmarshal(m, b) +} +func (m *KeyMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyMetadata.Marshal(b, m, deterministic) +} +func (m *KeyMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyMetadata.Merge(m, src) +} +func (m *KeyMetadata) XXX_Size() int { + return xxx_messageInfo_KeyMetadata.Size(m) +} +func (m *KeyMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_KeyMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyMetadata proto.InternalMessageInfo + +func (m *KeyMetadata) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *KeyMetadata) GetVersions() map[uint64]*VersionMetadata { + if m != nil { + return m.Versions + } + return nil +} + +func (m *KeyMetadata) GetCurrentVersion() uint64 { + if m != nil { + return m.CurrentVersion + } + return 0 +} + +func (m *KeyMetadata) GetOldestVersion() uint64 { + if m != nil { + return m.OldestVersion + } + return 0 +} + +func (m *KeyMetadata) GetCreatedTime() *timestamp.Timestamp { + if m != nil { + return m.CreatedTime + } + return nil +} + +func (m *KeyMetadata) GetUpdatedTime() *timestamp.Timestamp { + if m != nil { + return m.UpdatedTime + } + return nil +} + +func (m *KeyMetadata) GetMaxVersions() uint32 { + if m != nil { + return m.MaxVersions + } + return 0 +} + +func (m *KeyMetadata) GetCasRequired() bool { + if m != nil { + return m.CasRequired + } + return false +} + +func (m *KeyMetadata) GetDeleteVersionAfter() *duration.Duration { + if m != nil { + return m.DeleteVersionAfter + } + return nil +} + +type Version struct { + // Data is a JSON object with string keys that + // represents the user supplied data. + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // CreatedTime is when the version was created. + CreatedTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` + // DeletionTime is the time this version becomes invalid. + // Set to Now() to delete the version before the configured + // deletion time. 
+ DeletionTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=deletion_time,json=deletionTime,proto3" json:"deletion_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3} +} + +func (m *Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (m *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(m, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Version) GetCreatedTime() *timestamp.Timestamp { + if m != nil { + return m.CreatedTime + } + return nil +} + +func (m *Version) GetDeletionTime() *timestamp.Timestamp { + if m != nil { + return m.DeletionTime + } + return nil +} + +type UpgradeInfo struct { + // Started time is when the upgrade was started. + StartedTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + // done is set to true once the backend has been successfully + // upgraded. + Done bool `protobuf:"varint,2,opt,name=done,proto3" json:"done,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeInfo) Reset() { *m = UpgradeInfo{} } +func (m *UpgradeInfo) String() string { return proto.CompactTextString(m) } +func (*UpgradeInfo) ProtoMessage() {} +func (*UpgradeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{4} +} + +func (m *UpgradeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeInfo.Unmarshal(m, b) +} +func (m *UpgradeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeInfo.Marshal(b, m, deterministic) +} +func (m *UpgradeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeInfo.Merge(m, src) +} +func (m *UpgradeInfo) XXX_Size() int { + return xxx_messageInfo_UpgradeInfo.Size(m) +} +func (m *UpgradeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeInfo proto.InternalMessageInfo + +func (m *UpgradeInfo) GetStartedTime() *timestamp.Timestamp { + if m != nil { + return m.StartedTime + } + return nil +} + +func (m *UpgradeInfo) GetDone() bool { + if m != nil { + return m.Done + } + return false +} + +func init() { + proto.RegisterType((*Configuration)(nil), "kv.Configuration") + proto.RegisterType((*VersionMetadata)(nil), "kv.VersionMetadata") + proto.RegisterType((*KeyMetadata)(nil), "kv.KeyMetadata") + proto.RegisterMapType((map[uint64]*VersionMetadata)(nil), "kv.KeyMetadata.VersionsEntry") + proto.RegisterType((*Version)(nil), "kv.Version") + proto.RegisterType((*UpgradeInfo)(nil), "kv.UpgradeInfo") +} + +func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } + +var fileDescriptor_d938547f84707355 = 
[]byte{ + // 484 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x8e, 0xd3, 0x30, + 0x18, 0x54, 0xd2, 0xec, 0x6e, 0xfb, 0x39, 0xd9, 0x45, 0x86, 0x43, 0xa9, 0xf8, 0x29, 0x91, 0x10, + 0xe5, 0x92, 0x95, 0xca, 0x05, 0x90, 0x56, 0x08, 0x01, 0x07, 0xb4, 0x42, 0x42, 0x16, 0x70, 0x2d, + 0xde, 0xfa, 0x6b, 0x15, 0xb5, 0x8d, 0x83, 0xe3, 0x54, 0x9b, 0x87, 0xe0, 0x11, 0xb8, 0xf0, 0x02, + 0xbc, 0x22, 0xb2, 0x63, 0x77, 0x97, 0x82, 0x54, 0x0a, 0x37, 0x6b, 0x3c, 0xf3, 0x79, 0x3c, 0x9e, + 0x04, 0x88, 0x6e, 0x4a, 0xac, 0xb2, 0x52, 0x49, 0x2d, 0x69, 0xb8, 0x58, 0x0f, 0xee, 0xcf, 0xa5, + 0x9c, 0x2f, 0xf1, 0xd4, 0x22, 0x17, 0xf5, 0xec, 0x54, 0xe7, 0x2b, 0xac, 0x34, 0x5f, 0x95, 0x2d, + 0x69, 0x70, 0x6f, 0x9b, 0x20, 0x6a, 0xc5, 0x75, 0x2e, 0x8b, 0x76, 0x3f, 0xfd, 0x1e, 0x40, 0xf2, + 0x4a, 0x16, 0xb3, 0x7c, 0xee, 0x70, 0xfa, 0x00, 0xe2, 0x15, 0xbf, 0x9c, 0xac, 0x51, 0x55, 0xb9, + 0x2c, 0xaa, 0x7e, 0x30, 0x0c, 0x46, 0x09, 0x23, 0x2b, 0x7e, 0xf9, 0xc9, 0x41, 0x86, 0x32, 0xe5, + 0xd5, 0x44, 0xe1, 0x97, 0x3a, 0x57, 0x28, 0xfa, 0xe1, 0x30, 0x18, 0x75, 0x19, 0x99, 0xf2, 0x8a, + 0x39, 0x88, 0x9e, 0xc3, 0x2d, 0x81, 0x4b, 0xd4, 0xe8, 0x07, 0x4d, 0xf8, 0x4c, 0xa3, 0xea, 0x77, + 0x86, 0xc1, 0x88, 0x8c, 0x6f, 0x67, 0xad, 0xad, 0xcc, 0xdb, 0xca, 0x5e, 0xbb, 0xe3, 0x19, 0x6d, + 0x65, 0xee, 0xac, 0x97, 0x46, 0x94, 0xfe, 0x08, 0xe0, 0xc4, 0x01, 0xef, 0x50, 0x73, 0xc1, 0x35, + 0xa7, 0x67, 0x10, 0x4f, 0x15, 0x72, 0x8d, 0x62, 0x62, 0xee, 0x6c, 0x6d, 0x92, 0xf1, 0xe0, 0xb7, + 0xc1, 0x1f, 0x7c, 0x20, 0x8c, 0x38, 0xbe, 0x41, 0xe8, 0x0b, 0x48, 0xec, 0x41, 0xc6, 0x99, 0xd5, + 0x87, 0x3b, 0xf5, 0xb1, 0x17, 0xd8, 0x01, 0x77, 0xa0, 0x27, 0xb0, 0xd2, 0x4a, 0x36, 0x28, 0xec, + 0xad, 0xba, 0xec, 0x0a, 0x48, 0xbf, 0x46, 0x40, 0xce, 0xb1, 0xd9, 0xb8, 0xbd, 0x01, 0x9d, 0x05, + 0x36, 0xd6, 0x64, 0x8f, 0x99, 0x25, 0x7d, 0x06, 0xdd, 0x4d, 0xc4, 0xe1, 0xb0, 0x33, 0x22, 0xe3, + 0xbb, 0xd9, 0x62, 0x9d, 0x5d, 0x13, 0x65, 0x3e, 0xef, 0x37, 0x85, 0x56, 0x0d, 0xdb, 0xd0, 0xe9, + 0x23, 0x38, 0x99, 0xd6, 0x4a, 0x61, 0xa1, 0x7d, 0xb8, 0xd6, 0x40, 0xc4, 0x8e, 0x1d, 0xec, 0x84, + 0xf4, 0x21, 0x1c, 0xcb, 0xa5, 0x31, 0xb5, 0xe1, 0x45, 0x96, 0x97, 0xb4, 0xa8, 0xa7, 0x6d, 0x47, + 0x79, 0xb0, 0x5f, 0x94, 0x67, 0x10, 0xd7, 0xa5, 0xb8, 0x92, 0x1f, 0xee, 0x96, 0x3b, 0xbe, 0x95, + 0x6f, 0xf7, 0xed, 0x68, 0x77, 0xdf, 0xba, 0x7f, 0xdf, 0xb7, 0xde, 0x3f, 0xf4, 0x6d, 0xf0, 0x1e, + 0x92, 0x5f, 0xb2, 0xbf, 0xfe, 0x7c, 0x51, 0xfb, 0x7c, 0x8f, 0xe1, 0x60, 0xcd, 0x97, 0xb5, 0xef, + 0xcd, 0x4d, 0xf3, 0x76, 0x5b, 0x15, 0x65, 0x2d, 0xe3, 0x79, 0xf8, 0x34, 0x48, 0xbf, 0x05, 0x70, + 0xe4, 0xe3, 0xa6, 0x10, 0x99, 0x6d, 0x3b, 0x2d, 0x66, 0xd1, 0x1f, 0xdb, 0x1c, 0xfe, 0x67, 0x9b, + 0x3b, 0xfb, 0xb5, 0x39, 0xfd, 0x0c, 0xe4, 0x63, 0x39, 0x57, 0x5c, 0xe0, 0xdb, 0x62, 0x26, 0x8d, + 0x9d, 0x4a, 0x73, 0xb5, 0xcf, 0xc7, 0xe5, 0xf8, 0xd6, 0x8e, 0xb9, 0xa1, 0x2c, 0xd0, 0xfd, 0x17, + 0xec, 0xfa, 0xe2, 0xd0, 0x8a, 0x9e, 0xfc, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x81, 0xe5, 0xbe, 0x7d, + 0xc3, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/types.proto b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/types.proto new file mode 100644 index 00000000..f45095f6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/types.proto @@ -0,0 +1,88 @@ +syntax = "proto3"; +package kv; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; + +// If values are added to this, be sure to update the config() function +message Configuration { + uint32 
max_versions = 1; + bool cas_required = 2; + google.protobuf.Duration delete_version_after = 3; +} + +message VersionMetadata { + // CreatedTime is when the version was created. + google.protobuf.Timestamp created_time = 1; + + // DeletionTime is the time this version becomes invalid. + // Set to Now() to delete the version before the configured + // delete time. + google.protobuf.Timestamp deletion_time = 2; + + // Destroyed is used to specify that this version + // has been removed and the underlying data deleted. + bool destroyed = 3; +} + +message KeyMetadata { + // Key is the key for this entry + string key = 1; + + // Versions is the map of versionID -> VersionMetadata. + // Useful when listing all versions. + map<uint64, VersionMetadata> versions = 2; + + // CurrentVersion is the latest version of the value + uint64 current_version = 3; + + // OldestVersion is the oldest version of the value. + uint64 oldest_version = 4; + + // Created time is when the metadata was created. + google.protobuf.Timestamp created_time = 5; + + // Updated time is the last time the metadata version + // was updated. + google.protobuf.Timestamp updated_time = 6; + + // MaxVersions specifies how many versions to keep around. + // If empty value, defaults to the configured Max + // for the mount. + uint32 max_versions = 7; + + // CasRequired specifies if the cas parameter is + // required for this key + bool cas_required = 8; + + // DeleteVersionAfter specifies how long to keep versions around. If + // empty value, defaults to the configured delete_version_after for the + // mount. + google.protobuf.Duration delete_version_after = 9; +} + + +message Version { + // Data is a JSON object with string keys that + // represents the user supplied data. + bytes data = 1; + + // CreatedTime is when the version was created. + google.protobuf.Timestamp created_time = 2; + + // DeletionTime is the time this version becomes invalid. + // Set to Now() to delete the version before the configured + // deletion time. + google.protobuf.Timestamp deletion_time = 3; +} + +message UpgradeInfo { + // Started time is when the upgrade was started. + google.protobuf.Timestamp started_time = 1; + + // done is set to true once the backend has been successfully + // upgraded. 
+ bool done = 2; +} + + diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/upgrade.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/upgrade.go new file mode 100644 index 00000000..35893921 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/upgrade.go @@ -0,0 +1,263 @@ +package kv + +import ( + "context" + "errors" + "fmt" + "path" + "strings" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *versionedKVBackend) perfSecondaryCheck() bool { + replState := b.System().ReplicationState() + if (!b.System().LocalMount() && replState.HasState(consts.ReplicationPerformanceSecondary)) || + replState.HasState(consts.ReplicationPerformanceStandby) { + return true + } + return false +} + +func (b *versionedKVBackend) upgradeCheck(next framework.OperationFunc) framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if atomic.LoadUint32(b.upgrading) == 1 { + // Sleep for a very short time before returning. This helps clients + // that are trying to access a mount immediately upon enabling be + // more likely to behave correctly since the operation should take + // almost no time. + time.Sleep(15 * time.Millisecond) + + if atomic.LoadUint32(b.upgrading) == 1 { + if b.perfSecondaryCheck() { + return logical.ErrorResponse("Waiting for the primary to upgrade from non-versioned to versioned data. This backend will be unavailable for a brief period and will resume service when the primary is finished."), logical.ErrInvalidRequest + } else { + return logical.ErrorResponse("Upgrading from non-versioned to versioned data. This backend will be unavailable for a brief period and will resume service shortly."), logical.ErrInvalidRequest + } + } + } + + return next(ctx, req, data) + } +} + +func (b *versionedKVBackend) upgradeDone(ctx context.Context, s logical.Storage) (bool, error) { + upgradeEntry, err := s.Get(ctx, path.Join(b.storagePrefix, "upgrading")) + if err != nil { + return false, err + } + + var upgradeInfo UpgradeInfo + if upgradeEntry != nil { + err := proto.Unmarshal(upgradeEntry.Value, &upgradeInfo) + if err != nil { + return false, err + } + } + + return upgradeInfo.Done, nil +} + +func (b *versionedKVBackend) Upgrade(ctx context.Context, s logical.Storage) error { + replState := b.System().ReplicationState() + + // Don't run if the plugin is in metadata mode. + if pluginutil.InMetadataMode() { + b.Logger().Info("upgrade not running while plugin is in metadata mode") + return nil + } + + // Don't run while on a DR secondary. + if replState.HasState(consts.ReplicationDRSecondary) { + b.Logger().Info("upgrade not running on disaster recovery replication secondary") + return nil + } + + if !atomic.CompareAndSwapUint32(b.upgrading, 0, 1) { + return errors.New("upgrade already in process") + } + + // If we are a replication secondary or performance standby, wait until the primary has finished + // upgrading. + if b.perfSecondaryCheck() { + b.Logger().Info("upgrade not running on performance replication secondary or performance standby") + + go func() { + for { + time.Sleep(time.Second) + + // If we failed because the context is closed we are + // shutting down. 
Close this go routine and set the upgrade + // flag back to 0 for good measure. + if ctx.Err() != nil { + atomic.StoreUint32(b.upgrading, 0) + return + } + + done, err := b.upgradeDone(ctx, s) + if err != nil { + b.Logger().Error("upgrading resulted in error", "error", err) + } + + if done { + break + } + } + + atomic.StoreUint32(b.upgrading, 0) + }() + + return nil + } + + upgradeInfo := &UpgradeInfo{ + StartedTime: ptypes.TimestampNow(), + } + + // Encode the canary + info, err := proto.Marshal(upgradeInfo) + if err != nil { + return err + } + + // Because this is a long running process we need a new context. + ctx = context.Background() + + upgradeKey := func(key string) error { + if strings.HasPrefix(key, b.storagePrefix) { + return nil + } + + // Read the old data + data, err := s.Get(ctx, key) + if err != nil { + return err + } + + locksutil.LockForKey(b.locks, key).Lock() + defer locksutil.LockForKey(b.locks, key).Unlock() + + meta := &KeyMetadata{ + Key: key, + Versions: map[uint64]*VersionMetadata{}, + } + + versionKey, err := b.getVersionKey(ctx, key, 1, s) + if err != nil { + return err + } + + version := &Version{ + Data: data.Value, + CreatedTime: ptypes.TimestampNow(), + } + + buf, err := proto.Marshal(version) + if err != nil { + return err + } + + // Store the version data + if err := s.Put(ctx, &logical.StorageEntry{ + Key: versionKey, + Value: buf, + }); err != nil { + return err + } + + // Store the metadata + meta.AddVersion(version.CreatedTime, nil, 1) + err = b.writeKeyMetadata(ctx, s, meta) + if err != nil { + return err + } + + // delete the old key + err = s.Delete(ctx, key) + if err != nil { + return err + } + + return nil + } + + // Run the actual upgrade in a go routine so we don't block the client on a + // potentially long process. + go func() { + + // Write the canary value and if we are read only wait until the setup + // process has finished. 
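+ // The storage layer returns logical.ErrSetupReadOnly while setup is still + // in progress; the loop below retries the write every 10ms until it + // succeeds or fails with a different error.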
+ READONLY_LOOP: + for { + err := s.Put(ctx, &logical.StorageEntry{ + Key: path.Join(b.storagePrefix, "upgrading"), + Value: info, + }) + switch { + case err == nil: + break READONLY_LOOP + case err.Error() == logical.ErrSetupReadOnly.Error(): + time.Sleep(10 * time.Millisecond) + default: + b.Logger().Error("writing upgrade info resulted in an error", "error", err) + return + } + } + + b.Logger().Info("collecting keys to upgrade") + keys, err := logical.CollectKeys(ctx, s) + if err != nil { + b.Logger().Error("upgrading resulted in error", "error", err) + return + } + + b.Logger().Info("done collecting keys", "num_keys", len(keys)) + for i, key := range keys { + if b.Logger().IsDebug() && i%500 == 0 { + b.Logger().Debug("upgrading keys", "progress", fmt.Sprintf("%d/%d", i, len(keys))) + } + err := upgradeKey(key) + if err != nil { + b.Logger().Error("upgrading resulted in error", "error", err, "progress", fmt.Sprintf("%d/%d", i+1, len(keys))) + return + } + } + + b.Logger().Info("upgrading keys finished") + + // We do this now so that we ensure it's written by the primary before + // secondaries unblock + b.l.Lock() + if _, err = b.policy(ctx, s); err != nil { + b.Logger().Error("error checking/creating policy after upgrade", "error", err) + } + b.l.Unlock() + + // Write upgrade done value + upgradeInfo.Done = true + info, err := proto.Marshal(upgradeInfo) + if err != nil { + b.Logger().Error("encoding upgrade info resulted in an error", "error", err) + } + + err = s.Put(ctx, &logical.StorageEntry{ + Key: path.Join(b.storagePrefix, "upgrading"), + Value: info, + }) + if err != nil { + b.Logger().Error("writing upgrade done resulted in an error", "error", err) + } + + atomic.StoreUint32(b.upgrading, 0) + }() + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/LICENSE b/vendor/github.com/hashicorp/vault/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. 
"Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. 
You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go index 6807c89c..32c77bc6 100644 --- a/vendor/github.com/hashicorp/vault/api/auth_token.go +++ b/vendor/github.com/hashicorp/vault/api/auth_token.go @@ -115,6 +115,26 @@ func (c *TokenAuth) LookupSelf() (*Secret, error) { return ParseSecret(resp.Body) } +func (c *TokenAuth) RenewAccessor(accessor string, increment int) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/renew-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + "increment": increment, + }); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { r := c.c.NewRequest("PUT", "/v1/auth/token/renew") if err := r.SetJSONBody(map[string]interface{}{ diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go index fb50f207..2f4f90ee 100644 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -88,6 +88,9 @@ type Config struct { // The Backoff function to use; a default is used if not provided Backoff retryablehttp.Backoff + // The CheckRetry function to use; a default is used if not provided + CheckRetry retryablehttp.CheckRetry + // Limiter is the rate limiter used by the client. // If this pointer is nil, then there will be no limit set. // In contrast, if this pointer is set, even to an empty struct, @@ -140,8 +143,8 @@ func DefaultConfig() *Config { config := &Config{ Address: "https://127.0.0.1:8200", HttpClient: cleanhttp.DefaultPooledClient(), + Timeout: time.Second * 60, } - config.HttpClient.Timeout = time.Second * 60 transport := config.HttpClient.Transport.(*http.Transport) transport.TLSHandshakeTimeout = 10 * time.Second @@ -427,10 +430,14 @@ func NewClient(c *Config) (*Client, error) { } client := &Client{ - addr: u, - config: c, + addr: u, + config: c, + headers: make(http.Header), } + // Add the VaultRequest SSRF protection header + client.headers[consts.RequestHeaderName] = []string{"true"} + if token := os.Getenv(EnvVaultToken); token != "" { client.token = token } @@ -488,6 +495,16 @@ func (c *Client) SetMaxRetries(retries int) { c.config.MaxRetries = retries } +// SetCheckRetry sets the CheckRetry function to be used for future requests. +func (c *Client) SetCheckRetry(checkRetry retryablehttp.CheckRetry) { + c.modifyLock.RLock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + c.modifyLock.RUnlock() + + c.config.CheckRetry = checkRetry +} + // SetClientTimeout sets the client request timeout func (c *Client) SetClientTimeout(timeout time.Duration) { c.modifyLock.RLock() @@ -586,7 +603,7 @@ func (c *Client) ClearToken() { } // Headers gets the current set of headers used for requests. This returns a -// copy; to modify it make modifications locally and use SetHeaders. +// copy; to modify it call AddHeader or SetHeaders. 
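+// Because the returned http.Header is a fresh copy, callers may mutate it +// freely without racing other goroutines that share this client.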
func (c *Client) Headers() http.Header { c.modifyLock.RLock() defer c.modifyLock.RUnlock() @@ -605,11 +622,19 @@ func (c *Client) Headers() http.Header { return ret } -// SetHeaders sets the headers to be used for future requests. -func (c *Client) SetHeaders(headers http.Header) { +// AddHeader allows a single header key/value pair to be added +// in a race-safe fashion. +func (c *Client) AddHeader(key, value string) { c.modifyLock.Lock() defer c.modifyLock.Unlock() + c.headers.Add(key, value) +} +// SetHeaders clears all previous headers and uses only the given +// ones going forward. +func (c *Client) SetHeaders(headers http.Header) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() c.headers = headers } @@ -643,6 +668,7 @@ func (c *Client) Clone() (*Client, error) { MaxRetries: config.MaxRetries, Timeout: config.Timeout, Backoff: config.Backoff, + CheckRetry: config.CheckRetry, Limiter: config.Limiter, } config.modifyLock.RUnlock() @@ -660,6 +686,12 @@ func (c *Client) SetPolicyOverride(override bool) { c.policyOverride = override } +// portMap defines the standard port map +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + // NewRequest creates a new raw request object to query the Vault server // configured for this client. This is an advanced method and generally // doesn't need to be called externally. @@ -669,7 +701,6 @@ func (c *Client) NewRequest(method, requestPath string) *Request { token := c.token mfaCreds := c.mfaCreds wrappingLookupFunc := c.wrappingLookupFunc - headers := c.headers policyOverride := c.policyOverride c.modifyLock.RUnlock() @@ -677,10 +708,16 @@ // record and take the highest match; this is not designed for high-availability, just discovery var host string = addr.Host if addr.Port() == "" { - // Internet Draft specifies that the SRV record is ignored if a port is given - _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname()) - if err == nil && len(addrs) > 0 { - host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port) + // Avoid lookup of SRV record if scheme is known + port, ok := portMap[addr.Scheme] + if ok { + host = net.JoinHostPort(host, port) + } else { + // Internet Draft specifies that the SRV record is ignored if a port is given + _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname()) + if err == nil && len(addrs) > 0 { + host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port) + } } } @@ -714,10 +751,7 @@ func (c *Client) NewRequest(method, requestPath string) *Request { req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) } - if headers != nil { - req.Headers = headers - } - + req.Headers = c.Headers() req.PolicyOverride = policyOverride return req @@ -740,6 +774,7 @@ func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Respon c.config.modifyLock.RLock() limiter := c.config.Limiter maxRetries := c.config.MaxRetries + checkRetry := c.config.CheckRetry backoff := c.config.Backoff httpClient := c.config.HttpClient timeout := c.config.Timeout @@ -776,6 +811,13 @@ START: } if timeout != 0 { + // NOTE: this leaks a timer. But when we defer a cancel call here for + // the returned function we see errors in tests with context canceled. + // Although the request is done by the time we exit this function it is + // still causing something else to go wrong. Maybe it ends up being + // tied to the response somehow and reading the response body ends up + // checking it, or something. 
I don't know, but until we can chase this + // down, keep it not-canceled even though vet complains. ctx, _ = context.WithTimeout(ctx, timeout) } req.Request = req.Request.WithContext(ctx) @@ -784,13 +826,17 @@ START: backoff = retryablehttp.LinearJitterBackoff } + if checkRetry == nil { + checkRetry = retryablehttp.DefaultRetryPolicy + } + client := &retryablehttp.Client{ HTTPClient: httpClient, RetryWaitMin: 1000 * time.Millisecond, RetryWaitMax: 1500 * time.Millisecond, RetryMax: maxRetries, - CheckRetry: retryablehttp.DefaultRetryPolicy, Backoff: backoff, + CheckRetry: checkRetry, ErrorHandler: retryablehttp.PassthroughErrorHandler, } diff --git a/vendor/github.com/hashicorp/vault/api/go.mod b/vendor/github.com/hashicorp/vault/api/go.mod index ae77ff6f..29810c6c 100644 --- a/vendor/github.com/hashicorp/vault/api/go.mod +++ b/vendor/github.com/hashicorp/vault/api/go.mod @@ -1,19 +1,20 @@ module github.com/hashicorp/vault/api -go 1.12 +go 1.13 replace github.com/hashicorp/vault/sdk => ../sdk require ( + github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-multierror v1.0.0 - github.com/hashicorp/go-retryablehttp v0.5.4 + github.com/hashicorp/go-retryablehttp v0.6.2 github.com/hashicorp/go-rootcerts v1.0.1 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/vault/sdk v0.1.14-0.20190909201848-e0fbf9b652e2 + github.com/hashicorp/vault/sdk v0.1.14-0.20200215195600-2ca765f0a500 github.com/mitchellh/mapstructure v1.1.2 - golang.org/x/net v0.0.0-20190620200207-3b0461eec859 + golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 gopkg.in/square/go-jose.v2 v2.3.1 ) diff --git a/vendor/github.com/hashicorp/vault/api/go.sum b/vendor/github.com/hashicorp/vault/api/go.sum index 0f4df0ae..56fb2811 100644 --- a/vendor/github.com/hashicorp/vault/api/go.sum +++ b/vendor/github.com/hashicorp/vault/api/go.sum @@ -1,20 +1,28 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 
h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -24,28 +32,39 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= -github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.2 
h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -60,9 +79,14 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= 
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= @@ -70,49 +94,38 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go new file mode 100644 index 00000000..841c51c0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go @@ -0,0 +1,384 @@ +package api + +import ( + "errors" + "math/rand" + "sync" + "time" +) + +var ( + ErrLifetimeWatcherMissingInput = errors.New("missing input") + ErrLifetimeWatcherMissingSecret = errors.New("missing secret") + ErrLifetimeWatcherNotRenewable = errors.New("secret is not renewable") + ErrLifetimeWatcherNoSecretData = errors.New("returned empty secret data") + + // Deprecated; kept for compatibility + ErrRenewerMissingInput = errors.New("missing 
input to renewer") + ErrRenewerMissingSecret = errors.New("missing secret to renew") + ErrRenewerNotRenewable = errors.New("secret is not renewable") + ErrRenewerNoSecretData = errors.New("returned empty secret data") + + // DefaultLifetimeWatcherRenewBuffer is the default size of the buffer for renew + // messages on the channel. + DefaultLifetimeWatcherRenewBuffer = 5 + // Deprecated: kept for backwards compatibility + DefaultRenewerRenewBuffer = 5 +) + +type RenewBehavior uint + +const ( + // RenewBehaviorIgnoreErrors means we will attempt to keep renewing until + // we hit the lifetime threshold. It also ignores errors stemming from + // passing a non-renewable lease in. In practice, this means you simply + // reauthenticate/refetch credentials when the watcher exits. This is the + // default. + RenewBehaviorIgnoreErrors RenewBehavior = iota + + // RenewBehaviorRenewDisabled turns off renewal attempts entirely. This + // allows you to simply watch lifetime and have the watcher return at a + // reasonable threshold without actually making Vault calls. + RenewBehaviorRenewDisabled + + // RenewBehaviorErrorOnErrors is the "legacy" behavior which always exits + // on some kind of error + RenewBehaviorErrorOnErrors +) + +// LifetimeWatcher is a process for watching lifetime of a secret. +// +// watcher, err := client.NewLifetimeWatcher(&LifetimeWatcherInput{ +// Secret: mySecret, +// }) +// go watcher.Start() +// defer watcher.Stop() +// +// for { +// select { +// case err := <-watcher.DoneCh(): +// if err != nil { +// log.Fatal(err) +// } +// +// // Renewal is now over +// case renewal := <-watcher.RenewCh(): +// log.Printf("Successfully renewed: %#v", renewal) +// } +// } +// +// +// `DoneCh` will return if renewal fails, or if the remaining lease duration is +// under a built-in threshold and either renewing is not extending it or +// renewing is disabled. In both cases, the caller should attempt a re-read of +// the secret. Clients should check the return value of the channel to see if +// renewal was successful. +type LifetimeWatcher struct { + l sync.Mutex + + client *Client + secret *Secret + grace time.Duration + random *rand.Rand + increment int + doneCh chan error + renewCh chan *RenewOutput + renewBehavior RenewBehavior + + stopped bool + stopCh chan struct{} + + errLifetimeWatcherNotRenewable error + errLifetimeWatcherNoSecretData error +} + +// LifetimeWatcherInput is used as input to the renew function. +type LifetimeWatcherInput struct { + // Secret is the secret to renew + Secret *Secret + + // DEPRECATED: this does not do anything. + Grace time.Duration + + // Rand is the randomizer to use for underlying randomization. If not + // provided, one will be generated and seeded automatically. If provided, it + // is assumed to have already been seeded. + Rand *rand.Rand + + // RenewBuffer is the size of the buffered channel where renew messages are + // dispatched. + RenewBuffer int + + // The new TTL, in seconds, that should be set on the lease. The TTL set + // here may or may not be honored by the vault server, based on Vault + // configuration or any associated max TTL values. + Increment int + + // RenewBehavior controls what happens when a renewal errors or the + // passed-in secret is not renewable. + RenewBehavior RenewBehavior +} + +// RenewOutput is the metadata returned to the client (if it's listening) to +// renew messages. +type RenewOutput struct { + // RenewedAt is the timestamp when the renewal took place (UTC). 
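+ // It is stamped with time.Now().UTC() at the moment the renewal is + // pushed onto RenewCh.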
+ RenewedAt time.Time + + // Secret is the underlying renewal data. It's the same struct as all data + // that is returned from Vault, but since this is renewal data, it will not + // usually include the secret itself. + Secret *Secret +} + +// NewLifetimeWatcher creates a new renewer from the given input. +func (c *Client) NewLifetimeWatcher(i *LifetimeWatcherInput) (*LifetimeWatcher, error) { + if i == nil { + return nil, ErrLifetimeWatcherMissingInput + } + + secret := i.Secret + if secret == nil { + return nil, ErrLifetimeWatcherMissingSecret + } + + random := i.Rand + if random == nil { + random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + } + + renewBuffer := i.RenewBuffer + if renewBuffer == 0 { + renewBuffer = DefaultLifetimeWatcherRenewBuffer + } + + return &LifetimeWatcher{ + client: c, + secret: secret, + increment: i.Increment, + random: random, + doneCh: make(chan error, 1), + renewCh: make(chan *RenewOutput, renewBuffer), + renewBehavior: i.RenewBehavior, + + stopped: false, + stopCh: make(chan struct{}), + + errLifetimeWatcherNotRenewable: ErrLifetimeWatcherNotRenewable, + errLifetimeWatcherNoSecretData: ErrLifetimeWatcherNoSecretData, + }, nil +} + +// Deprecated: exists only for backwards compatibility. Calls +// NewLifetimeWatcher, and sets compatibility flags. +func (c *Client) NewRenewer(i *LifetimeWatcherInput) (*LifetimeWatcher, error) { + if i == nil { + return nil, ErrRenewerMissingInput + } + + secret := i.Secret + if secret == nil { + return nil, ErrRenewerMissingSecret + } + + renewer, err := c.NewLifetimeWatcher(i) + if err != nil { + return nil, err + } + + renewer.renewBehavior = RenewBehaviorErrorOnErrors + renewer.errLifetimeWatcherNotRenewable = ErrRenewerNotRenewable + renewer.errLifetimeWatcherNoSecretData = ErrRenewerNoSecretData + return renewer, err +} + +// DoneCh returns the channel where the renewer will publish when renewal stops. +// If renewal stopped because of an error, that error is sent on the channel. +func (r *LifetimeWatcher) DoneCh() <-chan error { + return r.doneCh +} + +// RenewCh is a channel that receives a message when a successful renewal takes +// place and includes metadata about the renewal. +func (r *LifetimeWatcher) RenewCh() <-chan *RenewOutput { + return r.renewCh +} + +// Stop stops the renewer. +func (r *LifetimeWatcher) Stop() { + r.l.Lock() + defer r.l.Unlock() + + if !r.stopped { + close(r.stopCh) + r.stopped = true + } +} + +// Start starts a background process for watching the lifetime of this secret. +// If renewal is enabled, when the secret has auth data, this attempts to renew +// the auth (token); when the secret has a lease, this attempts to renew the +// lease. +func (r *LifetimeWatcher) Start() { + r.doneCh <- r.doRenew() +} + +// Renew is for compatibility with the legacy api.Renewer. Calling Renew +// simply chains to Start. +func (r *LifetimeWatcher) Renew() { + r.Start() +} + +// doRenew runs the renewal loop for both auth tokens and leased secrets. 
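+// It derives a grace window from the initial lease TTL, renews (or merely +// waits, when renewal is disabled or the credential is non-renewable), and +// sleeps for roughly 2/3 of the current lease duration plus 1/3 of the grace +// period between attempts, returning once the remaining TTL falls inside the +// grace window so the caller can re-authenticate or re-read the secret.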
+func (r *LifetimeWatcher) doRenew() error { + var nonRenewable bool + var tokenMode bool + var initLeaseDuration int + var credString string + var renewFunc func(string, int) (*Secret, error) + + switch { + case r.secret.Auth != nil: + tokenMode = true + nonRenewable = !r.secret.Auth.Renewable + initLeaseDuration = r.secret.Auth.LeaseDuration + credString = r.secret.Auth.ClientToken + renewFunc = r.client.Auth().Token().RenewTokenAsSelf + default: + nonRenewable = !r.secret.Renewable + initLeaseDuration = r.secret.LeaseDuration + credString = r.secret.LeaseID + renewFunc = r.client.Sys().Renew + } + + if credString == "" || + (nonRenewable && r.renewBehavior == RenewBehaviorErrorOnErrors) { + return r.errLifetimeWatcherNotRenewable + } + + initialTime := time.Now() + priorDuration := time.Duration(initLeaseDuration) * time.Second + r.calculateGrace(priorDuration) + + for { + // Check if we are stopped. + select { + case <-r.stopCh: + return nil + default: + } + + var leaseDuration time.Duration + fallbackLeaseDuration := initialTime.Add(priorDuration).Sub(time.Now()) + + switch { + case nonRenewable || r.renewBehavior == RenewBehaviorRenewDisabled: + // Can't or won't renew, just keep the same expiration so we exit + // when it's reauthentication time + leaseDuration = fallbackLeaseDuration + + default: + // Renew the token + renewal, err := renewFunc(credString, r.increment) + if err != nil || renewal == nil || (tokenMode && renewal.Auth == nil) { + if r.renewBehavior == RenewBehaviorErrorOnErrors { + if err != nil { + return err + } + if renewal == nil || (tokenMode && renewal.Auth == nil) { + return r.errLifetimeWatcherNoSecretData + } + } + + leaseDuration = fallbackLeaseDuration + break + } + + // Push a message that a renewal took place. + select { + case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: + default: + } + + // Possibly error if we are not renewable + if ((tokenMode && !renewal.Auth.Renewable) || (!tokenMode && !renewal.Renewable)) && + r.renewBehavior == RenewBehaviorErrorOnErrors { + return r.errLifetimeWatcherNotRenewable + } + + // Grab the lease duration + newDuration := renewal.LeaseDuration + if tokenMode { + newDuration = renewal.Auth.LeaseDuration + } + + leaseDuration = time.Duration(newDuration) * time.Second + } + + // We keep evaluating a new grace period so long as the lease is + // extending. Once it stops extending, we've hit the max and need to + // rely on the grace duration. + if leaseDuration > priorDuration { + r.calculateGrace(leaseDuration) + } + priorDuration = leaseDuration + + // The sleep duration is set to 2/3 of the current lease duration plus + // 1/3 of the current grace period, which adds jitter. + sleepDuration := time.Duration(float64(leaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) + + // If we are within grace, return now; or, if the amount of time we + // would sleep would land us in the grace period. This helps with short + // tokens; for example, you don't want a current lease duration of 4 + // seconds, a grace period of 3 seconds, and end up sleeping for more + // than three of those seconds and having a very small budget of time + // to renew. + if leaseDuration <= r.grace || leaseDuration-sleepDuration <= r.grace { + return nil + } + + select { + case <-r.stopCh: + return nil + case <-time.After(sleepDuration): + continue + } + } +} + +// sleepDuration calculates the time to sleep given the base lease duration. The +// base is the resulting lease duration. 
It will be reduced to 1/3 and
+// multiplied by a random float between 0.0 and 1.0. This extra randomness
+// prevents multiple clients from all trying to renew simultaneously.
+func (r *LifetimeWatcher) sleepDuration(base time.Duration) time.Duration {
+	sleep := float64(base)
+
+	// Renew at 1/3 the remaining lease. This will give us an opportunity to retry
+	// at least one more time should the first renewal fail.
+	sleep = sleep / 3.0
+
+	// Add randomness so that many clients do not hit Vault simultaneously.
+	sleep = sleep * (r.random.Float64() + 1) / 2.0
+
+	return time.Duration(sleep)
+}
+
+// calculateGrace calculates the grace period based on a reasonable set of
+// assumptions given the total lease time; it also adds some jitter so that
+// clients do not stay in sync.
+func (r *LifetimeWatcher) calculateGrace(leaseDuration time.Duration) {
+	if leaseDuration == 0 {
+		r.grace = 0
+		return
+	}
+
+	leaseNanos := float64(leaseDuration.Nanoseconds())
+	jitterMax := 0.1 * leaseNanos
+
+	// For a given lease duration, we want to allow 80-90% of that to elapse,
+	// so the remaining amount is the grace period
+	r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax))
+}
+
+type Renewer = LifetimeWatcher
+type RenewerInput = LifetimeWatcherInput
diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go
index d5f94644..f4eb02a8 100644
--- a/vendor/github.com/hashicorp/vault/api/logical.go
+++ b/vendor/github.com/hashicorp/vault/api/logical.go
@@ -134,9 +134,20 @@ func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, erro
 		return nil, err
 	}
 
+	return c.write(path, r)
+}
+
+func (c *Logical) WriteBytes(path string, data []byte) (*Secret, error) {
+	r := c.c.NewRequest("PUT", "/v1/"+path)
+	r.BodyBytes = data
+
+	return c.write(path, r)
+}
+
+func (c *Logical) write(path string, request *Request) (*Secret, error) {
 	ctx, cancelFunc := context.WithCancel(context.Background())
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+	resp, err := c.c.RawRequestWithContext(ctx, request)
 	if resp != nil {
 		defer resp.Body.Close()
 	}
diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go
index e664d5eb..3aa4e6e4 100644
--- a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go
+++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go
@@ -118,6 +118,9 @@ func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error)
 		return nil, errwrap.Wrapf("error during api client creation: {{err}}", err)
 	}
 
+	// Reset token value to make sure nothing has been set by default
+	client.ClearToken()
+
 	secret, err := client.Logical().Unwrap(unwrapToken)
 	if err != nil {
 		return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err)
diff --git a/vendor/github.com/hashicorp/vault/api/renewer.go b/vendor/github.com/hashicorp/vault/api/renewer.go
deleted file mode 100644
index 1d37a193..00000000
--- a/vendor/github.com/hashicorp/vault/api/renewer.go
+++ /dev/null
@@ -1,349 +0,0 @@
-package api
-
-import (
-	"errors"
-	"math/rand"
-	"sync"
-	"time"
-)
-
-var (
-	ErrRenewerMissingInput  = errors.New("missing input to renewer")
-	ErrRenewerMissingSecret = errors.New("missing secret to renew")
-	ErrRenewerNotRenewable  = errors.New("secret is not renewable")
-	ErrRenewerNoSecretData  = errors.New("returned empty secret data")
-
-	// DefaultRenewerRenewBuffer is the default size of the buffer for renew
-	//
messages on the channel. - DefaultRenewerRenewBuffer = 5 -) - -// Renewer is a process for renewing a secret. -// -// renewer, err := client.NewRenewer(&RenewerInput{ -// Secret: mySecret, -// }) -// go renewer.Renew() -// defer renewer.Stop() -// -// for { -// select { -// case err := <-renewer.DoneCh(): -// if err != nil { -// log.Fatal(err) -// } -// -// // Renewal is now over -// case renewal := <-renewer.RenewCh(): -// log.Printf("Successfully renewed: %#v", renewal) -// } -// } -// -// -// The `DoneCh` will return if renewal fails or if the remaining lease duration -// after a renewal is less than or equal to the grace (in number of seconds). In -// both cases, the caller should attempt a re-read of the secret. Clients should -// check the return value of the channel to see if renewal was successful. -type Renewer struct { - l sync.Mutex - - client *Client - secret *Secret - grace time.Duration - random *rand.Rand - increment int - doneCh chan error - renewCh chan *RenewOutput - - stopped bool - stopCh chan struct{} -} - -// RenewerInput is used as input to the renew function. -type RenewerInput struct { - // Secret is the secret to renew - Secret *Secret - - // DEPRECATED: this does not do anything. - Grace time.Duration - - // Rand is the randomizer to use for underlying randomization. If not - // provided, one will be generated and seeded automatically. If provided, it - // is assumed to have already been seeded. - Rand *rand.Rand - - // RenewBuffer is the size of the buffered channel where renew messages are - // dispatched. - RenewBuffer int - - // The new TTL, in seconds, that should be set on the lease. The TTL set - // here may or may not be honored by the vault server, based on Vault - // configuration or any associated max TTL values. - Increment int -} - -// RenewOutput is the metadata returned to the client (if it's listening) to -// renew messages. -type RenewOutput struct { - // RenewedAt is the timestamp when the renewal took place (UTC). - RenewedAt time.Time - - // Secret is the underlying renewal data. It's the same struct as all data - // that is returned from Vault, but since this is renewal data, it will not - // usually include the secret itself. - Secret *Secret -} - -// NewRenewer creates a new renewer from the given input. -func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) { - if i == nil { - return nil, ErrRenewerMissingInput - } - - secret := i.Secret - if secret == nil { - return nil, ErrRenewerMissingSecret - } - - random := i.Rand - if random == nil { - random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) - } - - renewBuffer := i.RenewBuffer - if renewBuffer == 0 { - renewBuffer = DefaultRenewerRenewBuffer - } - - return &Renewer{ - client: c, - secret: secret, - increment: i.Increment, - random: random, - doneCh: make(chan error, 1), - renewCh: make(chan *RenewOutput, renewBuffer), - - stopped: false, - stopCh: make(chan struct{}), - }, nil -} - -// DoneCh returns the channel where the renewer will publish when renewal stops. -// If there is an error, this will be an error. -func (r *Renewer) DoneCh() <-chan error { - return r.doneCh -} - -// RenewCh is a channel that receives a message when a successful renewal takes -// place and includes metadata about the renewal. -func (r *Renewer) RenewCh() <-chan *RenewOutput { - return r.renewCh -} - -// Stop stops the renewer. 
-func (r *Renewer) Stop() { - r.l.Lock() - if !r.stopped { - close(r.stopCh) - r.stopped = true - } - r.l.Unlock() -} - -// Renew starts a background process for renewing this secret. When the secret -// has auth data, this attempts to renew the auth (token). When the secret has -// a lease, this attempts to renew the lease. -func (r *Renewer) Renew() { - var result error - if r.secret.Auth != nil { - result = r.renewAuth() - } else { - result = r.renewLease() - } - - r.doneCh <- result -} - -// renewAuth is a helper for renewing authentication. -func (r *Renewer) renewAuth() error { - if !r.secret.Auth.Renewable || r.secret.Auth.ClientToken == "" { - return ErrRenewerNotRenewable - } - - priorDuration := time.Duration(r.secret.Auth.LeaseDuration) * time.Second - r.calculateGrace(priorDuration) - - client, token := r.client, r.secret.Auth.ClientToken - - for { - // Check if we are stopped. - select { - case <-r.stopCh: - return nil - default: - } - - // Renew the auth. - renewal, err := client.Auth().Token().RenewTokenAsSelf(token, r.increment) - if err != nil { - return err - } - - // Push a message that a renewal took place. - select { - case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: - default: - } - - // Somehow, sometimes, this happens. - if renewal == nil || renewal.Auth == nil { - return ErrRenewerNoSecretData - } - - // Do nothing if we are not renewable - if !renewal.Auth.Renewable { - return ErrRenewerNotRenewable - } - - // Grab the lease duration - leaseDuration := time.Duration(renewal.Auth.LeaseDuration) * time.Second - - // We keep evaluating a new grace period so long as the lease is - // extending. Once it stops extending, we've hit the max and need to - // rely on the grace duration. - if leaseDuration > priorDuration { - r.calculateGrace(leaseDuration) - } - priorDuration = leaseDuration - - // The sleep duration is set to 2/3 of the current lease duration plus - // 1/3 of the current grace period, which adds jitter. - sleepDuration := time.Duration(float64(leaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) - - // If we are within grace, return now; or, if the amount of time we - // would sleep would land us in the grace period. This helps with short - // tokens; for example, you don't want a current lease duration of 4 - // seconds, a grace period of 3 seconds, and end up sleeping for more - // than three of those seconds and having a very small budget of time - // to renew. - if leaseDuration <= r.grace || leaseDuration-sleepDuration <= r.grace { - return nil - } - - select { - case <-r.stopCh: - return nil - case <-time.After(sleepDuration): - continue - } - } -} - -// renewLease is a helper for renewing a lease. -func (r *Renewer) renewLease() error { - if !r.secret.Renewable || r.secret.LeaseID == "" { - return ErrRenewerNotRenewable - } - - priorDuration := time.Duration(r.secret.LeaseDuration) * time.Second - r.calculateGrace(priorDuration) - - client, leaseID := r.client, r.secret.LeaseID - - for { - // Check if we are stopped. - select { - case <-r.stopCh: - return nil - default: - } - - // Renew the lease. - renewal, err := client.Sys().Renew(leaseID, r.increment) - if err != nil { - return err - } - - // Push a message that a renewal took place. - select { - case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: - default: - } - - // Somehow, sometimes, this happens. 
- if renewal == nil { - return ErrRenewerNoSecretData - } - - // Do nothing if we are not renewable - if !renewal.Renewable { - return ErrRenewerNotRenewable - } - - // Grab the lease duration - leaseDuration := time.Duration(renewal.LeaseDuration) * time.Second - - // We keep evaluating a new grace period so long as the lease is - // extending. Once it stops extending, we've hit the max and need to - // rely on the grace duration. - if leaseDuration > priorDuration { - r.calculateGrace(leaseDuration) - } - priorDuration = leaseDuration - - // The sleep duration is set to 2/3 of the current lease duration plus - // 1/3 of the current grace period, which adds jitter. - sleepDuration := time.Duration(float64(leaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) - - // If we are within grace, return now; or, if the amount of time we - // would sleep would land us in the grace period. This helps with short - // tokens; for example, you don't want a current lease duration of 4 - // seconds, a grace period of 3 seconds, and end up sleeping for more - // than three of those seconds and having a very small budget of time - // to renew. - if leaseDuration <= r.grace || leaseDuration-sleepDuration <= r.grace { - return nil - } - - select { - case <-r.stopCh: - return nil - case <-time.After(sleepDuration): - continue - } - } -} - -// sleepDuration calculates the time to sleep given the base lease duration. The -// base is the resulting lease duration. It will be reduced to 1/3 and -// multiplied by a random float between 0.0 and 1.0. This extra randomness -// prevents multiple clients from all trying to renew simultaneously. -func (r *Renewer) sleepDuration(base time.Duration) time.Duration { - sleep := float64(base) - - // Renew at 1/3 the remaining lease. This will give us an opportunity to retry - // at least one more time should the first renewal fail. - sleep = sleep / 3.0 - - // Use a randomness so many clients do not hit Vault simultaneously. - sleep = sleep * (r.random.Float64() + 1) / 2.0 - - return time.Duration(sleep) -} - -// calculateGrace calculates the grace period based on a reasonable set of -// assumptions given the total lease time; it also adds some jitter to not have -// clients be in sync. 
-func (r *Renewer) calculateGrace(leaseDuration time.Duration) { - if leaseDuration == 0 { - r.grace = 0 - return - } - - leaseNanos := float64(leaseDuration.Nanoseconds()) - jitterMax := 0.1 * leaseNanos - - // For a given lease duration, we want to allow 80-90% of that to elapse, - // so the remaining amount is the grace period - r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax)) -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go index 66f72dff..870dacb0 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go +++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go @@ -10,6 +10,10 @@ func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, err return c.generateRootStatusCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt") } +func (c *Sys) GenerateRecoveryOperationTokenStatus() (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommon("/v1/sys/generate-recovery-token/attempt") +} + func (c *Sys) generateRootStatusCommon(path string) (*GenerateRootStatusResponse, error) { r := c.c.NewRequest("GET", path) @@ -34,6 +38,10 @@ func (c *Sys) GenerateDROperationTokenInit(otp, pgpKey string) (*GenerateRootSta return c.generateRootInitCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt", otp, pgpKey) } +func (c *Sys) GenerateRecoveryOperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommon("/v1/sys/generate-recovery-token/attempt", otp, pgpKey) +} + func (c *Sys) generateRootInitCommon(path, otp, pgpKey string) (*GenerateRootStatusResponse, error) { body := map[string]interface{}{ "otp": otp, @@ -66,6 +74,10 @@ func (c *Sys) GenerateDROperationTokenCancel() error { return c.generateRootCancelCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt") } +func (c *Sys) GenerateRecoveryOperationTokenCancel() error { + return c.generateRootCancelCommon("/v1/sys/generate-recovery-token/attempt") +} + func (c *Sys) generateRootCancelCommon(path string) error { r := c.c.NewRequest("DELETE", path) @@ -86,6 +98,10 @@ func (c *Sys) GenerateDROperationTokenUpdate(shard, nonce string) (*GenerateRoot return c.generateRootUpdateCommon("/v1/sys/replication/dr/secondary/generate-operation-token/update", shard, nonce) } +func (c *Sys) GenerateRecoveryOperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommon("/v1/sys/generate-recovery-token/update", shard, nonce) +} + func (c *Sys) generateRootUpdateCommon(path, shard, nonce string) (*GenerateRootStatusResponse, error) { body := map[string]interface{}{ "key": shard, diff --git a/vendor/github.com/hashicorp/vault/api/sys_leases.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go index 09c9642a..40826a7d 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_leases.go +++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go @@ -28,7 +28,13 @@ func (c *Sys) Renew(id string, increment int) (*Secret, error) { } func (c *Sys) Revoke(id string) error { - r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke/"+id) + r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke") + body := map[string]interface{}{ + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return err + } ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() diff --git 
a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go index 354b1ee9..589df945 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go +++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -129,12 +129,13 @@ func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { } type MountInput struct { - Type string `json:"type"` - Description string `json:"description"` - Config MountConfigInput `json:"config"` - Local bool `json:"local"` - SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` - Options map[string]string `json:"options"` + Type string `json:"type"` + Description string `json:"description"` + Config MountConfigInput `json:"config"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` + Options map[string]string `json:"options"` // Deprecated: Newer server responses should be returning this information in the // Type field (json: "type") instead. @@ -159,14 +160,15 @@ type MountConfigInput struct { } type MountOutput struct { - UUID string `json:"uuid"` - Type string `json:"type"` - Description string `json:"description"` - Accessor string `json:"accessor"` - Config MountConfigOutput `json:"config"` - Options map[string]string `json:"options"` - Local bool `json:"local"` - SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + UUID string `json:"uuid"` + Type string `json:"type"` + Description string `json:"description"` + Accessor string `json:"accessor"` + Config MountConfigOutput `json:"config"` + Options map[string]string `json:"options"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` } type MountConfigOutput struct { diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go index 6897dc0a..908a3c4f 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_raft.go +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -2,6 +2,7 @@ package api import ( "context" + "fmt" "io" "net/http" @@ -16,10 +17,11 @@ type RaftJoinResponse struct { // RaftJoinRequest represents the parameters consumed by the raft join API type RaftJoinRequest struct { LeaderAPIAddr string `json:"leader_api_addr"` - LeaderCACert string `json:"leader_ca_cert":` + LeaderCACert string `json:"leader_ca_cert"` LeaderClientCert string `json:"leader_client_cert"` LeaderClientKey string `json:"leader_client_key"` Retry bool `json:"retry"` + NonVoter bool `json:"non_voter"` } // RaftJoin adds the node from which this call is invoked from to the raft @@ -94,6 +96,29 @@ func (c *Sys) RaftSnapshot(snapWriter io.Writer) error { return nil } + // Check for a redirect, only allowing for a single redirect + if resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307 { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return err + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + req.URL = respLoc + + // Retry the request + resp, err = c.c.config.HttpClient.Do(req) + if err != nil { + return err + } + } + result = &Response{Response: resp} if err := result.Error(); err != nil { return err diff --git 
a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go
index 301d3f26..20d41a28 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_seal.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go
@@ -77,6 +77,7 @@ type SealStatusResponse struct {
 	ClusterName  string `json:"cluster_name,omitempty"`
 	ClusterID    string `json:"cluster_id,omitempty"`
 	RecoverySeal bool   `json:"recovery_seal"`
+	StorageType  string `json:"storage_type,omitempty"`
 }
 
 type UnsealOpts struct {
diff --git a/vendor/github.com/hashicorp/vault/audit/audit.go b/vendor/github.com/hashicorp/vault/audit/audit.go
new file mode 100644
index 00000000..6f1e7208
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/audit.go
@@ -0,0 +1,53 @@
+package audit
+
+import (
+	"context"
+
+	"github.com/hashicorp/vault/sdk/helper/salt"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// The Backend interface must be implemented for an audit
+// mechanism to be made available. Audit backends can be enabled to
+// sink information to different backends such as logs, files, databases,
+// or other external services.
+type Backend interface {
+	// LogRequest is used to synchronously log a request. This is done after the
+	// request is authorized but before the request is executed. The arguments
+	// MUST not be modified in any way. They should be deep copied if this is
+	// a possibility.
+	LogRequest(context.Context, *logical.LogInput) error
+
+	// LogResponse is used to synchronously log a response. This is done after
+	// the request is processed but before the response is sent. The arguments
+	// MUST not be modified in any way. They should be deep copied if this is
+	// a possibility.
+	LogResponse(context.Context, *logical.LogInput) error
+
+	// GetHash is used to return the given data with the backend's hash,
+	// so that a caller can determine if a value in the audit log matches
+	// an expected plaintext value
+	GetHash(context.Context, string) (string, error)
+
+	// Reload is called on SIGHUP for supporting backends.
+	Reload(context.Context) error
+
+	// Invalidate is called for path invalidation
+	Invalidate(context.Context)
+}
+
+// BackendConfig contains configuration parameters used in the factory func to
+// instantiate audit backends
+type BackendConfig struct {
+	// The view to store the salt
+	SaltView logical.Storage
+
+	// The salt config that should be used for any secret obfuscation
+	SaltConfig *salt.Config
+
+	// Config is the opaque user configuration provided when mounting
+	Config map[string]string
+}
+
+// Factory is the factory function to create an audit backend.
+type Factory func(context.Context, *BackendConfig) (Backend, error)
diff --git a/vendor/github.com/hashicorp/vault/audit/format.go b/vendor/github.com/hashicorp/vault/audit/format.go
new file mode 100644
index 00000000..329e7ba7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/format.go
@@ -0,0 +1,416 @@
+package audit
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	squarejwt "gopkg.in/square/go-jose.v2/jwt"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/sdk/helper/salt"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+type AuditFormatWriter interface {
+	// WriteRequest writes the request entry to the writer or returns an error.
+	WriteRequest(io.Writer, *AuditRequestEntry) error
+	// WriteResponse writes the response entry to the writer or returns an error.
+ WriteResponse(io.Writer, *AuditResponseEntry) error + // Salt returns a non-nil salt or an error. + Salt(context.Context) (*salt.Salt, error) +} + +// AuditFormatter implements the Formatter interface, and allows the underlying +// marshaller to be swapped out +type AuditFormatter struct { + AuditFormatWriter +} + +var _ Formatter = (*AuditFormatter)(nil) + +func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config FormatterConfig, in *logical.LogInput) error { + if in == nil || in.Request == nil { + return fmt.Errorf("request to request-audit a nil request") + } + + if w == nil { + return fmt.Errorf("writer for audit request is nil") + } + + if f.AuditFormatWriter == nil { + return fmt.Errorf("no format writer specified") + } + + salt, err := f.Salt(ctx) + if err != nil { + return errwrap.Wrapf("error fetching salt: {{err}}", err) + } + + // Set these to the input values at first + auth := in.Auth + req := in.Request + var connState *tls.ConnectionState + if auth == nil { + auth = new(logical.Auth) + } + + if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { + connState = in.Request.Connection.ConnState + } + + if !config.Raw { + auth, err = HashAuth(salt, auth, config.HMACAccessor) + if err != nil { + return err + } + + req, err = HashRequest(salt, req, config.HMACAccessor, in.NonHMACReqDataKeys) + if err != nil { + return err + } + } + + var errString string + if in.OuterErr != nil { + errString = in.OuterErr.Error() + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + + reqType := in.Type + if reqType == "" { + reqType = "request" + } + reqEntry := &AuditRequestEntry{ + Type: reqType, + Error: errString, + + Auth: &AuditAuth{ + ClientToken: auth.ClientToken, + Accessor: auth.Accessor, + DisplayName: auth.DisplayName, + Policies: auth.Policies, + TokenPolicies: auth.TokenPolicies, + IdentityPolicies: auth.IdentityPolicies, + ExternalNamespacePolicies: auth.ExternalNamespacePolicies, + NoDefaultPolicy: auth.NoDefaultPolicy, + Metadata: auth.Metadata, + EntityID: auth.EntityID, + RemainingUses: req.ClientTokenRemainingUses, + TokenType: auth.TokenType.String(), + }, + + Request: &AuditRequest{ + ID: req.ID, + ClientToken: req.ClientToken, + ClientTokenAccessor: req.ClientTokenAccessor, + Operation: req.Operation, + Namespace: &AuditNamespace{ + ID: ns.ID, + Path: ns.Path, + }, + Path: req.Path, + Data: req.Data, + PolicyOverride: req.PolicyOverride, + RemoteAddr: getRemoteAddr(req), + ReplicationCluster: req.ReplicationCluster, + Headers: req.Headers, + ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), + }, + } + + if req.WrapInfo != nil { + reqEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second) + } + + if !config.OmitTime { + reqEntry.Time = time.Now().UTC().Format(time.RFC3339Nano) + } + + return f.AuditFormatWriter.WriteRequest(w, reqEntry) +} + +func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config FormatterConfig, in *logical.LogInput) error { + if in == nil || in.Request == nil { + return fmt.Errorf("request to response-audit a nil request") + } + + if w == nil { + return fmt.Errorf("writer for audit request is nil") + } + + if f.AuditFormatWriter == nil { + return fmt.Errorf("no format writer specified") + } + + salt, err := f.Salt(ctx) + if err != nil { + return errwrap.Wrapf("error fetching salt: {{err}}", err) + } + + // Set these to the input values at first + auth, req, resp := in.Auth, in.Request, in.Response + if auth == nil { + auth = 
new(logical.Auth) + } + if resp == nil { + resp = new(logical.Response) + } + var connState *tls.ConnectionState + + if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { + connState = in.Request.Connection.ConnState + } + + if !config.Raw { + auth, err = HashAuth(salt, auth, config.HMACAccessor) + if err != nil { + return err + } + + req, err = HashRequest(salt, req, config.HMACAccessor, in.NonHMACReqDataKeys) + if err != nil { + return err + } + + resp, err = HashResponse(salt, resp, config.HMACAccessor, in.NonHMACRespDataKeys) + if err != nil { + return err + } + } + + var errString string + if in.OuterErr != nil { + errString = in.OuterErr.Error() + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + + var respAuth *AuditAuth + if resp.Auth != nil { + respAuth = &AuditAuth{ + ClientToken: resp.Auth.ClientToken, + Accessor: resp.Auth.Accessor, + DisplayName: resp.Auth.DisplayName, + Policies: resp.Auth.Policies, + TokenPolicies: resp.Auth.TokenPolicies, + IdentityPolicies: resp.Auth.IdentityPolicies, + ExternalNamespacePolicies: resp.Auth.ExternalNamespacePolicies, + NoDefaultPolicy: resp.Auth.NoDefaultPolicy, + Metadata: resp.Auth.Metadata, + NumUses: resp.Auth.NumUses, + EntityID: resp.Auth.EntityID, + TokenType: resp.Auth.TokenType.String(), + } + } + + var respSecret *AuditSecret + if resp.Secret != nil { + respSecret = &AuditSecret{ + LeaseID: resp.Secret.LeaseID, + } + } + + var respWrapInfo *AuditResponseWrapInfo + if resp.WrapInfo != nil { + token := resp.WrapInfo.Token + if jwtToken := parseVaultTokenFromJWT(token); jwtToken != nil { + token = *jwtToken + } + respWrapInfo = &AuditResponseWrapInfo{ + TTL: int(resp.WrapInfo.TTL / time.Second), + Token: token, + Accessor: resp.WrapInfo.Accessor, + CreationTime: resp.WrapInfo.CreationTime.UTC().Format(time.RFC3339Nano), + CreationPath: resp.WrapInfo.CreationPath, + WrappedAccessor: resp.WrapInfo.WrappedAccessor, + } + } + + respType := in.Type + if respType == "" { + respType = "response" + } + respEntry := &AuditResponseEntry{ + Type: respType, + Error: errString, + Auth: &AuditAuth{ + ClientToken: auth.ClientToken, + Accessor: auth.Accessor, + DisplayName: auth.DisplayName, + Policies: auth.Policies, + TokenPolicies: auth.TokenPolicies, + IdentityPolicies: auth.IdentityPolicies, + ExternalNamespacePolicies: auth.ExternalNamespacePolicies, + NoDefaultPolicy: auth.NoDefaultPolicy, + Metadata: auth.Metadata, + RemainingUses: req.ClientTokenRemainingUses, + EntityID: auth.EntityID, + TokenType: auth.TokenType.String(), + }, + + Request: &AuditRequest{ + ID: req.ID, + ClientToken: req.ClientToken, + ClientTokenAccessor: req.ClientTokenAccessor, + Operation: req.Operation, + Namespace: &AuditNamespace{ + ID: ns.ID, + Path: ns.Path, + }, + Path: req.Path, + Data: req.Data, + PolicyOverride: req.PolicyOverride, + RemoteAddr: getRemoteAddr(req), + ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), + ReplicationCluster: req.ReplicationCluster, + Headers: req.Headers, + }, + + Response: &AuditResponse{ + Auth: respAuth, + Secret: respSecret, + Data: resp.Data, + Warnings: resp.Warnings, + Redirect: resp.Redirect, + WrapInfo: respWrapInfo, + Headers: resp.Headers, + }, + } + + if req.WrapInfo != nil { + respEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second) + } + + if !config.OmitTime { + respEntry.Time = time.Now().UTC().Format(time.RFC3339Nano) + } + + return f.AuditFormatWriter.WriteResponse(w, respEntry) +} + +// AuditRequestEntry is the structure of a 
request audit log entry in Audit. +type AuditRequestEntry struct { + Time string `json:"time,omitempty"` + Type string `json:"type,omitempty"` + Auth *AuditAuth `json:"auth,omitempty"` + Request *AuditRequest `json:"request,omitempty"` + Error string `json:"error,omitempty"` +} + +// AuditResponseEntry is the structure of a response audit log entry in Audit. +type AuditResponseEntry struct { + Time string `json:"time,omitempty"` + Type string `json:"type,omitempty"` + Auth *AuditAuth `json:"auth,omitempty"` + Request *AuditRequest `json:"request,omitempty"` + Response *AuditResponse `json:"response,omitempty"` + Error string `json:"error,omitempty"` +} + +type AuditRequest struct { + ID string `json:"id,omitempty"` + ReplicationCluster string `json:"replication_cluster,omitempty"` + Operation logical.Operation `json:"operation,omitempty"` + ClientToken string `json:"client_token,omitempty"` + ClientTokenAccessor string `json:"client_token_accessor,omitempty"` + Namespace *AuditNamespace `json:"namespace,omitempty"` + Path string `json:"path,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + PolicyOverride bool `json:"policy_override,omitempty"` + RemoteAddr string `json:"remote_address,omitempty"` + WrapTTL int `json:"wrap_ttl,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` + ClientCertificateSerialNumber string `json:"client_certificate_serial_number,omitempty"` +} + +type AuditResponse struct { + Auth *AuditAuth `json:"auth,omitempty"` + Secret *AuditSecret `json:"secret,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + Warnings []string `json:"warnings,omitempty"` + Redirect string `json:"redirect,omitempty"` + WrapInfo *AuditResponseWrapInfo `json:"wrap_info,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` +} + +type AuditAuth struct { + ClientToken string `json:"client_token,omitempty"` + Accessor string `json:"accessor,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Policies []string `json:"policies,omitempty"` + TokenPolicies []string `json:"token_policies,omitempty"` + IdentityPolicies []string `json:"identity_policies,omitempty"` + ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies,omitempty"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + NumUses int `json:"num_uses,omitempty"` + RemainingUses int `json:"remaining_uses,omitempty"` + EntityID string `json:"entity_id,omitempty"` + TokenType string `json:"token_type,omitempty"` +} + +type AuditSecret struct { + LeaseID string `json:"lease_id,omitempty"` +} + +type AuditResponseWrapInfo struct { + TTL int `json:"ttl,omitempty"` + Token string `json:"token,omitempty"` + Accessor string `json:"accessor,omitempty"` + CreationTime string `json:"creation_time,omitempty"` + CreationPath string `json:"creation_path,omitempty"` + WrappedAccessor string `json:"wrapped_accessor,omitempty"` +} + +type AuditNamespace struct { + ID string `json:"id,omitempty"` + Path string `json:"path,omitempty"` +} + +// getRemoteAddr safely gets the remote address avoiding a nil pointer +func getRemoteAddr(req *logical.Request) string { + if req != nil && req.Connection != nil { + return req.Connection.RemoteAddr + } + return "" +} + +func getClientCertificateSerialNumber(connState *tls.ConnectionState) string { + if connState == nil || len(connState.VerifiedChains) == 0 || len(connState.VerifiedChains[0]) == 0 { + return "" + } + + return 
connState.VerifiedChains[0][0].SerialNumber.String()
+}
+
+// parseVaultTokenFromJWT returns a string iff the token was a JWT and we could
+// extract the original token ID from inside
+func parseVaultTokenFromJWT(token string) *string {
+	if strings.Count(token, ".") != 2 {
+		return nil
+	}
+
+	parsedJWT, err := squarejwt.ParseSigned(token)
+	if err != nil {
+		return nil
+	}
+
+	var claims squarejwt.Claims
+	if err = parsedJWT.UnsafeClaimsWithoutVerification(&claims); err != nil {
+		return nil
+	}
+
+	return &claims.ID
+}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_json.go b/vendor/github.com/hashicorp/vault/audit/format_json.go
new file mode 100644
index 00000000..4003c05a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/format_json.go
@@ -0,0 +1,53 @@
+package audit
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/hashicorp/vault/sdk/helper/salt"
+)
+
+// JSONFormatWriter is an AuditFormatWriter implementation that structures data into
+// a JSON format.
+type JSONFormatWriter struct {
+	Prefix   string
+	SaltFunc func(context.Context) (*salt.Salt, error)
+}
+
+func (f *JSONFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error {
+	if req == nil {
+		return fmt.Errorf("request entry was nil, cannot encode")
+	}
+
+	if len(f.Prefix) > 0 {
+		_, err := w.Write([]byte(f.Prefix))
+		if err != nil {
+			return err
+		}
+	}
+
+	enc := json.NewEncoder(w)
+	return enc.Encode(req)
+}
+
+func (f *JSONFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error {
+	if resp == nil {
+		return fmt.Errorf("response entry was nil, cannot encode")
+	}
+
+	if len(f.Prefix) > 0 {
+		_, err := w.Write([]byte(f.Prefix))
+		if err != nil {
+			return err
+		}
+	}
+
+	enc := json.NewEncoder(w)
+	return enc.Encode(resp)
+}
+
+func (f *JSONFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) {
+	return f.SaltFunc(ctx)
+}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_jsonx.go b/vendor/github.com/hashicorp/vault/audit/format_jsonx.go
new file mode 100644
index 00000000..bff24409
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/format_jsonx.go
@@ -0,0 +1,74 @@
+package audit
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/hashicorp/vault/sdk/helper/salt"
+	"github.com/jefferai/jsonx"
+)
+
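Usage note: a format writer is just an encoder over the entry types defined in format.go. A minimal sketch using the JSON variant above; the field values are illustrative, and the import path is the package as vendored here:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/hashicorp/vault/audit"
    )

    func main() {
        var buf bytes.Buffer
        w := &audit.JSONFormatWriter{Prefix: "vault: "}

        // WriteRequest emits the Prefix followed by one JSON object per entry.
        entry := &audit.AuditRequestEntry{
            Type:    "request",
            Request: &audit.AuditRequest{Path: "secret/data/app"},
        }
        if err := w.WriteRequest(&buf, entry); err != nil {
            panic(err)
        }
        fmt.Print(buf.String())
        // vault: {"type":"request","request":{"path":"secret/data/app"}}
    }

The JSONx variant below follows the same contract, marshaling the entry to JSON first and then transcoding it to XML.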
+// JSONxFormatWriter is an AuditFormatWriter implementation that structures data into
+// an XML format.
+type JSONxFormatWriter struct {
+	Prefix   string
+	SaltFunc func(context.Context) (*salt.Salt, error)
+}
+
+func (f *JSONxFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error {
+	if req == nil {
+		return fmt.Errorf("request entry was nil, cannot encode")
+	}
+
+	if len(f.Prefix) > 0 {
+		_, err := w.Write([]byte(f.Prefix))
+		if err != nil {
+			return err
+		}
+	}
+
+	jsonBytes, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+
+	xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes)
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write(xmlBytes)
+	return err
+}
+
+func (f *JSONxFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error {
+	if resp == nil {
+		return fmt.Errorf("response entry was nil, cannot encode")
+	}
+
+	if len(f.Prefix) > 0 {
+		_, err := w.Write([]byte(f.Prefix))
+		if err != nil {
+			return err
+		}
+	}
+
+	jsonBytes, err := json.Marshal(resp)
+	if err != nil {
+		return err
+	}
+
+	xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes)
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write(xmlBytes)
+	return err
+}
+
+func (f *JSONxFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) {
+	return f.SaltFunc(ctx)
+}
diff --git a/vendor/github.com/hashicorp/vault/audit/formatter.go b/vendor/github.com/hashicorp/vault/audit/formatter.go
new file mode 100644
index 00000000..c2703576
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/formatter.go
@@ -0,0 +1,26 @@
+package audit
+
+import (
+	"context"
+	"io"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// Formatter is an interface that is responsible for formatting a
+// request/response into some format. Formatters write their output
+// to an io.Writer.
+//
+// It is recommended that you pass data through Hash prior to formatting it.
+type Formatter interface {
+	FormatRequest(context.Context, io.Writer, FormatterConfig, *logical.LogInput) error
+	FormatResponse(context.Context, io.Writer, FormatterConfig, *logical.LogInput) error
+}
+
+type FormatterConfig struct {
+	Raw          bool
+	HMACAccessor bool
+
+	// This should only ever be used in a testing context
+	OmitTime bool
+}
diff --git a/vendor/github.com/hashicorp/vault/audit/hashstructure.go b/vendor/github.com/hashicorp/vault/audit/hashstructure.go
new file mode 100644
index 00000000..70db3f26
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/hashstructure.go
@@ -0,0 +1,353 @@
+package audit
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/helper/salt"
+	"github.com/hashicorp/vault/sdk/helper/strutil"
+	"github.com/hashicorp/vault/sdk/helper/wrapping"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/mitchellh/copystructure"
+	"github.com/mitchellh/reflectwalk"
+)
+
+// HashString hashes the given opaque string and returns it
+func HashString(salter *salt.Salt, data string) string {
+	return salter.GetIdentifiedHMAC(data)
+}
+
+// HashAuth returns a hashed copy of the logical.Auth input.
+func HashAuth(salter *salt.Salt, in *logical.Auth, HMACAccessor bool) (*logical.Auth, error) {
+	if in == nil {
+		return nil, nil
+	}
+
+	fn := salter.GetIdentifiedHMAC
+	auth := *in
+
+	if auth.ClientToken != "" {
+		auth.ClientToken = fn(auth.ClientToken)
+	}
+	if HMACAccessor && auth.Accessor != "" {
+		auth.Accessor = fn(auth.Accessor)
+	}
+	return &auth, nil
+}
+
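Usage note: the hashing helpers take a *salt.Salt. A sketch of how one might be wired up against an in-memory view, assuming the vendored sdk's salt.NewSalt and logical.InmemStorage behave as their signatures suggest:

    package main

    import (
        "context"
        "crypto/sha256"
        "fmt"

        "github.com/hashicorp/vault/audit"
        "github.com/hashicorp/vault/sdk/helper/salt"
        "github.com/hashicorp/vault/sdk/logical"
    )

    func main() {
        ctx := context.Background()

        // Generate and persist a salt in an in-memory view (test-only setup).
        s, err := salt.NewSalt(ctx, &logical.InmemStorage{}, &salt.Config{
            HMAC:     sha256.New,
            HMACType: "hmac-sha256",
        })
        if err != nil {
            panic(err)
        }

        // ClientToken is always HMAC'd; Accessor too, since HMACAccessor is true.
        hashed, err := audit.HashAuth(s, &logical.Auth{
            ClientToken: "s.1234567890",
            Accessor:    "accessor-value",
        }, true)
        if err != nil {
            panic(err)
        }
        fmt.Println(hashed.ClientToken) // hmac-sha256:...
    }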
+// HashRequest returns a hashed copy of the logical.Request input.
+func HashRequest(salter *salt.Salt, in *logical.Request, HMACAccessor bool, nonHMACDataKeys []string) (*logical.Request, error) {
+	if in == nil {
+		return nil, nil
+	}
+
+	fn := salter.GetIdentifiedHMAC
+	req := *in
+
+	if req.Auth != nil {
+		cp, err := copystructure.Copy(req.Auth)
+		if err != nil {
+			return nil, err
+		}
+
+		req.Auth, err = HashAuth(salter, cp.(*logical.Auth), HMACAccessor)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if req.ClientToken != "" {
+		req.ClientToken = fn(req.ClientToken)
+	}
+	if HMACAccessor && req.ClientTokenAccessor != "" {
+		req.ClientTokenAccessor = fn(req.ClientTokenAccessor)
+	}
+
+	data, err := hashMap(fn, req.Data, nonHMACDataKeys)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Data = data
+	return &req, nil
+}
+
+func hashMap(fn func(string) string, data map[string]interface{}, nonHMACDataKeys []string) (map[string]interface{}, error) {
+	if data == nil {
+		return nil, nil
+	}
+
+	copy, err := copystructure.Copy(data)
+	if err != nil {
+		return nil, err
+	}
+	newData := copy.(map[string]interface{})
+	for k, v := range newData {
+		if o, ok := v.(logical.OptMarshaler); ok {
+			marshaled, err := o.MarshalJSONWithOptions(&logical.MarshalOptions{
+				ValueHasher: fn,
+			})
+			if err != nil {
+				return nil, err
+			}
+			newData[k] = json.RawMessage(marshaled)
+		}
+	}
+
+	if err := HashStructure(newData, fn, nonHMACDataKeys); err != nil {
+		return nil, err
+	}
+
+	return newData, nil
+}
+
+// HashResponse returns a hashed copy of the logical.Response input.
+func HashResponse(salter *salt.Salt, in *logical.Response, HMACAccessor bool, nonHMACDataKeys []string) (*logical.Response, error) {
+	if in == nil {
+		return nil, nil
+	}
+
+	fn := salter.GetIdentifiedHMAC
+	resp := *in
+
+	if resp.Auth != nil {
+		cp, err := copystructure.Copy(resp.Auth)
+		if err != nil {
+			return nil, err
+		}
+
+		resp.Auth, err = HashAuth(salter, cp.(*logical.Auth), HMACAccessor)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	data, err := hashMap(fn, resp.Data, nonHMACDataKeys)
+	if err != nil {
+		return nil, err
+	}
+	resp.Data = data
+
+	if resp.WrapInfo != nil {
+		var err error
+		resp.WrapInfo, err = HashWrapInfo(salter, resp.WrapInfo, HMACAccessor)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &resp, nil
+}
+
+// HashWrapInfo returns a hashed copy of the wrapping.ResponseWrapInfo input.
+func HashWrapInfo(salter *salt.Salt, in *wrapping.ResponseWrapInfo, HMACAccessor bool) (*wrapping.ResponseWrapInfo, error) {
+	if in == nil {
+		return nil, nil
+	}
+
+	fn := salter.GetIdentifiedHMAC
+	wrapinfo := *in
+
+	wrapinfo.Token = fn(wrapinfo.Token)
+
+	if HMACAccessor {
+		wrapinfo.Accessor = fn(wrapinfo.Accessor)
+
+		if wrapinfo.WrappedAccessor != "" {
+			wrapinfo.WrappedAccessor = fn(wrapinfo.WrappedAccessor)
+		}
+	}
+
+	return &wrapinfo, nil
+}
+
+// HashStructure takes an interface and hashes all the values within
+// the structure. Only _values_ are hashed: keys of objects are not.
+//
+// The HashCallback is applied to every string value encountered.
+func HashStructure(s interface{}, cb HashCallback, ignoredKeys []string) error {
+	walker := &hashWalker{Callback: cb, IgnoredKeys: ignoredKeys}
+	return reflectwalk.Walk(s, walker)
+}
+
+// HashCallback is the callback called for HashStructure to hash
+// a value.
+type HashCallback func(string) string
+
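Usage note: HashStructure and HashCallback are usable on their own; a small sketch with a stand-in callback (a real caller would pass the salt's GetIdentifiedHMAC, as the helpers above do):

    package main

    import (
        "fmt"

        "github.com/hashicorp/vault/audit"
    )

    func main() {
        data := map[string]interface{}{
            "password": "hunter2",
            "ttl":      "1h", // listed below as ignored, so left untouched
        }

        // Stand-in for a real salt-backed HMAC function.
        fn := func(v string) string { return "hmac-sha256:" + v }

        if err := audit.HashStructure(data, fn, []string{"ttl"}); err != nil {
            panic(err)
        }
        fmt.Println(data["password"]) // hmac-sha256:hunter2
        fmt.Println(data["ttl"])      // 1h
    }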
+// hashWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// replace primitives with a hashed value.
+type hashWalker struct {
+	// Callback is the function to call with the primitive that is
+	// to be hashed. If there is an error, walking will be halted
+	// immediately and the error returned.
+	Callback HashCallback
+	// IgnoredKeys are the keys that won't have the HashCallback applied
+	IgnoredKeys []string
+	// MapElem appends the key itself (not the reflect.Value) to key.
+	// The last element in key is the most recently entered map key.
+	// Since Exit pops the last element of key, only nesting to another
+	// structure increases the size of this slice.
+	key       []string
+	lastValue reflect.Value
+	// Enter appends to loc and exit pops loc. The last element of loc is thus
+	// the current location.
+	loc []reflectwalk.Location
+	// Map and Slice append to cs, Exit pops the last element off cs.
+	// The last element in cs is the most recently entered map or slice.
+	cs []reflect.Value
+	// MapElem and SliceElem append to csKey. The last element in csKey is the
+	// most recently entered map key or slice index. Since Exit pops the last
+	// element of csKey, only nesting to another structure increases the size of
+	// this slice.
+	csKey []reflect.Value
+}
+
+// hashTimeType stores a pre-computed reflect.Type for a time.Time so
+// we can quickly compare in hashWalker.Struct. We create an empty/invalid
+// time.Time{} so we don't need to incur any additional startup cost vs.
+// Now() or Unix().
+var hashTimeType = reflect.TypeOf(time.Time{})
+
+func (w *hashWalker) Enter(loc reflectwalk.Location) error {
+	w.loc = append(w.loc, loc)
+	return nil
+}
+
+func (w *hashWalker) Exit(loc reflectwalk.Location) error {
+	w.loc = w.loc[:len(w.loc)-1]
+
+	switch loc {
+	case reflectwalk.Map:
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.MapValue:
+		w.key = w.key[:len(w.key)-1]
+		w.csKey = w.csKey[:len(w.csKey)-1]
+	case reflectwalk.Slice:
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.SliceElem:
+		w.csKey = w.csKey[:len(w.csKey)-1]
+	}
+
+	return nil
+}
+
+func (w *hashWalker) Map(m reflect.Value) error {
+	w.cs = append(w.cs, m)
+	return nil
+}
+
+func (w *hashWalker) MapElem(m, k, v reflect.Value) error {
+	w.csKey = append(w.csKey, k)
+	w.key = append(w.key, k.String())
+	w.lastValue = v
+	return nil
+}
+
+func (w *hashWalker) Slice(s reflect.Value) error {
+	w.cs = append(w.cs, s)
+	return nil
+}
+
+func (w *hashWalker) SliceElem(i int, elem reflect.Value) error {
+	w.csKey = append(w.csKey, reflect.ValueOf(i))
+	return nil
+}
+
+func (w *hashWalker) Struct(v reflect.Value) error {
+	// We are looking for time values. If it isn't one, ignore it.
+	if v.Type() != hashTimeType {
+		return nil
+	}
+
+	if len(w.loc) < 3 {
+		// The last element of w.loc is reflectwalk.Struct, by definition.
+		// If len(w.loc) < 3 that means hashWalker.Walk was given a struct
+		// value and this is the very first step in the walk, and we don't
+		// currently support structs as inputs.
+		return errors.New("structs as direct inputs not supported")
+	}
+
+	// Second to last element of w.loc is location that contains this struct.
+	switch w.loc[len(w.loc)-2] {
+	case reflectwalk.MapValue:
+		// Create a string value of the time. IMPORTANT: this must never change
+		// across Vault versions or the hash value of equivalent time.Time will
+		// change.
+		strVal := v.Interface().(time.Time).Format(time.RFC3339Nano)
+
+		// Set the map value to the string instead of the time.Time object
+		m := w.cs[len(w.cs)-1]
+		mk := w.csKey[len(w.cs)-1]
+		m.SetMapIndex(mk, reflect.ValueOf(strVal))
+	case reflectwalk.SliceElem:
+		// Create a string value of the time. IMPORTANT: this must never change
+		// across Vault versions or the hash value of equivalent time.Time will
+		// change.
+		strVal := v.Interface().(time.Time).Format(time.RFC3339Nano)
+
+		// Set the map value to the string instead of the time.Time object
+		s := w.cs[len(w.cs)-1]
+		si := int(w.csKey[len(w.cs)-1].Int())
+		s.Slice(si, si+1).Index(0).Set(reflect.ValueOf(strVal))
+	}
+
+	// Skip this entry so that we don't walk the struct.
+	return reflectwalk.SkipEntry
+}
+
+func (w *hashWalker) StructField(reflect.StructField, reflect.Value) error {
+	return nil
+}
+
+// Primitive calls Callback to transform strings in-place, except for map keys.
+// Strings hiding within interfaces are also transformed.
+func (w *hashWalker) Primitive(v reflect.Value) error {
+	if w.Callback == nil {
+		return nil
+	}
+
+	// We don't touch map keys
+	if w.loc[len(w.loc)-1] == reflectwalk.MapKey {
+		return nil
+	}
+
+	setV := v
+
+	// We only care about strings
+	if v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+	if v.Kind() != reflect.String {
+		return nil
+	}
+
+	// See if the current key is part of the ignored keys
+	currentKey := w.key[len(w.key)-1]
+	if strutil.StrListContains(w.IgnoredKeys, currentKey) {
+		return nil
+	}
+
+	replaceVal := w.Callback(v.String())
+
+	resultVal := reflect.ValueOf(replaceVal)
+	switch w.loc[len(w.loc)-1] {
+	case reflectwalk.MapValue:
+		// If we're in a map, then the only way to set a map value is
+		// to set it directly.
+		m := w.cs[len(w.cs)-1]
+		mk := w.csKey[len(w.cs)-1]
+		m.SetMapIndex(mk, resultVal)
+	case reflectwalk.SliceElem:
+		s := w.cs[len(w.cs)-1]
+		si := int(w.csKey[len(w.cs)-1].Int())
+		s.Slice(si, si+1).Index(0).Set(resultVal)
+	default:
+		// Otherwise, we should be addressable
+		setV.Set(resultVal)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go b/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go
new file mode 100644
index 00000000..2942352e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go
@@ -0,0 +1,222 @@
+package plugin
+
+import (
+	"context"
+	"fmt"
+	"net/rpc"
+	"reflect"
+	"sync"
+
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/logical"
+	bplugin "github.com/hashicorp/vault/sdk/plugin"
+)
+
+var (
+	ErrMismatchType  = fmt.Errorf("mismatch on mounted backend and plugin backend type")
+	ErrMismatchPaths = fmt.Errorf("mismatch on mounted backend and plugin backend special paths")
+)
+
+// Factory returns a configured plugin logical.Backend.
+func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	_, ok := conf.Config["plugin_name"]
+	if !ok {
+		return nil, fmt.Errorf("plugin_name not provided")
+	}
+	b, err := Backend(ctx, conf)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := b.Setup(ctx, conf); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
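Design note: PluginBackend below guards its lazily started plugin with a read-lock fast path plus a write-lock double check. Stripped of the plugin machinery, the pattern looks like this (a standalone sketch with hypothetical names, not the vendored code):

    package main

    import "sync"

    type lazyBackend struct {
        mu     sync.RWMutex
        loaded bool
    }

    func (b *lazyBackend) ensureLoaded(start func() error) error {
        b.mu.RLock()
        loaded := b.loaded
        b.mu.RUnlock()
        if loaded {
            return nil
        }

        b.mu.Lock()
        defer b.mu.Unlock()
        // Re-check after the lock upgrade: another goroutine may have
        // started the backend while we were waiting for the write lock.
        if b.loaded {
            return nil
        }
        if err := start(); err != nil {
            return err
        }
        b.loaded = true
        return nil
    }

    func main() {
        b := &lazyBackend{}
        _ = b.ensureLoaded(func() error { return nil })
    }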
+// Backend returns an instance of the backend, either as a plugin if external
+// or as a concrete implementation if builtin, cast as logical.Backend.
+func Backend(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	var b PluginBackend
+
+	name := conf.Config["plugin_name"]
+	pluginType, err := consts.ParsePluginType(conf.Config["plugin_type"])
+	if err != nil {
+		return nil, err
+	}
+
+	sys := conf.System
+
+	// NewBackend with isMetadataMode set to true
+	raw, err := bplugin.NewBackend(ctx, name, pluginType, sys, conf, true)
+	if err != nil {
+		return nil, err
+	}
+	err = raw.Setup(ctx, conf)
+	if err != nil {
+		return nil, err
+	}
+	// Get SpecialPaths and BackendType
+	paths := raw.SpecialPaths()
+	btype := raw.Type()
+
+	// Cleanup meta plugin backend
+	raw.Cleanup(ctx)
+
+	// Initialize b.Backend with dummy backend since plugin
+	// backends will need to be lazy loaded.
+	b.Backend = &framework.Backend{
+		PathsSpecial: paths,
+		BackendType:  btype,
+	}
+
+	b.config = conf
+
+	return &b, nil
+}
+
+// PluginBackend is a thin wrapper around plugin.BackendPluginClient
+type PluginBackend struct {
+	logical.Backend
+	sync.RWMutex
+
+	config *logical.BackendConfig
+
+	// Used to detect if we already reloaded
+	canary string
+
+	// Used to detect if plugin is set
+	loaded bool
+}
+
+// startBackend starts a plugin backend
+func (b *PluginBackend) startBackend(ctx context.Context, storage logical.Storage) error {
+	pluginName := b.config.Config["plugin_name"]
+	pluginType, err := consts.ParsePluginType(b.config.Config["plugin_type"])
+	if err != nil {
+		return err
+	}
+
+	// Ensure proper cleanup of the backend (i.e. call client.Kill())
+	b.Backend.Cleanup(ctx)
+
+	nb, err := bplugin.NewBackend(ctx, pluginName, pluginType, b.config.System, b.config, false)
+	if err != nil {
+		return err
+	}
+	err = nb.Setup(ctx, b.config)
+	if err != nil {
+		return err
+	}
+
+	// If the backend has not been loaded (i.e. still in metadata mode),
+	// check if type and special paths still match
+	if !b.loaded {
+		if b.Backend.Type() != nb.Type() {
+			nb.Cleanup(ctx)
+			b.Logger().Warn("failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchType)
+			return ErrMismatchType
+		}
+		if !reflect.DeepEqual(b.Backend.SpecialPaths(), nb.SpecialPaths()) {
+			nb.Cleanup(ctx)
+			b.Logger().Warn("failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchPaths)
+			return ErrMismatchPaths
+		}
+	}
+
+	b.Backend = nb
+	b.loaded = true
+
+	// call Initialize() explicitly here.
+	return b.Backend.Initialize(ctx, &logical.InitializationRequest{
+		Storage: storage,
+	})
+}
+
+// lazyLoadBackend lazy-loads the backend before running a method
+func (b *PluginBackend) lazyLoadBackend(ctx context.Context, storage logical.Storage, methodWrapper func() error) error {
+	b.RLock()
+	canary := b.canary
+
+	// Lazy-load backend
+	if !b.loaded {
+		// Upgrade lock
+		b.RUnlock()
+		b.Lock()
+		// Check once more after lock swap
+		if !b.loaded {
+			err := b.startBackend(ctx, storage)
+			if err != nil {
+				b.Unlock()
+				return err
+			}
+		}
+		b.Unlock()
+		b.RLock()
+	}
+
+	err := methodWrapper()
+	b.RUnlock()
+
+	// Need to compare the string value for the case where err comes from plugin RPC
+	// and is returned as plugin.BasicError type.
+ if err != nil && + (err.Error() == rpc.ErrShutdown.Error() || err == bplugin.ErrPluginShutdown) { + // Reload plugin if it's an rpc.ErrShutdown + b.Lock() + if b.canary == canary { + b.Logger().Debug("reloading plugin backend", "plugin", b.config.Config["plugin_name"]) + err := b.startBackend(ctx, storage) + if err != nil { + b.Unlock() + return err + } + b.canary, err = uuid.GenerateUUID() + if err != nil { + b.Unlock() + return err + } + } + b.Unlock() + + // Try once more + b.RLock() + defer b.RUnlock() + return methodWrapper() + } + return err +} + +// HandleRequest is a thin wrapper implementation of HandleRequest that includes +// automatic plugin reload. +func (b *PluginBackend) HandleRequest(ctx context.Context, req *logical.Request) (resp *logical.Response, err error) { + + err = b.lazyLoadBackend(ctx, req.Storage, func() error { + var merr error + resp, merr = b.Backend.HandleRequest(ctx, req) + return merr + }) + + return +} + +// HandleExistenceCheck is a thin wrapper implementation of HandleExistenceCheck +// that includes automatic plugin reload. +func (b *PluginBackend) HandleExistenceCheck(ctx context.Context, req *logical.Request) (checkFound bool, exists bool, err error) { + + err = b.lazyLoadBackend(ctx, req.Storage, func() error { + var merr error + checkFound, exists, merr = b.Backend.HandleExistenceCheck(ctx, req) + return merr + }) + + return +} + +// Initialize is intentionally a no-op here, the backend will instead be +// initialized when it is lazily loaded. +func (b *PluginBackend) Initialize(ctx context.Context, req *logical.InitializationRequest) error { + return nil +} diff --git a/vendor/github.com/hashicorp/vault/command/server/config.go b/vendor/github.com/hashicorp/vault/command/server/config.go new file mode 100644 index 00000000..41f9150f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/command/server/config.go @@ -0,0 +1,1055 @@ +package server + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/sdk/helper/parseutil" +) + +const ( + prometheusDefaultRetentionTime = 24 * time.Hour +) + +// Config is the configuration for the vault server. +type Config struct { + Listeners []*Listener `hcl:"-"` + Storage *Storage `hcl:"-"` + HAStorage *Storage `hcl:"-"` + + Seals []*Seal `hcl:"-"` + Entropy *Entropy `hcl:"-"` + + CacheSize int `hcl:"cache_size"` + DisableCache bool `hcl:"-"` + DisableCacheRaw interface{} `hcl:"disable_cache"` + DisableMlock bool `hcl:"-"` + DisableMlockRaw interface{} `hcl:"disable_mlock"` + DisablePrintableCheck bool `hcl:"-"` + DisablePrintableCheckRaw interface{} `hcl:"disable_printable_check"` + + EnableUI bool `hcl:"-"` + EnableUIRaw interface{} `hcl:"ui"` + + Telemetry *Telemetry `hcl:"telemetry"` + + MaxLeaseTTL time.Duration `hcl:"-"` + MaxLeaseTTLRaw interface{} `hcl:"max_lease_ttl"` + DefaultLeaseTTL time.Duration `hcl:"-"` + DefaultLeaseTTLRaw interface{} `hcl:"default_lease_ttl"` + + DefaultMaxRequestDuration time.Duration `hcl:"-"` + DefaultMaxRequestDurationRaw interface{} `hcl:"default_max_request_duration"` + + ClusterName string `hcl:"cluster_name"` + ClusterCipherSuites string `hcl:"cluster_cipher_suites"` + + PluginDirectory string `hcl:"plugin_directory"` + + LogLevel string `hcl:"log_level"` + + // LogFormat specifies the log format. Valid values are "standard" and "json". 
The values are case-insensitive.
+	// If no log format is specified, then standard format will be used.
+	LogFormat string `hcl:"log_format"`
+
+	PidFile              string      `hcl:"pid_file"`
+	EnableRawEndpoint    bool        `hcl:"-"`
+	EnableRawEndpointRaw interface{} `hcl:"raw_storage_endpoint"`
+
+	APIAddr              string      `hcl:"api_addr"`
+	ClusterAddr          string      `hcl:"cluster_addr"`
+	DisableClustering    bool        `hcl:"-"`
+	DisableClusteringRaw interface{} `hcl:"disable_clustering"`
+
+	DisablePerformanceStandby    bool        `hcl:"-"`
+	DisablePerformanceStandbyRaw interface{} `hcl:"disable_performance_standby"`
+
+	DisableSealWrap    bool        `hcl:"-"`
+	DisableSealWrapRaw interface{} `hcl:"disable_sealwrap"`
+
+	DisableIndexing    bool        `hcl:"-"`
+	DisableIndexingRaw interface{} `hcl:"disable_indexing"`
+}
+
+// DevConfig is a Config that is used for dev mode of Vault.
+func DevConfig(storageType string) *Config {
+	ret := &Config{
+		DisableMlock:      true,
+		EnableRawEndpoint: true,
+
+		Storage: &Storage{
+			Type: storageType,
+		},
+
+		Listeners: []*Listener{
+			&Listener{
+				Type: "tcp",
+				Config: map[string]interface{}{
+					"address":                         "127.0.0.1:8200",
+					"tls_disable":                     true,
+					"proxy_protocol_behavior":         "allow_authorized",
+					"proxy_protocol_authorized_addrs": "127.0.0.1:8200",
+				},
+			},
+		},
+
+		EnableUI: true,
+
+		Telemetry: &Telemetry{
+			PrometheusRetentionTime: prometheusDefaultRetentionTime,
+			DisableHostname:         true,
+		},
+	}
+
+	return ret
+}
+
+// Listener is the listener configuration for the server.
+type Listener struct {
+	Type   string
+	Config map[string]interface{}
+}
+
+func (l *Listener) GoString() string {
+	return fmt.Sprintf("*%#v", *l)
+}
+
+// Entropy contains Entropy configuration for the server
+type EntropyMode int
+
+const (
+	Unknown EntropyMode = iota
+	Augmentation
+)
+
+type Entropy struct {
+	Mode EntropyMode
+}
+
+// Storage is the underlying storage configuration for the server.
+type Storage struct {
+	Type              string
+	RedirectAddr      string
+	ClusterAddr       string
+	DisableClustering bool
+	Config            map[string]string
+}
+
+func (b *Storage) GoString() string {
+	return fmt.Sprintf("*%#v", *b)
+}
+
+// Seal contains Seal configuration for the server
+type Seal struct {
+	Type     string
+	Disabled bool
+	Config   map[string]string
+}
+
+func (h *Seal) GoString() string {
+	return fmt.Sprintf("*%#v", *h)
+}
+
+// Telemetry is the telemetry configuration for the server
+type Telemetry struct {
+	StatsiteAddr string `hcl:"statsite_address"`
+	StatsdAddr   string `hcl:"statsd_address"`
+
+	DisableHostname     bool `hcl:"disable_hostname"`
+	EnableHostnameLabel bool `hcl:"enable_hostname_label"`
+
+	// Circonus: see https://github.com/circonus-labs/circonus-gometrics
+	// for more details on the various configuration options.
+	// Valid configuration combinations:
+	//    - CirconusAPIToken
+	//      metric management enabled (search for existing check or create a new one)
+	//    - CirconusSubmissionUrl
+	//      metric management disabled (use check with specified submission_url,
+	//      broker must be using a public SSL certificate)
+	//    - CirconusAPIToken + CirconusCheckSubmissionURL
+	//      metric management enabled (use check with specified submission_url)
+	//    - CirconusAPIToken + CirconusCheckID
+	//      metric management enabled (use check with specified id)
+
+	// CirconusAPIToken is a valid API Token used to create/manage check. If provided,
+	// metric management is enabled.
+	// Default: none
+	CirconusAPIToken string `hcl:"circonus_api_token"`
+	// CirconusAPIApp is an app name associated with API token.
+ // Default: "consul" + CirconusAPIApp string `hcl:"circonus_api_app"` + // CirconusAPIURL is the base URL to use for contacting the Circonus API. + // Default: "https://api.circonus.com/v2" + CirconusAPIURL string `hcl:"circonus_api_url"` + // CirconusSubmissionInterval is the interval at which metrics are submitted to Circonus. + // Default: 10s + CirconusSubmissionInterval string `hcl:"circonus_submission_interval"` + // CirconusCheckSubmissionURL is the check.config.submission_url field from a + // previously created HTTPTRAP check. + // Default: none + CirconusCheckSubmissionURL string `hcl:"circonus_submission_url"` + // CirconusCheckID is the check id (not check bundle id) from a previously created + // HTTPTRAP check. The numeric portion of the check._cid field. + // Default: none + CirconusCheckID string `hcl:"circonus_check_id"` + // CirconusCheckForceMetricActivation will force enabling metrics, as they are encountered, + // if the metric already exists and is NOT active. If check management is enabled, the default + // behavior is to add new metrics as they are encountered. If the metric already exists in the + // check, it will *NOT* be activated. This setting overrides that behavior. + // Default: "false" + CirconusCheckForceMetricActivation string `hcl:"circonus_check_force_metric_activation"` + // CirconusCheckInstanceID serves to uniquely identify the metrics coming from this "instance". + // It can be used to maintain metric continuity with transient or ephemeral instances as + // they move around within an infrastructure. + // Default: hostname:app + CirconusCheckInstanceID string `hcl:"circonus_check_instance_id"` + // CirconusCheckSearchTag is a special tag which, when coupled with the instance id, helps to + // narrow down the search results when neither a Submission URL or Check ID is provided. + // Default: service:app (e.g. service:consul) + CirconusCheckSearchTag string `hcl:"circonus_check_search_tag"` + // CirconusCheckTags is a comma separated list of tags to apply to the check. Note that + // the value of CirconusCheckSearchTag will always be added to the check. + // Default: none + CirconusCheckTags string `hcl:"circonus_check_tags"` + // CirconusCheckDisplayName is the name for the check which will be displayed in the Circonus UI. + // Default: value of CirconusCheckInstanceID + CirconusCheckDisplayName string `hcl:"circonus_check_display_name"` + // CirconusBrokerID is an explicit broker to use when creating a new check. The numeric portion + // of broker._cid. If metric management is enabled and neither a Submission URL nor Check ID + // is provided, an attempt will be made to search for an existing check using Instance ID and + // Search Tag. If one is not found, a new HTTPTRAP check will be created. + // Default: use Select Tag if provided, otherwise, a random Enterprise Broker associated + // with the specified API token or the default Circonus Broker. + // Default: none + CirconusBrokerID string `hcl:"circonus_broker_id"` + // CirconusBrokerSelectTag is a special tag which will be used to select a broker when + // a Broker ID is not provided. The best use of this is to as a hint for which broker + // should be used based on *where* this particular instance is running. + // (e.g. a specific geo location or datacenter, dc:sfo) + // Default: none + CirconusBrokerSelectTag string `hcl:"circonus_broker_select_tag"` + + // Dogstats: + // DogStatsdAddr is the address of a dogstatsd instance. 
If provided, + // metrics will be sent to that instance + DogStatsDAddr string `hcl:"dogstatsd_addr"` + + // DogStatsdTags are the global tags that should be sent with each packet to dogstatsd + // It is a list of strings, where each string looks like "my_tag_name:my_tag_value" + DogStatsDTags []string `hcl:"dogstatsd_tags"` + + // Prometheus: + // PrometheusRetentionTime is the retention time for prometheus metrics if greater than 0. + // Default: 24h + PrometheusRetentionTime time.Duration `hcl:"-"` + PrometheusRetentionTimeRaw interface{} `hcl:"prometheus_retention_time"` + + // Stackdriver: + // StackdriverProjectID is the project to publish stackdriver metrics to. + StackdriverProjectID string `hcl:"stackdriver_project_id"` + // StackdriverLocation is the GCP or AWS region of the monitored resource. + StackdriverLocation string `hcl:"stackdriver_location"` + // StackdriverNamespace is the namespace identifier, such as a cluster name. + StackdriverNamespace string `hcl:"stackdriver_namespace"` +} + +func (s *Telemetry) GoString() string { + return fmt.Sprintf("*%#v", *s) +} + +// Merge merges two configurations. +func (c *Config) Merge(c2 *Config) *Config { + if c2 == nil { + return c + } + + result := new(Config) + for _, l := range c.Listeners { + result.Listeners = append(result.Listeners, l) + } + for _, l := range c2.Listeners { + result.Listeners = append(result.Listeners, l) + } + + result.Storage = c.Storage + if c2.Storage != nil { + result.Storage = c2.Storage + } + + result.HAStorage = c.HAStorage + if c2.HAStorage != nil { + result.HAStorage = c2.HAStorage + } + + result.Entropy = c.Entropy + if c2.Entropy != nil { + result.Entropy = c2.Entropy + } + + for _, s := range c.Seals { + result.Seals = append(result.Seals, s) + } + for _, s := range c2.Seals { + result.Seals = append(result.Seals, s) + } + + result.Telemetry = c.Telemetry + if c2.Telemetry != nil { + result.Telemetry = c2.Telemetry + } + + result.CacheSize = c.CacheSize + if c2.CacheSize != 0 { + result.CacheSize = c2.CacheSize + } + + // merging these booleans via an OR operation + result.DisableCache = c.DisableCache + if c2.DisableCache { + result.DisableCache = c2.DisableCache + } + + result.DisableMlock = c.DisableMlock + if c2.DisableMlock { + result.DisableMlock = c2.DisableMlock + } + + result.DisablePrintableCheck = c.DisablePrintableCheck + if c2.DisablePrintableCheckRaw != nil { + result.DisablePrintableCheck = c2.DisablePrintableCheck + } + + // merge these integers via a MAX operation + result.MaxLeaseTTL = c.MaxLeaseTTL + if c2.MaxLeaseTTL > result.MaxLeaseTTL { + result.MaxLeaseTTL = c2.MaxLeaseTTL + } + + result.DefaultLeaseTTL = c.DefaultLeaseTTL + if c2.DefaultLeaseTTL > result.DefaultLeaseTTL { + result.DefaultLeaseTTL = c2.DefaultLeaseTTL + } + + result.DefaultMaxRequestDuration = c.DefaultMaxRequestDuration + if c2.DefaultMaxRequestDuration > result.DefaultMaxRequestDuration { + result.DefaultMaxRequestDuration = c2.DefaultMaxRequestDuration + } + + result.LogLevel = c.LogLevel + if c2.LogLevel != "" { + result.LogLevel = c2.LogLevel + } + + result.LogFormat = c.LogFormat + if c2.LogFormat != "" { + result.LogFormat = c2.LogFormat + } + + result.ClusterName = c.ClusterName + if c2.ClusterName != "" { + result.ClusterName = c2.ClusterName + } + + result.ClusterCipherSuites = c.ClusterCipherSuites + if c2.ClusterCipherSuites != "" { + result.ClusterCipherSuites = c2.ClusterCipherSuites + } + + result.EnableUI = c.EnableUI + if c2.EnableUI { + result.EnableUI = c2.EnableUI + } + + 
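	// Editorial note, not part of the vendored file: the merge rules in this
	// function follow three patterns. Boolean options (DisableCache,
	// DisableMlock, EnableUI, and friends) merge with a logical OR, the
	// duration options (MaxLeaseTTL, DefaultLeaseTTL,
	// DefaultMaxRequestDuration) merge by taking the maximum, and string
	// options (LogLevel, ClusterName, and the rest) are overridden whenever
	// c2 supplies a non-empty value. Merging a config that sets
	// max_lease_ttl = "10h" against an override that sets "5h" therefore
	// keeps 10h, while a non-empty log_level in the override always wins.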
result.EnableRawEndpoint = c.EnableRawEndpoint + if c2.EnableRawEndpoint { + result.EnableRawEndpoint = c2.EnableRawEndpoint + } + + result.APIAddr = c.APIAddr + if c2.APIAddr != "" { + result.APIAddr = c2.APIAddr + } + + result.ClusterAddr = c.ClusterAddr + if c2.ClusterAddr != "" { + result.ClusterAddr = c2.ClusterAddr + } + + // Retain raw value so that it can be assigned to storage objects + result.DisableClustering = c.DisableClustering + result.DisableClusteringRaw = c.DisableClusteringRaw + if c2.DisableClusteringRaw != nil { + result.DisableClustering = c2.DisableClustering + result.DisableClusteringRaw = c2.DisableClusteringRaw + } + + result.PluginDirectory = c.PluginDirectory + if c2.PluginDirectory != "" { + result.PluginDirectory = c2.PluginDirectory + } + + result.PidFile = c.PidFile + if c2.PidFile != "" { + result.PidFile = c2.PidFile + } + + result.DisablePerformanceStandby = c.DisablePerformanceStandby + if c2.DisablePerformanceStandby { + result.DisablePerformanceStandby = c2.DisablePerformanceStandby + } + + result.DisableSealWrap = c.DisableSealWrap + if c2.DisableSealWrap { + result.DisableSealWrap = c2.DisableSealWrap + } + + result.DisableIndexing = c.DisableIndexing + if c2.DisableIndexing { + result.DisableIndexing = c2.DisableIndexing + } + + // Use values from top-level configuration for storage if set + if storage := result.Storage; storage != nil { + if result.APIAddr != "" { + storage.RedirectAddr = result.APIAddr + } + if result.ClusterAddr != "" { + storage.ClusterAddr = result.ClusterAddr + } + if result.DisableClusteringRaw != nil { + storage.DisableClustering = result.DisableClustering + } + } + + if haStorage := result.HAStorage; haStorage != nil { + if result.APIAddr != "" { + haStorage.RedirectAddr = result.APIAddr + } + if result.ClusterAddr != "" { + haStorage.ClusterAddr = result.ClusterAddr + } + if result.DisableClusteringRaw != nil { + haStorage.DisableClustering = result.DisableClustering + } + } + + return result +} + +// LoadConfig loads the configuration at the given path, regardless if +// its a file or directory. +func LoadConfig(path string) (*Config, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + return LoadConfigDir(path) + } + return LoadConfigFile(path) +} + +// LoadConfigFile loads the configuration from the given file. +func LoadConfigFile(path string) (*Config, error) { + // Read the file + d, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + return ParseConfig(string(d)) +} + +func ParseConfig(d string) (*Config, error) { + // Parse! 
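	// Editorial note, not part of the vendored file: decoding happens in two
	// passes. hcl.DecodeObject below fills the flat, `hcl:`-tagged fields of
	// Config (including the *Raw variants, which are normalized into typed
	// values with parseutil a few lines down). The block stanzas that carry
	// an `hcl:"-"` tag (storage, ha_storage, seal, entropy, listener,
	// telemetry) are then extracted from the HCL AST via ObjectList filters.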
+ obj, err := hcl.Parse(d) + if err != nil { + return nil, err + } + + // Start building the result + var result Config + if err := hcl.DecodeObject(&result, obj); err != nil { + return nil, err + } + + if result.MaxLeaseTTLRaw != nil { + if result.MaxLeaseTTL, err = parseutil.ParseDurationSecond(result.MaxLeaseTTLRaw); err != nil { + return nil, err + } + } + if result.DefaultLeaseTTLRaw != nil { + if result.DefaultLeaseTTL, err = parseutil.ParseDurationSecond(result.DefaultLeaseTTLRaw); err != nil { + return nil, err + } + } + + if result.DefaultMaxRequestDurationRaw != nil { + if result.DefaultMaxRequestDuration, err = parseutil.ParseDurationSecond(result.DefaultMaxRequestDurationRaw); err != nil { + return nil, err + } + } + + if result.EnableUIRaw != nil { + if result.EnableUI, err = parseutil.ParseBool(result.EnableUIRaw); err != nil { + return nil, err + } + } + + if result.DisableCacheRaw != nil { + if result.DisableCache, err = parseutil.ParseBool(result.DisableCacheRaw); err != nil { + return nil, err + } + } + + if result.DisableMlockRaw != nil { + if result.DisableMlock, err = parseutil.ParseBool(result.DisableMlockRaw); err != nil { + return nil, err + } + } + + if result.DisablePrintableCheckRaw != nil { + if result.DisablePrintableCheck, err = parseutil.ParseBool(result.DisablePrintableCheckRaw); err != nil { + return nil, err + } + } + + if result.EnableRawEndpointRaw != nil { + if result.EnableRawEndpoint, err = parseutil.ParseBool(result.EnableRawEndpointRaw); err != nil { + return nil, err + } + } + + if result.DisableClusteringRaw != nil { + if result.DisableClustering, err = parseutil.ParseBool(result.DisableClusteringRaw); err != nil { + return nil, err + } + } + + if result.DisablePerformanceStandbyRaw != nil { + if result.DisablePerformanceStandby, err = parseutil.ParseBool(result.DisablePerformanceStandbyRaw); err != nil { + return nil, err + } + } + + if result.DisableSealWrapRaw != nil { + if result.DisableSealWrap, err = parseutil.ParseBool(result.DisableSealWrapRaw); err != nil { + return nil, err + } + } + + if result.DisableIndexingRaw != nil { + if result.DisableIndexing, err = parseutil.ParseBool(result.DisableIndexingRaw); err != nil { + return nil, err + } + } + + list, ok := obj.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") + } + + // Look for storage but still support old backend + if o := list.Filter("storage"); len(o.Items) > 0 { + if err := ParseStorage(&result, o, "storage"); err != nil { + return nil, errwrap.Wrapf("error parsing 'storage': {{err}}", err) + } + } else { + if o := list.Filter("backend"); len(o.Items) > 0 { + if err := ParseStorage(&result, o, "backend"); err != nil { + return nil, errwrap.Wrapf("error parsing 'backend': {{err}}", err) + } + } + } + + if o := list.Filter("ha_storage"); len(o.Items) > 0 { + if err := parseHAStorage(&result, o, "ha_storage"); err != nil { + return nil, errwrap.Wrapf("error parsing 'ha_storage': {{err}}", err) + } + } else { + if o := list.Filter("ha_backend"); len(o.Items) > 0 { + if err := parseHAStorage(&result, o, "ha_backend"); err != nil { + return nil, errwrap.Wrapf("error parsing 'ha_backend': {{err}}", err) + } + } + } + + if o := list.Filter("hsm"); len(o.Items) > 0 { + if err := parseSeals(&result, o, "hsm"); err != nil { + return nil, errwrap.Wrapf("error parsing 'hsm': {{err}}", err) + } + } + + if o := list.Filter("seal"); len(o.Items) > 0 { + if err := parseSeals(&result, o, "seal"); err != nil { + return nil, 
errwrap.Wrapf("error parsing 'seal': {{err}}", err) + } + } + + if o := list.Filter("entropy"); len(o.Items) > 0 { + if err := parseEntropy(&result, o, "entropy"); err != nil { + return nil, errwrap.Wrapf("error parsing 'entropy': {{err}}", err) + } + } + + if o := list.Filter("listener"); len(o.Items) > 0 { + if err := parseListeners(&result, o); err != nil { + return nil, errwrap.Wrapf("error parsing 'listener': {{err}}", err) + } + } + + if o := list.Filter("telemetry"); len(o.Items) > 0 { + if err := parseTelemetry(&result, o); err != nil { + return nil, errwrap.Wrapf("error parsing 'telemetry': {{err}}", err) + } + } + + return &result, nil +} + +// LoadConfigDir loads all the configurations in the given directory +// in alphabetical order. +func LoadConfigDir(dir string) (*Config, error) { + f, err := os.Open(dir) + if err != nil { + return nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fmt.Errorf("configuration path must be a directory: %q", dir) + } + + var files []string + err = nil + for err != io.EOF { + var fis []os.FileInfo + fis, err = f.Readdir(128) + if err != nil && err != io.EOF { + return nil, err + } + + for _, fi := range fis { + // Ignore directories + if fi.IsDir() { + continue + } + + // Only care about files that are valid to load. + name := fi.Name() + skip := true + if strings.HasSuffix(name, ".hcl") { + skip = false + } else if strings.HasSuffix(name, ".json") { + skip = false + } + if skip || isTemporaryFile(name) { + continue + } + + path := filepath.Join(dir, name) + files = append(files, path) + } + } + + var result *Config + for _, f := range files { + config, err := LoadConfigFile(f) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error loading %q: {{err}}", f), err) + } + + if result == nil { + result = config + } else { + result = result.Merge(config) + } + } + + return result, nil +} + +// isTemporaryFile returns true or false depending on whether the +// provided file name is a temporary file for the following editors: +// emacs or vim. 
+func isTemporaryFile(name string) bool { + return strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, ".#") || // emacs + (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs +} + +func ParseStorage(result *Config, list *ast.ObjectList, name string) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one %q block is permitted", name) + } + + // Get our item + item := list.Items[0] + + key := name + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + var m map[string]string + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) + } + + // Pull out the redirect address since it's common to all backends + var redirectAddr string + if v, ok := m["redirect_addr"]; ok { + redirectAddr = v + delete(m, "redirect_addr") + } else if v, ok := m["advertise_addr"]; ok { + redirectAddr = v + delete(m, "advertise_addr") + } + + // Pull out the cluster address since it's common to all backends + var clusterAddr string + if v, ok := m["cluster_addr"]; ok { + clusterAddr = v + delete(m, "cluster_addr") + } + + var disableClustering bool + var err error + if v, ok := m["disable_clustering"]; ok { + disableClustering, err = strconv.ParseBool(v) + if err != nil { + return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) + } + delete(m, "disable_clustering") + } + + // Override with top-level values if they are set + if result.APIAddr != "" { + redirectAddr = result.APIAddr + } + + if result.ClusterAddr != "" { + clusterAddr = result.ClusterAddr + } + + if result.DisableClusteringRaw != nil { + disableClustering = result.DisableClustering + } + + result.Storage = &Storage{ + RedirectAddr: redirectAddr, + ClusterAddr: clusterAddr, + DisableClustering: disableClustering, + Type: strings.ToLower(key), + Config: m, + } + return nil +} + +func parseHAStorage(result *Config, list *ast.ObjectList, name string) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one %q block is permitted", name) + } + + // Get our item + item := list.Items[0] + + key := name + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + var m map[string]string + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) + } + + // Pull out the redirect address since it's common to all backends + var redirectAddr string + if v, ok := m["redirect_addr"]; ok { + redirectAddr = v + delete(m, "redirect_addr") + } else if v, ok := m["advertise_addr"]; ok { + redirectAddr = v + delete(m, "advertise_addr") + } + + // Pull out the cluster address since it's common to all backends + var clusterAddr string + if v, ok := m["cluster_addr"]; ok { + clusterAddr = v + delete(m, "cluster_addr") + } + + var disableClustering bool + var err error + if v, ok := m["disable_clustering"]; ok { + disableClustering, err = strconv.ParseBool(v) + if err != nil { + return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) + } + delete(m, "disable_clustering") + } + + // Override with top-level values if they are set + if result.APIAddr != "" { + redirectAddr = result.APIAddr + } + + if result.ClusterAddr != "" { + clusterAddr = result.ClusterAddr + } + + if result.DisableClusteringRaw != nil { + disableClustering = result.DisableClustering + } + + result.HAStorage = &Storage{ + RedirectAddr: redirectAddr, + ClusterAddr: clusterAddr, + DisableClustering: disableClustering, + Type: strings.ToLower(key), + Config: m, + } + return nil +} 
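An illustrative sketch, not part of the vendored file: the example test below is hypothetical (it assumes it sits in this package, where fmt is already imported), but it demonstrates two behaviors of ParseConfig and ParseStorage above. The stanza key is lower-cased into the storage type, and a redirect_addr inside the stanza is overridden by a top-level api_addr.

func ExampleParseConfig_storageOverride() {
	cfg, err := ParseConfig(`
api_addr = "https://vault.example.com:8200"

storage "Consul" {
  redirect_addr = "https://ignored.example.com"
  path          = "vault/"
}
`)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Storage.Type)         // stanza key, lower-cased
	fmt.Println(cfg.Storage.RedirectAddr) // top-level api_addr wins
	// Output:
	// consul
	// https://vault.example.com:8200
}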
+ +func parseSeals(result *Config, list *ast.ObjectList, blockName string) error { + if len(list.Items) > 2 { + return fmt.Errorf("only two or less %q blocks are permitted", blockName) + } + + seals := make([]*Seal, 0, len(list.Items)) + for _, item := range list.Items { + key := "seal" + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + var m map[string]string + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("seal.%s:", key)) + } + var disabled bool + var err error + if v, ok := m["disabled"]; ok { + disabled, err = strconv.ParseBool(v) + if err != nil { + return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key)) + } + delete(m, "disabled") + } + seals = append(seals, &Seal{ + Type: strings.ToLower(key), + Disabled: disabled, + Config: m, + }) + } + + if len(seals) == 2 && + (seals[0].Disabled && seals[1].Disabled || !seals[0].Disabled && !seals[1].Disabled) { + return errors.New("seals: two seals provided but both are disabled or neither are disabled") + } + + result.Seals = seals + + return nil +} + +func parseListeners(result *Config, list *ast.ObjectList) error { + listeners := make([]*Listener, 0, len(list.Items)) + for _, item := range list.Items { + key := "listener" + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + var m map[string]interface{} + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key)) + } + + lnType := strings.ToLower(key) + + listeners = append(listeners, &Listener{ + Type: lnType, + Config: m, + }) + } + + result.Listeners = listeners + return nil +} + +func parseTelemetry(result *Config, list *ast.ObjectList) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one 'telemetry' block is permitted") + } + + // Get our one item + item := list.Items[0] + + var t Telemetry + if err := hcl.DecodeObject(&t, item.Val); err != nil { + return multierror.Prefix(err, "telemetry:") + } + + if result.Telemetry == nil { + result.Telemetry = &Telemetry{} + } + + if err := hcl.DecodeObject(&result.Telemetry, item.Val); err != nil { + return multierror.Prefix(err, "telemetry:") + } + + if result.Telemetry.PrometheusRetentionTimeRaw != nil { + var err error + if result.Telemetry.PrometheusRetentionTime, err = parseutil.ParseDurationSecond(result.Telemetry.PrometheusRetentionTimeRaw); err != nil { + return err + } + } else { + result.Telemetry.PrometheusRetentionTime = prometheusDefaultRetentionTime + } + + return nil +} + +// Sanitized returns a copy of the config with all values that are considered +// sensitive stripped. It also strips all `*Raw` values that are mainly +// used for parsing. 
+// +// Specifically, the fields that this method strips are: +// - Storage.Config +// - HAStorage.Config +// - Seals.Config +// - Telemetry.CirconusAPIToken +func (c *Config) Sanitized() map[string]interface{} { + result := map[string]interface{}{ + "cache_size": c.CacheSize, + "disable_cache": c.DisableCache, + "disable_mlock": c.DisableMlock, + "disable_printable_check": c.DisablePrintableCheck, + + "enable_ui": c.EnableUI, + + "max_lease_ttl": c.MaxLeaseTTL, + "default_lease_ttl": c.DefaultLeaseTTL, + + "default_max_request_duration": c.DefaultMaxRequestDuration, + + "cluster_name": c.ClusterName, + "cluster_cipher_suites": c.ClusterCipherSuites, + + "plugin_directory": c.PluginDirectory, + + "log_level": c.LogLevel, + "log_format": c.LogFormat, + + "pid_file": c.PidFile, + "raw_storage_endpoint": c.EnableRawEndpoint, + + "api_addr": c.APIAddr, + "cluster_addr": c.ClusterAddr, + "disable_clustering": c.DisableClustering, + + "disable_performance_standby": c.DisablePerformanceStandby, + + "disable_sealwrap": c.DisableSealWrap, + + "disable_indexing": c.DisableIndexing, + } + + // Sanitize listeners + if len(c.Listeners) != 0 { + var sanitizedListeners []interface{} + for _, ln := range c.Listeners { + cleanLn := map[string]interface{}{ + "type": ln.Type, + "config": ln.Config, + } + sanitizedListeners = append(sanitizedListeners, cleanLn) + } + result["listeners"] = sanitizedListeners + } + + // Sanitize storage stanza + if c.Storage != nil { + sanitizedStorage := map[string]interface{}{ + "type": c.Storage.Type, + "redirect_addr": c.Storage.RedirectAddr, + "cluster_addr": c.Storage.ClusterAddr, + "disable_clustering": c.Storage.DisableClustering, + } + result["storage"] = sanitizedStorage + } + + // Sanitize HA storage stanza + if c.HAStorage != nil { + sanitizedHAStorage := map[string]interface{}{ + "type": c.HAStorage.Type, + "redirect_addr": c.HAStorage.RedirectAddr, + "cluster_addr": c.HAStorage.ClusterAddr, + "disable_clustering": c.HAStorage.DisableClustering, + } + result["ha_storage"] = sanitizedHAStorage + } + + // Sanitize seals stanza + if len(c.Seals) != 0 { + var sanitizedSeals []interface{} + for _, s := range c.Seals { + cleanSeal := map[string]interface{}{ + "type": s.Type, + "disabled": s.Disabled, + } + sanitizedSeals = append(sanitizedSeals, cleanSeal) + } + result["seals"] = sanitizedSeals + } + + // Sanitize telemetry stanza + if c.Telemetry != nil { + sanitizedTelemetry := map[string]interface{}{ + "statsite_address": c.Telemetry.StatsiteAddr, + "statsd_address": c.Telemetry.StatsdAddr, + "disable_hostname": c.Telemetry.DisableHostname, + "circonus_api_token": "", + "circonus_api_app": c.Telemetry.CirconusAPIApp, + "circonus_api_url": c.Telemetry.CirconusAPIURL, + "circonus_submission_interval": c.Telemetry.CirconusSubmissionInterval, + "circonus_submission_url": c.Telemetry.CirconusCheckSubmissionURL, + "circonus_check_id": c.Telemetry.CirconusCheckID, + "circonus_check_force_metric_activation": c.Telemetry.CirconusCheckForceMetricActivation, + "circonus_check_instance_id": c.Telemetry.CirconusCheckInstanceID, + "circonus_check_search_tag": c.Telemetry.CirconusCheckSearchTag, + "circonus_check_tags": c.Telemetry.CirconusCheckTags, + "circonus_check_display_name": c.Telemetry.CirconusCheckDisplayName, + "circonus_broker_id": c.Telemetry.CirconusBrokerID, + "circonus_broker_select_tag": c.Telemetry.CirconusBrokerSelectTag, + "dogstatsd_addr": c.Telemetry.DogStatsDAddr, + "dogstatsd_tags": c.Telemetry.DogStatsDTags, + "prometheus_retention_time": 
c.Telemetry.PrometheusRetentionTime, + "stackdriver_project_id": c.Telemetry.StackdriverProjectID, + "stackdriver_location": c.Telemetry.StackdriverLocation, + "stackdriver_namespace": c.Telemetry.StackdriverNamespace, + } + result["telemetry"] = sanitizedTelemetry + } + + return result +} diff --git a/vendor/github.com/hashicorp/vault/command/server/config_test_helpers.go b/vendor/github.com/hashicorp/vault/command/server/config_test_helpers.go new file mode 100644 index 00000000..12fff634 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/command/server/config_test_helpers.go @@ -0,0 +1,559 @@ +package server + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" +) + +func testLoadConfigFile_topLevel(t *testing.T, entropy *Entropy) { + config, err := LoadConfigFile("./test-fixtures/config2.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + Listeners: []*Listener{ + &Listener{ + Type: "tcp", + Config: map[string]interface{}{ + "address": "127.0.0.1:443", + }, + }, + }, + + Storage: &Storage{ + Type: "consul", + RedirectAddr: "top_level_api_addr", + ClusterAddr: "top_level_cluster_addr", + Config: map[string]string{ + "foo": "bar", + }, + }, + + HAStorage: &Storage{ + Type: "consul", + RedirectAddr: "top_level_api_addr", + ClusterAddr: "top_level_cluster_addr", + Config: map[string]string{ + "bar": "baz", + }, + DisableClustering: true, + }, + + Telemetry: &Telemetry{ + StatsdAddr: "bar", + StatsiteAddr: "foo", + DisableHostname: false, + DogStatsDAddr: "127.0.0.1:7254", + DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"}, + PrometheusRetentionTime: 30 * time.Second, + PrometheusRetentionTimeRaw: "30s", + }, + + DisableCache: true, + DisableCacheRaw: true, + DisableMlock: true, + DisableMlockRaw: true, + EnableUI: true, + EnableUIRaw: true, + + EnableRawEndpoint: true, + EnableRawEndpointRaw: true, + + DisableSealWrap: true, + DisableSealWrapRaw: true, + + MaxLeaseTTL: 10 * time.Hour, + MaxLeaseTTLRaw: "10h", + DefaultLeaseTTL: 10 * time.Hour, + DefaultLeaseTTLRaw: "10h", + ClusterName: "testcluster", + + PidFile: "./pidfile", + + APIAddr: "top_level_api_addr", + ClusterAddr: "top_level_cluster_addr", + } + if entropy != nil { + expected.Entropy = entropy + } + if !reflect.DeepEqual(config, expected) { + t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected) + } +} + +func testLoadConfigFile_json2(t *testing.T, entropy *Entropy) { + config, err := LoadConfigFile("./test-fixtures/config2.hcl.json") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + Listeners: []*Listener{ + &Listener{ + Type: "tcp", + Config: map[string]interface{}{ + "address": "127.0.0.1:443", + }, + }, + &Listener{ + Type: "tcp", + Config: map[string]interface{}{ + "address": "127.0.0.1:444", + }, + }, + }, + + Storage: &Storage{ + Type: "consul", + Config: map[string]string{ + "foo": "bar", + }, + }, + + HAStorage: &Storage{ + Type: "consul", + Config: map[string]string{ + "bar": "baz", + }, + DisableClustering: true, + }, + + CacheSize: 45678, + + EnableUI: true, + EnableUIRaw: true, + + EnableRawEndpoint: true, + EnableRawEndpointRaw: true, + + DisableSealWrap: true, + DisableSealWrapRaw: true, + + Telemetry: &Telemetry{ + StatsiteAddr: "foo", + StatsdAddr: "bar", + DisableHostname: true, + CirconusAPIToken: "0", + CirconusAPIApp: "vault", + CirconusAPIURL: "http://api.circonus.com/v2", + CirconusSubmissionInterval: "10s", + 
CirconusCheckSubmissionURL: "https://someplace.com/metrics", + CirconusCheckID: "0", + CirconusCheckForceMetricActivation: "true", + CirconusCheckInstanceID: "node1:vault", + CirconusCheckSearchTag: "service:vault", + CirconusCheckDisplayName: "node1:vault", + CirconusCheckTags: "cat1:tag1,cat2:tag2", + CirconusBrokerID: "0", + CirconusBrokerSelectTag: "dc:sfo", + PrometheusRetentionTime: 30 * time.Second, + PrometheusRetentionTimeRaw: "30s", + }, + } + if entropy != nil { + expected.Entropy = entropy + } + if !reflect.DeepEqual(config, expected) { + t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected) + } +} + +func testParseEntropy(t *testing.T, oss bool) { + var tests = []struct { + inConfig string + outErr error + outEntropy Entropy + }{ + { + inConfig: `entropy "seal" { + mode = "augmentation" + }`, + outErr: nil, + outEntropy: Entropy{Augmentation}, + }, + { + inConfig: `entropy "seal" { + mode = "a_mode_that_is_not_supported" + }`, + outErr: fmt.Errorf("the specified entropy mode %q is not supported", "a_mode_that_is_not_supported"), + }, + { + inConfig: `entropy "device_that_is_not_supported" { + mode = "augmentation" + }`, + outErr: fmt.Errorf("only the %q type of external entropy is supported", "seal"), + }, + { + inConfig: `entropy "seal" { + mode = "augmentation" + } + entropy "seal" { + mode = "augmentation" + }`, + outErr: fmt.Errorf("only one %q block is permitted", "entropy"), + }, + } + + var config Config + + for _, test := range tests { + obj, _ := hcl.Parse(strings.TrimSpace(test.inConfig)) + list, _ := obj.Node.(*ast.ObjectList) + objList := list.Filter("entropy") + err := parseEntropy(&config, objList, "entropy") + // validate the error, both should be nil or have the same Error() + switch { + case oss: + if config.Entropy != nil { + t.Fatalf("parsing Entropy should not be possible in oss but got a non-nil config.Entropy: %#v", config.Entropy) + } + case err != nil && test.outErr != nil: + if err.Error() != test.outErr.Error() { + t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr) + } + case err != test.outErr: + t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr) + case err == nil && config.Entropy != nil && *config.Entropy != test.outEntropy: + fmt.Printf("\n config.Entropy: %#v", config.Entropy) + t.Fatalf("entropy config mismatch: expected %#v got %#v", test.outEntropy, *config.Entropy) + } + } +} + +func testLoadConfigFile(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + Listeners: []*Listener{ + &Listener{ + Type: "tcp", + Config: map[string]interface{}{ + "address": "127.0.0.1:443", + }, + }, + }, + + Storage: &Storage{ + Type: "consul", + RedirectAddr: "foo", + Config: map[string]string{ + "foo": "bar", + }, + }, + + HAStorage: &Storage{ + Type: "consul", + RedirectAddr: "snafu", + Config: map[string]string{ + "bar": "baz", + }, + DisableClustering: true, + }, + + Telemetry: &Telemetry{ + StatsdAddr: "bar", + StatsiteAddr: "foo", + DisableHostname: false, + DogStatsDAddr: "127.0.0.1:7254", + DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"}, + PrometheusRetentionTime: prometheusDefaultRetentionTime, + }, + + DisableCache: true, + DisableCacheRaw: true, + DisableMlock: true, + DisableMlockRaw: true, + DisablePrintableCheckRaw: true, + DisablePrintableCheck: true, + EnableUI: true, + EnableUIRaw: true, + + EnableRawEndpoint: true, + EnableRawEndpointRaw: true, + + DisableSealWrap: true, + DisableSealWrapRaw: 
true, + + Entropy: nil, + + MaxLeaseTTL: 10 * time.Hour, + MaxLeaseTTLRaw: "10h", + DefaultLeaseTTL: 10 * time.Hour, + DefaultLeaseTTLRaw: "10h", + ClusterName: "testcluster", + + PidFile: "./pidfile", + } + if !reflect.DeepEqual(config, expected) { + t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected) + } +} + +func testLoadConfigFile_json(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config.hcl.json") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + Listeners: []*Listener{ + &Listener{ + Type: "tcp", + Config: map[string]interface{}{ + "address": "127.0.0.1:443", + }, + }, + }, + + Storage: &Storage{ + Type: "consul", + Config: map[string]string{ + "foo": "bar", + }, + DisableClustering: true, + }, + + ClusterCipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + + Telemetry: &Telemetry{ + StatsiteAddr: "baz", + StatsdAddr: "", + DisableHostname: false, + CirconusAPIToken: "", + CirconusAPIApp: "", + CirconusAPIURL: "", + CirconusSubmissionInterval: "", + CirconusCheckSubmissionURL: "", + CirconusCheckID: "", + CirconusCheckForceMetricActivation: "", + CirconusCheckInstanceID: "", + CirconusCheckSearchTag: "", + CirconusCheckDisplayName: "", + CirconusCheckTags: "", + CirconusBrokerID: "", + CirconusBrokerSelectTag: "", + PrometheusRetentionTime: prometheusDefaultRetentionTime, + }, + + MaxLeaseTTL: 10 * time.Hour, + MaxLeaseTTLRaw: "10h", + DefaultLeaseTTL: 10 * time.Hour, + DefaultLeaseTTLRaw: "10h", + ClusterName: "testcluster", + DisableCacheRaw: interface{}(nil), + DisableMlockRaw: interface{}(nil), + EnableUI: true, + EnableUIRaw: true, + PidFile: "./pidfile", + EnableRawEndpoint: true, + EnableRawEndpointRaw: true, + DisableSealWrap: true, + DisableSealWrapRaw: true, + Entropy: nil, + } + if !reflect.DeepEqual(config, expected) { + t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected) + } +} + +func testLoadConfigDir(t *testing.T) { + config, err := LoadConfigDir("./test-fixtures/config-dir") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + DisableCache: true, + DisableMlock: true, + + DisableClustering: false, + DisableClusteringRaw: false, + + APIAddr: "https://vault.local", + ClusterAddr: "https://127.0.0.1:444", + + Listeners: []*Listener{ + &Listener{ + Type: "tcp", + Config: map[string]interface{}{ + "address": "127.0.0.1:443", + }, + }, + }, + + Storage: &Storage{ + Type: "consul", + Config: map[string]string{ + "foo": "bar", + }, + RedirectAddr: "https://vault.local", + ClusterAddr: "https://127.0.0.1:444", + DisableClustering: false, + }, + + EnableUI: true, + + EnableRawEndpoint: true, + + Telemetry: &Telemetry{ + StatsiteAddr: "qux", + StatsdAddr: "baz", + DisableHostname: true, + PrometheusRetentionTime: prometheusDefaultRetentionTime, + }, + + MaxLeaseTTL: 10 * time.Hour, + DefaultLeaseTTL: 10 * time.Hour, + ClusterName: "testcluster", + } + if !reflect.DeepEqual(config, expected) { + t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected) + } +} + +func testConfig_Sanitized(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config3.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + sanitizedConfig := config.Sanitized() + + expected := map[string]interface{}{ + "api_addr": "top_level_api_addr", + "cache_size": 0, + "cluster_addr": "top_level_cluster_addr", + "cluster_cipher_suites": "", + "cluster_name": "testcluster", + "default_lease_ttl": 10 * time.Hour, + "default_max_request_duration": 0 * 
time.Second, + "disable_cache": true, + "disable_clustering": false, + "disable_indexing": false, + "disable_mlock": true, + "disable_performance_standby": false, + "disable_printable_check": false, + "disable_sealwrap": true, + "raw_storage_endpoint": true, + "enable_ui": true, + "ha_storage": map[string]interface{}{ + "cluster_addr": "top_level_cluster_addr", + "disable_clustering": true, + "redirect_addr": "top_level_api_addr", + "type": "consul"}, + "listeners": []interface{}{ + map[string]interface{}{ + "config": map[string]interface{}{ + "address": "127.0.0.1:443", + }, + "type": "tcp", + }, + }, + "log_format": "", + "log_level": "", + "max_lease_ttl": 10 * time.Hour, + "pid_file": "./pidfile", + "plugin_directory": "", + "seals": []interface{}{ + map[string]interface{}{ + "disabled": false, + "type": "awskms", + }, + }, + "storage": map[string]interface{}{ + "cluster_addr": "top_level_cluster_addr", + "disable_clustering": false, + "redirect_addr": "top_level_api_addr", + "type": "consul", + }, + "telemetry": map[string]interface{}{ + "circonus_api_app": "", + "circonus_api_token": "", + "circonus_api_url": "", + "circonus_broker_id": "", + "circonus_broker_select_tag": "", + "circonus_check_display_name": "", + "circonus_check_force_metric_activation": "", + "circonus_check_id": "", + "circonus_check_instance_id": "", + "circonus_check_search_tag": "", + "circonus_submission_url": "", + "circonus_check_tags": "", + "circonus_submission_interval": "", + "disable_hostname": false, + "dogstatsd_addr": "", + "dogstatsd_tags": []string(nil), + "prometheus_retention_time": 24 * time.Hour, + "stackdriver_location": "", + "stackdriver_namespace": "", + "stackdriver_project_id": "", + "statsd_address": "bar", + "statsite_address": ""}, + } + + if diff := deep.Equal(sanitizedConfig, expected); len(diff) > 0 { + t.Fatalf("bad, diff: %#v", diff) + } +} + +func testParseListeners(t *testing.T) { + obj, _ := hcl.Parse(strings.TrimSpace(` +listener "tcp" { + address = "127.0.0.1:443" + cluster_address = "127.0.0.1:8201" + tls_disable = false + tls_cert_file = "./certs/server.crt" + tls_key_file = "./certs/server.key" + tls_client_ca_file = "./certs/rootca.crt" + tls_min_version = "tls12" + tls_require_and_verify_client_cert = true + tls_disable_client_certs = true +}`)) + + var config Config + list, _ := obj.Node.(*ast.ObjectList) + objList := list.Filter("listener") + parseListeners(&config, objList) + listeners := config.Listeners + if len(listeners) == 0 { + t.Fatalf("expected at least one listener in the config") + } + listener := listeners[0] + if listener.Type != "tcp" { + t.Fatalf("expected tcp listener in the config") + } + + expected := &Config{ + Listeners: []*Listener{ + &Listener{ + Type: "tcp", + Config: map[string]interface{}{ + "address": "127.0.0.1:443", + "cluster_address": "127.0.0.1:8201", + "tls_disable": false, + "tls_cert_file": "./certs/server.crt", + "tls_key_file": "./certs/server.key", + "tls_client_ca_file": "./certs/rootca.crt", + "tls_min_version": "tls12", + "tls_require_and_verify_client_cert": true, + "tls_disable_client_certs": true, + }, + }, + }, + } + + if !reflect.DeepEqual(config, *expected) { + t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, *expected) + } + +} diff --git a/vendor/github.com/hashicorp/vault/command/server/config_util.go b/vendor/github.com/hashicorp/vault/command/server/config_util.go new file mode 100644 index 00000000..cade182e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/command/server/config_util.go @@ -0,0 +1,13 @@ 
+package server + +import ( + "github.com/hashicorp/hcl/hcl/ast" +) + +var ( + parseEntropy = parseEntropyOSS +) + +func parseEntropyOSS(result *Config, list *ast.ObjectList, blockName string) error { + return nil +} diff --git a/vendor/github.com/hashicorp/vault/command/server/listener.go b/vendor/github.com/hashicorp/vault/command/server/listener.go new file mode 100644 index 00000000..173b76ff --- /dev/null +++ b/vendor/github.com/hashicorp/vault/command/server/listener.go @@ -0,0 +1,68 @@ +package server + +import ( + "github.com/hashicorp/errwrap" + // We must import sha512 so that it registers with the runtime so that + // certificates that use it can be parsed. + _ "crypto/sha512" + "fmt" + "io" + "net" + + "github.com/hashicorp/vault/helper/proxyutil" + "github.com/hashicorp/vault/helper/reload" + "github.com/mitchellh/cli" +) + +// ListenerFactory is the factory function to create a listener. +type ListenerFactory func(map[string]interface{}, io.Writer, cli.Ui) (net.Listener, map[string]string, reload.ReloadFunc, error) + +// BuiltinListeners is the list of built-in listener types. +var BuiltinListeners = map[string]ListenerFactory{ + "tcp": tcpListenerFactory, +} + +// NewListener creates a new listener of the given type with the given +// configuration. The type is looked up in the BuiltinListeners map. +func NewListener(t string, config map[string]interface{}, logger io.Writer, ui cli.Ui) (net.Listener, map[string]string, reload.ReloadFunc, error) { + f, ok := BuiltinListeners[t] + if !ok { + return nil, nil, nil, fmt.Errorf("unknown listener type: %q", t) + } + + return f(config, logger, ui) +} + +func listenerWrapProxy(ln net.Listener, config map[string]interface{}) (net.Listener, error) { + behaviorRaw, ok := config["proxy_protocol_behavior"] + if !ok { + return ln, nil + } + + behavior, ok := behaviorRaw.(string) + if !ok { + return nil, fmt.Errorf("failed parsing proxy_protocol_behavior value: not a string") + } + + proxyProtoConfig := &proxyutil.ProxyProtoConfig{ + Behavior: behavior, + } + + if proxyProtoConfig.Behavior == "allow_authorized" || proxyProtoConfig.Behavior == "deny_unauthorized" { + authorizedAddrsRaw, ok := config["proxy_protocol_authorized_addrs"] + if !ok { + return nil, fmt.Errorf("proxy_protocol_behavior set but no proxy_protocol_authorized_addrs value") + } + + if err := proxyProtoConfig.SetAuthorizedAddrs(authorizedAddrsRaw); err != nil { + return nil, errwrap.Wrapf("failed parsing proxy_protocol_authorized_addrs: {{err}}", err) + } + } + + newLn, err := proxyutil.WrapInProxyProto(ln, proxyProtoConfig) + if err != nil { + return nil, errwrap.Wrapf("failed configuring PROXY protocol wrapper: {{err}}", err) + } + + return newLn, nil +} diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go b/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go new file mode 100644 index 00000000..6552a091 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go @@ -0,0 +1,124 @@ +package server + +import ( + "fmt" + "io" + "net" + "strconv" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/listenerutil" + "github.com/hashicorp/vault/helper/reload" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/mitchellh/cli" +) + +func tcpListenerFactory(config map[string]interface{}, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reload.ReloadFunc, error) { + bindProto := "tcp" + var addr string + addrRaw, ok := config["address"] + if !ok { + addr = 
"127.0.0.1:8200" + } else { + addr = addrRaw.(string) + } + + // If they've passed 0.0.0.0, we only want to bind on IPv4 + // rather than golang's dual stack default + if strings.HasPrefix(addr, "0.0.0.0:") { + bindProto = "tcp4" + } + + ln, err := net.Listen(bindProto, addr) + if err != nil { + return nil, nil, nil, err + } + + ln = TCPKeepAliveListener{ln.(*net.TCPListener)} + + ln, err = listenerWrapProxy(ln, config) + if err != nil { + return nil, nil, nil, err + } + + props := map[string]string{"addr": addr} + + ffAllowedRaw, ffAllowedOK := config["x_forwarded_for_authorized_addrs"] + if ffAllowedOK { + ffAllowed, err := parseutil.ParseAddrs(ffAllowedRaw) + if err != nil { + return nil, nil, nil, errwrap.Wrapf("error parsing \"x_forwarded_for_authorized_addrs\": {{err}}", err) + } + props["x_forwarded_for_authorized_addrs"] = fmt.Sprintf("%v", ffAllowed) + config["x_forwarded_for_authorized_addrs"] = ffAllowed + } + + if ffHopsRaw, ok := config["x_forwarded_for_hop_skips"]; ok { + ffHops64, err := parseutil.ParseInt(ffHopsRaw) + if err != nil { + return nil, nil, nil, errwrap.Wrapf("error parsing \"x_forwarded_for_hop_skips\": {{err}}", err) + } + if ffHops64 < 0 { + return nil, nil, nil, fmt.Errorf("\"x_forwarded_for_hop_skips\" cannot be negative") + } + ffHops := int(ffHops64) + props["x_forwarded_for_hop_skips"] = strconv.Itoa(ffHops) + config["x_forwarded_for_hop_skips"] = ffHops + } else if ffAllowedOK { + props["x_forwarded_for_hop_skips"] = "0" + config["x_forwarded_for_hop_skips"] = int(0) + } + + if ffRejectNotPresentRaw, ok := config["x_forwarded_for_reject_not_present"]; ok { + ffRejectNotPresent, err := parseutil.ParseBool(ffRejectNotPresentRaw) + if err != nil { + return nil, nil, nil, errwrap.Wrapf("error parsing \"x_forwarded_for_reject_not_present\": {{err}}", err) + } + props["x_forwarded_for_reject_not_present"] = strconv.FormatBool(ffRejectNotPresent) + config["x_forwarded_for_reject_not_present"] = ffRejectNotPresent + } else if ffAllowedOK { + props["x_forwarded_for_reject_not_present"] = "true" + config["x_forwarded_for_reject_not_present"] = true + } + + if ffRejectNonAuthorizedRaw, ok := config["x_forwarded_for_reject_not_authorized"]; ok { + ffRejectNonAuthorized, err := parseutil.ParseBool(ffRejectNonAuthorizedRaw) + if err != nil { + return nil, nil, nil, errwrap.Wrapf("error parsing \"x_forwarded_for_reject_not_authorized\": {{err}}", err) + } + props["x_forwarded_for_reject_not_authorized"] = strconv.FormatBool(ffRejectNonAuthorized) + config["x_forwarded_for_reject_not_authorized"] = ffRejectNonAuthorized + } else if ffAllowedOK { + props["x_forwarded_for_reject_not_authorized"] = "true" + config["x_forwarded_for_reject_not_authorized"] = true + } + + ln, props, reloadFunc, _, err := listenerutil.WrapTLS(ln, props, config, ui) + if err != nil { + return nil, nil, nil, err + } + + return ln, props, reloadFunc, nil +} + +// TCPKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +// +// This is copied directly from the Go source code. 
+type TCPKeepAliveListener struct { + *net.TCPListener +} + +func (ln TCPKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go b/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go new file mode 100644 index 00000000..d17c4f1b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go @@ -0,0 +1,357 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: helper/forwarding/types.proto + +package forwarding + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Request struct { + // Not used right now but reserving in case it turns out that streaming + // makes things more economical on the gRPC side + //uint64 id = 1; + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` + Url *URL `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` + HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries,proto3" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Body []byte `protobuf:"bytes,5,opt,name=body,proto3" json:"body,omitempty"` + Host string `protobuf:"bytes,6,opt,name=host,proto3" json:"host,omitempty"` + RemoteAddr string `protobuf:"bytes,7,opt,name=remote_addr,json=remoteAddr,proto3" json:"remote_addr,omitempty"` + PeerCertificates [][]byte `protobuf:"bytes,8,rep,name=peer_certificates,json=peerCertificates,proto3" json:"peer_certificates,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_e38697de88a2f47c, []int{0} +} + +func (m *Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request.Unmarshal(m, b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) +} +func (m *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(m, src) +} +func (m *Request) XXX_Size() int { + return xxx_messageInfo_Request.Size(m) +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetMethod() string { + if m != nil { + return m.Method + } + return "" +} + +func (m *Request) GetUrl() *URL { + if m != nil { + return m.Url + } + return nil +} + +func (m *Request) GetHeaderEntries() map[string]*HeaderEntry { + if m != nil { + return m.HeaderEntries + } + return nil +} + +func (m *Request) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func (m *Request) 
GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Request) GetRemoteAddr() string { + if m != nil { + return m.RemoteAddr + } + return "" +} + +func (m *Request) GetPeerCertificates() [][]byte { + if m != nil { + return m.PeerCertificates + } + return nil +} + +type URL struct { + Scheme string `protobuf:"bytes,1,opt,name=scheme,proto3" json:"scheme,omitempty"` + Opaque string `protobuf:"bytes,2,opt,name=opaque,proto3" json:"opaque,omitempty"` + // This isn't needed now but might be in the future, so we'll skip the + // number to keep the ordering in net/url + //UserInfo user = 3; + Host string `protobuf:"bytes,4,opt,name=host,proto3" json:"host,omitempty"` + Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` + RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath,proto3" json:"raw_path,omitempty"` + // This also isn't needed right now, but we'll reserve the number + //bool force_query = 7; + RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery,proto3" json:"raw_query,omitempty"` + Fragment string `protobuf:"bytes,9,opt,name=fragment,proto3" json:"fragment,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URL) Reset() { *m = URL{} } +func (m *URL) String() string { return proto.CompactTextString(m) } +func (*URL) ProtoMessage() {} +func (*URL) Descriptor() ([]byte, []int) { + return fileDescriptor_e38697de88a2f47c, []int{1} +} + +func (m *URL) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URL.Unmarshal(m, b) +} +func (m *URL) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URL.Marshal(b, m, deterministic) +} +func (m *URL) XXX_Merge(src proto.Message) { + xxx_messageInfo_URL.Merge(m, src) +} +func (m *URL) XXX_Size() int { + return xxx_messageInfo_URL.Size(m) +} +func (m *URL) XXX_DiscardUnknown() { + xxx_messageInfo_URL.DiscardUnknown(m) +} + +var xxx_messageInfo_URL proto.InternalMessageInfo + +func (m *URL) GetScheme() string { + if m != nil { + return m.Scheme + } + return "" +} + +func (m *URL) GetOpaque() string { + if m != nil { + return m.Opaque + } + return "" +} + +func (m *URL) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *URL) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *URL) GetRawPath() string { + if m != nil { + return m.RawPath + } + return "" +} + +func (m *URL) GetRawQuery() string { + if m != nil { + return m.RawQuery + } + return "" +} + +func (m *URL) GetFragment() string { + if m != nil { + return m.Fragment + } + return "" +} + +type HeaderEntry struct { + Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HeaderEntry) Reset() { *m = HeaderEntry{} } +func (m *HeaderEntry) String() string { return proto.CompactTextString(m) } +func (*HeaderEntry) ProtoMessage() {} +func (*HeaderEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_e38697de88a2f47c, []int{2} +} + +func (m *HeaderEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HeaderEntry.Unmarshal(m, b) +} +func (m *HeaderEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HeaderEntry.Marshal(b, m, deterministic) +} +func (m *HeaderEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeaderEntry.Merge(m, 
src) +} +func (m *HeaderEntry) XXX_Size() int { + return xxx_messageInfo_HeaderEntry.Size(m) +} +func (m *HeaderEntry) XXX_DiscardUnknown() { + xxx_messageInfo_HeaderEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_HeaderEntry proto.InternalMessageInfo + +func (m *HeaderEntry) GetValues() []string { + if m != nil { + return m.Values + } + return nil +} + +type Response struct { + // Not used right now but reserving in case it turns out that streaming + // makes things more economical on the gRPC side + //uint64 id = 1; + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` + // Added in 0.6.2 to ensure that the content-type is set appropriately, as + // well as any other information + HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries,proto3" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + LastRemoteWal uint64 `protobuf:"varint,5,opt,name=last_remote_wal,json=lastRemoteWal,proto3" json:"last_remote_wal,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_e38697de88a2f47c, []int{3} +} + +func (m *Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Response.Unmarshal(m, b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) +} +func (m *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(m, src) +} +func (m *Response) XXX_Size() int { + return xxx_messageInfo_Response.Size(m) +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +func (m *Response) GetStatusCode() uint32 { + if m != nil { + return m.StatusCode + } + return 0 +} + +func (m *Response) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func (m *Response) GetHeaderEntries() map[string]*HeaderEntry { + if m != nil { + return m.HeaderEntries + } + return nil +} + +func (m *Response) GetLastRemoteWal() uint64 { + if m != nil { + return m.LastRemoteWal + } + return 0 +} + +func init() { + proto.RegisterType((*Request)(nil), "forwarding.Request") + proto.RegisterMapType((map[string]*HeaderEntry)(nil), "forwarding.Request.HeaderEntriesEntry") + proto.RegisterType((*URL)(nil), "forwarding.URL") + proto.RegisterType((*HeaderEntry)(nil), "forwarding.HeaderEntry") + proto.RegisterType((*Response)(nil), "forwarding.Response") + proto.RegisterMapType((map[string]*HeaderEntry)(nil), "forwarding.Response.HeaderEntriesEntry") +} + +func init() { proto.RegisterFile("helper/forwarding/types.proto", fileDescriptor_e38697de88a2f47c) } + +var fileDescriptor_e38697de88a2f47c = []byte{ + // 497 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x95, 0xe3, 0xb4, 0x49, 0x26, 0x0d, 0x2d, 0x7b, 0x80, 0xa5, 0x08, 0x61, 0x22, 0x51, 0x22, + 0x01, 0x8e, 0x14, 0x2e, 0x88, 0x1b, 0x54, 0x48, 0x1c, 0x0a, 0x82, 0x95, 0x2a, 0x04, 0x17, 0x6b, + 0xe3, 0x9d, 0x64, 
0x2d, 0xec, 0xac, 0xb3, 0xbb, 0x6e, 0xe4, 0xdf, 0xe0, 0x4f, 0xf8, 0x27, 0x3e, + 0x04, 0xed, 0xda, 0x34, 0x46, 0x15, 0x12, 0x17, 0x4e, 0x99, 0xf7, 0xde, 0x64, 0x3c, 0x6f, 0x66, + 0x16, 0x1e, 0x48, 0xcc, 0x4b, 0xd4, 0xf3, 0x95, 0xd2, 0x3b, 0xae, 0x45, 0xb6, 0x59, 0xcf, 0x6d, + 0x5d, 0xa2, 0x89, 0x4b, 0xad, 0xac, 0x22, 0xb0, 0xe7, 0xa7, 0x3f, 0x7b, 0x30, 0x60, 0xb8, 0xad, + 0xd0, 0x58, 0x72, 0x07, 0x0e, 0x0b, 0xb4, 0x52, 0x09, 0xda, 0x8b, 0x82, 0xd9, 0x88, 0xb5, 0x88, + 0x3c, 0x82, 0xb0, 0xd2, 0x39, 0x0d, 0xa3, 0x60, 0x36, 0x5e, 0x1c, 0xc7, 0xfb, 0x7f, 0xc7, 0x97, + 0xec, 0x82, 0x39, 0x8d, 0xbc, 0x87, 0x5b, 0x12, 0xb9, 0x40, 0x9d, 0xe0, 0xc6, 0xea, 0x0c, 0x0d, + 0xed, 0x47, 0xe1, 0x6c, 0xbc, 0x38, 0xeb, 0x66, 0xb7, 0xdf, 0x89, 0xdf, 0xf9, 0xcc, 0xb7, 0x4d, + 0xa2, 0xfb, 0xa9, 0xd9, 0x44, 0x76, 0x39, 0x42, 0xa0, 0xbf, 0x54, 0xa2, 0xa6, 0x07, 0x51, 0x30, + 0x3b, 0x62, 0x3e, 0x76, 0x9c, 0x54, 0xc6, 0xd2, 0x43, 0xdf, 0x9b, 0x8f, 0xc9, 0x43, 0x18, 0x6b, + 0x2c, 0x94, 0xc5, 0x84, 0x0b, 0xa1, 0xe9, 0xc0, 0x4b, 0xd0, 0x50, 0xaf, 0x85, 0xd0, 0xe4, 0x29, + 0xdc, 0x2e, 0x11, 0x75, 0x92, 0xa2, 0xb6, 0xd9, 0x2a, 0x4b, 0xb9, 0x45, 0x43, 0x87, 0x51, 0x38, + 0x3b, 0x62, 0x27, 0x4e, 0x38, 0xef, 0xf0, 0xa7, 0x5f, 0x80, 0xdc, 0x6c, 0x8d, 0x9c, 0x40, 0xf8, + 0x0d, 0x6b, 0x1a, 0xf8, 0xda, 0x2e, 0x24, 0xcf, 0xe1, 0xe0, 0x8a, 0xe7, 0x15, 0xfa, 0x31, 0x8d, + 0x17, 0x77, 0xbb, 0x1e, 0xf7, 0x05, 0x6a, 0xd6, 0x64, 0xbd, 0xea, 0xbd, 0x0c, 0xa6, 0x3f, 0x02, + 0x08, 0x2f, 0xd9, 0x85, 0x1b, 0xb1, 0x49, 0x25, 0x16, 0xd8, 0xd6, 0x6b, 0x91, 0xe3, 0x55, 0xc9, + 0xb7, 0x6d, 0xcd, 0x11, 0x6b, 0xd1, 0xb5, 0xe9, 0x7e, 0xc7, 0x34, 0x81, 0x7e, 0xc9, 0xad, 0xf4, + 0xc3, 0x19, 0x31, 0x1f, 0x93, 0x7b, 0x30, 0xd4, 0x7c, 0x97, 0x78, 0xbe, 0x19, 0xd0, 0x40, 0xf3, + 0xdd, 0x47, 0x27, 0xdd, 0x87, 0x91, 0x93, 0xb6, 0x15, 0xea, 0x9a, 0x0e, 0xbd, 0xe6, 0x72, 0x3f, + 0x39, 0x4c, 0x4e, 0x61, 0xb8, 0xd2, 0x7c, 0x5d, 0xe0, 0xc6, 0xd2, 0x51, 0xa3, 0xfd, 0xc6, 0xd3, + 0xc7, 0x30, 0xee, 0xb8, 0x71, 0x2d, 0x7a, 0x3f, 0x86, 0x06, 0x51, 0xe8, 0x5a, 0x6c, 0xd0, 0xf4, + 0x7b, 0x0f, 0x86, 0x0c, 0x4d, 0xa9, 0x36, 0x06, 0xdd, 0x42, 0x8c, 0xe5, 0xb6, 0x32, 0x49, 0xaa, + 0x44, 0x63, 0x66, 0xc2, 0xa0, 0xa1, 0xce, 0x95, 0xc0, 0xeb, 0xcd, 0x86, 0x9d, 0xcd, 0x7e, 0xf8, + 0xcb, 0xf1, 0x3c, 0xf9, 0xf3, 0x78, 0x9a, 0x4f, 0xfc, 0xc3, 0xf5, 0x9c, 0xc1, 0x71, 0xce, 0x8d, + 0x4d, 0xda, 0xd3, 0xd8, 0xf1, 0xdc, 0xcf, 0xaa, 0xcf, 0x26, 0x8e, 0x66, 0x9e, 0xfd, 0xcc, 0xf3, + 0xff, 0xb8, 0xef, 0x37, 0xf1, 0xd7, 0x67, 0xeb, 0xcc, 0xca, 0x6a, 0x19, 0xa7, 0xaa, 0x98, 0x4b, + 0x6e, 0x64, 0x96, 0x2a, 0x5d, 0xce, 0xaf, 0x78, 0x95, 0xdb, 0xf9, 0x8d, 0xe7, 0xb9, 0x3c, 0xf4, + 0x2f, 0xf3, 0xc5, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xbd, 0xb1, 0xfc, 0xba, 0x03, 0x00, + 0x00, +} diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/types.proto b/vendor/github.com/hashicorp/vault/helper/forwarding/types.proto new file mode 100644 index 00000000..8f1376a1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/forwarding/types.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/helper/forwarding"; + +package forwarding; + +message Request { + // Not used right now but reserving in case it turns out that streaming + // makes things more economical on the gRPC side + //uint64 id = 1; + string method = 2; + URL url = 3; + map header_entries = 4; + bytes body = 5; + string host = 6; + string remote_addr = 7; + repeated bytes peer_certificates = 8; +} + +message URL { + string scheme = 1; + string opaque = 2; + // This isn't needed 
now but might be in the future, so we'll skip the
+  // number to keep the ordering in net/url
+  //UserInfo user = 3;
+  string host = 4;
+  string path = 5;
+  string raw_path = 6;
+  // This also isn't needed right now, but we'll reserve the number
+  //bool force_query = 7;
+  string raw_query = 8;
+  string fragment = 9;
+}
+
+message HeaderEntry {
+  repeated string values = 1;
+}
+
+message Response {
+  // Not used right now but reserving in case it turns out that streaming
+  // makes things more economical on the gRPC side
+  //uint64 id = 1;
+  uint32 status_code = 2;
+  bytes body = 3;
+  // Added in 0.6.2 to ensure that the content-type is set appropriately, as
+  // well as any other information
+  map<string, HeaderEntry> header_entries = 4;
+  uint64 last_remote_wal = 5;
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/util.go b/vendor/github.com/hashicorp/vault/helper/forwarding/util.go
new file mode 100644
index 00000000..90334052
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/forwarding/util.go
@@ -0,0 +1,218 @@
+package forwarding
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/hashicorp/vault/sdk/helper/compressutil"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+)
+
+type bufCloser struct {
+	*bytes.Buffer
+}
+
+func (b bufCloser) Close() error {
+	b.Reset()
+	return nil
+}
+
+// GenerateForwardedHTTPRequest generates a new http.Request that contains the
+// original request's information in the new request's body.
+func GenerateForwardedHTTPRequest(req *http.Request, addr string) (*http.Request, error) {
+	fq, err := GenerateForwardedRequest(req)
+	if err != nil {
+		return nil, err
+	}
+
+	var newBody []byte
+	switch os.Getenv("VAULT_MESSAGE_TYPE") {
+	case "json":
+		newBody, err = jsonutil.EncodeJSON(fq)
+	case "json_compress":
+		newBody, err = jsonutil.EncodeJSONAndCompress(fq, &compressutil.CompressionConfig{
+			Type: compressutil.CompressionTypeLZW,
+		})
+	case "proto3":
+		fallthrough
+	default:
+		newBody, err = proto.Marshal(fq)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	ret, err := http.NewRequest("POST", addr, bytes.NewBuffer(newBody))
+	if err != nil {
+		return nil, err
+	}
+
+	return ret, nil
+}
+
+func GenerateForwardedRequest(req *http.Request) (*Request, error) {
+	var reader io.Reader = req.Body
+	ctx := req.Context()
+	maxRequestSize := ctx.Value("max_request_size")
+	if maxRequestSize != nil {
+		max, ok := maxRequestSize.(int64)
+		if !ok {
+			return nil, errors.New("could not parse max_request_size from request context")
+		}
+		if max > 0 {
+			reader = io.LimitReader(req.Body, max)
+		}
+	}
+
+	body, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+
+	fq := Request{
+		Method:        req.Method,
+		HeaderEntries: make(map[string]*HeaderEntry, len(req.Header)),
+		Host:          req.Host,
+		RemoteAddr:    req.RemoteAddr,
+		Body:          body,
+	}
+
+	reqURL := req.URL
+	fq.Url = &URL{
+		Scheme:   reqURL.Scheme,
+		Opaque:   reqURL.Opaque,
+		Host:     reqURL.Host,
+		Path:     reqURL.Path,
+		RawPath:  reqURL.RawPath,
+		RawQuery: reqURL.RawQuery,
+		Fragment: reqURL.Fragment,
+	}
+
+	for k, v := range req.Header {
+		fq.HeaderEntries[k] = &HeaderEntry{
+			Values: v,
+		}
+	}
+
+	if req.TLS != nil && req.TLS.PeerCertificates != nil && len(req.TLS.PeerCertificates) > 0 {
+		fq.PeerCertificates = make([][]byte, len(req.TLS.PeerCertificates))
+		for i, cert := range req.TLS.PeerCertificates {
+			fq.PeerCertificates[i] = cert.Raw
+		}
+	}
+
+	return &fq, nil
+}
+
+// ParseForwardedHTTPRequest generates a new http.Request comprising the
+// values in the given request's body, assuming it correctly parses into a
+// ForwardedRequest.
+func ParseForwardedHTTPRequest(req *http.Request) (*http.Request, error) {
+	buf := bytes.NewBuffer(nil)
+	_, err := buf.ReadFrom(req.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	fq := new(Request)
+	switch os.Getenv("VAULT_MESSAGE_TYPE") {
+	case "json", "json_compress":
+		err = jsonutil.DecodeJSON(buf.Bytes(), fq)
+	default:
+		err = proto.Unmarshal(buf.Bytes(), fq)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return ParseForwardedRequest(fq)
+}
+
+func ParseForwardedRequest(fq *Request) (*http.Request, error) {
+	buf := bufCloser{
+		Buffer: bytes.NewBuffer(fq.Body),
+	}
+
+	ret := &http.Request{
+		Method:     fq.Method,
+		Header:     make(map[string][]string, len(fq.HeaderEntries)),
+		Body:       buf,
+		Host:       fq.Host,
+		RemoteAddr: fq.RemoteAddr,
+	}
+
+	ret.URL = &url.URL{
+		Scheme:   fq.Url.Scheme,
+		Opaque:   fq.Url.Opaque,
+		Host:     fq.Url.Host,
+		Path:     fq.Url.Path,
+		RawPath:  fq.Url.RawPath,
+		RawQuery: fq.Url.RawQuery,
+		Fragment: fq.Url.Fragment,
+	}
+
+	for k, v := range fq.HeaderEntries {
+		ret.Header[k] = v.Values
+	}
+
+	if fq.PeerCertificates != nil && len(fq.PeerCertificates) > 0 {
+		ret.TLS = &tls.ConnectionState{
+			PeerCertificates: make([]*x509.Certificate, len(fq.PeerCertificates)),
+		}
+		for i, certBytes := range fq.PeerCertificates {
+			cert, err := x509.ParseCertificate(certBytes)
+			if err != nil {
+				return nil, err
+			}
+			ret.TLS.PeerCertificates[i] = cert
+		}
+	}
+
+	return ret, nil
+}
+
+type RPCResponseWriter struct {
+	statusCode int
+	header     http.Header
+	body       *bytes.Buffer
+}
+
+// NewRPCResponseWriter returns an initialized RPCResponseWriter
+func NewRPCResponseWriter() *RPCResponseWriter {
+	w := &RPCResponseWriter{
+		header:     make(http.Header),
+		body:       new(bytes.Buffer),
+		statusCode: 200,
+	}
+	//w.header.Set("Content-Type", "application/octet-stream")
+	return w
+}
+
+func (w *RPCResponseWriter) Header() http.Header {
+	return w.header
+}
+
+func (w *RPCResponseWriter) Write(buf []byte) (int, error) {
+	w.body.Write(buf)
+	return len(buf), nil
+}
+
+func (w *RPCResponseWriter) WriteHeader(code int) {
+	w.statusCode = code
+}
+
+func (w *RPCResponseWriter) StatusCode() int {
+	return w.statusCode
+}
+
+func (w *RPCResponseWriter) Body() *bytes.Buffer {
+	return w.body
+}
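For orientation, here is a minimal sketch of how the two HTTP-level helpers above pair up across a forwarded call. It is illustrative only, not part of the vendored patch; the target address and request values are made up.

package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/hashicorp/vault/helper/forwarding"
)

func main() {
	orig, _ := http.NewRequest("PUT", "http://127.0.0.1:8200/v1/secret/data/app", strings.NewReader(`{"k":"v"}`))

	// Forwarding side: wrap the original request into a POST whose body is
	// the marshaled forwarding.Request (proto3 unless VAULT_MESSAGE_TYPE
	// selects JSON).
	fwd, err := forwarding.GenerateForwardedHTTPRequest(orig, "https://active.example.com:8201/forward")
	if err != nil {
		panic(err)
	}

	// Receiving side: unwrap the body back into an equivalent http.Request
	// (method, URL, headers, body, peer certificates).
	restored, err := forwarding.ParseForwardedHTTPRequest(fwd)
	if err != nil {
		panic(err)
	}
	fmt.Println(restored.Method, restored.URL.Path) // PUT /v1/secret/data/app
}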
diff --git a/vendor/github.com/hashicorp/vault/helper/hostutil/hostinfo.go b/vendor/github.com/hashicorp/vault/helper/hostutil/hostinfo.go
new file mode 100644
index 00000000..5244b181
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/hostutil/hostinfo.go
@@ -0,0 +1,90 @@
+// +build !openbsd
+
+package hostutil
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/shirou/gopsutil/cpu"
+	"github.com/shirou/gopsutil/disk"
+	"github.com/shirou/gopsutil/host"
+	"github.com/shirou/gopsutil/mem"
+)
+
+// HostInfo holds all the information that gets captured on the host. The
+// set of information captured depends on the host operating system. For more
+// information, refer to: https://github.com/shirou/gopsutil#current-status
+type HostInfo struct {
+	// Timestamp is the UTC time at which the information was collected.
+	Timestamp time.Time `json:"timestamp"`
+	// CPU returns information about the CPU such as family, model, cores, etc.
+	CPU []cpu.InfoStat `json:"cpu"`
+	// CPUTimes returns statistics on CPU usage represented in Jiffies.
+	CPUTimes []cpu.TimesStat `json:"cpu_times"`
+	// Disk returns statistics on disk usage for all accessible partitions.
+	Disk []*disk.UsageStat `json:"disk"`
+	// Host returns general host information such as hostname, platform, uptime,
+	// kernel version, etc.
+	Host *host.InfoStat `json:"host"`
+	// Memory contains statistics about the memory such as total, available, and
+	// used memory in number of bytes.
+	Memory *mem.VirtualMemoryStat `json:"memory"`
+}
+
+// CollectHostInfo returns information on the host, which includes general
+// host status, CPU, memory, and disk utilization.
+//
+// The function makes a best-effort attempt to capture as much information as
+// possible, continuing past any capture errors it encounters and appending
+// them to a multierror.Error that is returned at the end.
+func CollectHostInfo(ctx context.Context) (*HostInfo, error) {
+	var retErr *multierror.Error
+	info := &HostInfo{Timestamp: time.Now().UTC()}
+
+	if h, err := host.InfoWithContext(ctx); err != nil {
+		retErr = multierror.Append(retErr, &HostInfoError{"host", err})
+	} else {
+		info.Host = h
+	}
+
+	if v, err := mem.VirtualMemoryWithContext(ctx); err != nil {
+		retErr = multierror.Append(retErr, &HostInfoError{"memory", err})
+	} else {
+		info.Memory = v
+	}
+
+	parts, err := disk.PartitionsWithContext(ctx, false)
+	if err != nil {
+		retErr = multierror.Append(retErr, &HostInfoError{"disk", err})
+	} else {
+		var usage []*disk.UsageStat
+		for i, part := range parts {
+			u, err := disk.UsageWithContext(ctx, part.Mountpoint)
+			if err != nil {
+				retErr = multierror.Append(retErr, &HostInfoError{fmt.Sprintf("disk.%d", i), err})
+				continue
+			}
+			usage = append(usage, u)
+		}
+		info.Disk = usage
+	}
+
+	if c, err := cpu.InfoWithContext(ctx); err != nil {
+		retErr = multierror.Append(retErr, &HostInfoError{"cpu", err})
+	} else {
+		info.CPU = c
+	}
+
+	t, err := cpu.TimesWithContext(ctx, true)
+	if err != nil {
+		retErr = multierror.Append(retErr, &HostInfoError{"cpu_times", err})
+	} else {
+		info.CPUTimes = t
+	}
+
+	return info, retErr.ErrorOrNil()
+}
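Because CollectHostInfo is best-effort, a non-nil error can accompany a partially populated result, so callers should consult both return values. A minimal usage sketch (illustrative only, not part of the vendored patch):

package main

import (
	"context"
	"log"

	"github.com/hashicorp/vault/helper/hostutil"
)

func main() {
	// A non-nil err here is a *multierror.Error wrapping one *HostInfoError
	// per failed collector; whatever was gathered is still usable.
	info, err := hostutil.CollectHostInfo(context.Background())
	if err != nil {
		log.Printf("partial host info: %v", err)
	}
	if info != nil && info.Memory != nil {
		log.Printf("total memory: %d bytes", info.Memory.Total)
	}
}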
diff --git a/vendor/github.com/hashicorp/vault/helper/hostutil/hostinfo_error.go b/vendor/github.com/hashicorp/vault/helper/hostutil/hostinfo_error.go
new file mode 100644
index 00000000..ca5d8a29
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/hostutil/hostinfo_error.go
@@ -0,0 +1,17 @@
+package hostutil
+
+import "fmt"
+
+// HostInfoError is a typed error for more convenient error checking.
+type HostInfoError struct {
+	Type string
+	Err  error
+}
+
+func (e *HostInfoError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+
+func (e *HostInfoError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Type, e.Err.Error())
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/hostutil/hostinfo_openbsd.go b/vendor/github.com/hashicorp/vault/helper/hostutil/hostinfo_openbsd.go
new file mode 100644
index 00000000..b092d9b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/hostutil/hostinfo_openbsd.go
@@ -0,0 +1,22 @@
+// +build openbsd
+
+package hostutil
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+type HostInfo struct {
+	Timestamp time.Time     `json:"timestamp"`
+	CPU       []interface{} `json:"cpu"`
+	CPUTimes  []interface{} `json:"cpu_times"`
+	Disk      []interface{} `json:"disk"`
+	Host      interface{}   `json:"host"`
+	Memory    interface{}   `json:"memory"`
+}
+
+func CollectHostInfo(ctx context.Context) (*HostInfo, error) {
+	return nil, fmt.Errorf("host info not supported on this platform")
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/identity/identity.go b/vendor/github.com/hashicorp/vault/helper/identity/identity.go
new file mode 100644
index 00000000..46789c03
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/identity/identity.go
@@ -0,0 +1,65 @@
+package identity
+
+import (
+	"fmt"
+
+	proto "github.com/golang/protobuf/proto"
+	"github.com/hashicorp/errwrap"
+)
+
+func (g *Group) Clone() (*Group, error) {
+	if g == nil {
+		return nil, fmt.Errorf("nil group")
+	}
+
+	marshaledGroup, err := proto.Marshal(g)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to marshal group: {{err}}", err)
+	}
+
+	var clonedGroup Group
+	err = proto.Unmarshal(marshaledGroup, &clonedGroup)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to unmarshal group: {{err}}", err)
+	}
+
+	return &clonedGroup, nil
+}
+
+func (e *Entity) Clone() (*Entity, error) {
+	if e == nil {
+		return nil, fmt.Errorf("nil entity")
+	}
+
+	marshaledEntity, err := proto.Marshal(e)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to marshal entity: {{err}}", err)
+	}
+
+	var clonedEntity Entity
+	err = proto.Unmarshal(marshaledEntity, &clonedEntity)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to unmarshal entity: {{err}}", err)
+	}
+
+	return &clonedEntity, nil
+}
+
+func (p *Alias) Clone() (*Alias, error) {
+	if p == nil {
+		return nil, fmt.Errorf("nil alias")
+	}
+
+	marshaledAlias, err := proto.Marshal(p)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to marshal alias: {{err}}", err)
+	}
+
+	var clonedAlias Alias
+	err = proto.Unmarshal(marshaledAlias, &clonedAlias)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to unmarshal alias: {{err}}", err)
+	}
+
+	return &clonedAlias, nil
+}
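The Clone helpers above deep-copy by round-tripping the message through proto marshaling, so the copy shares no backing slices or maps with the original. A minimal sketch (illustrative only; the entity values are made up):

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/identity"
)

func main() {
	e := &identity.Entity{Name: "app-entity", Policies: []string{"default"}}

	// Clone marshals and unmarshals the proto message, producing a fully
	// independent copy.
	c, err := e.Clone()
	if err != nil {
		panic(err)
	}
	c.Policies = append(c.Policies, "admin")
	fmt.Println(e.Policies, c.Policies) // [default] [default admin]
}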
diff --git a/vendor/github.com/hashicorp/vault/helper/identity/mfa/mfa.go b/vendor/github.com/hashicorp/vault/helper/identity/mfa/mfa.go
new file mode 100644
index 00000000..d14f8340
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/identity/mfa/mfa.go
@@ -0,0 +1,27 @@
+package mfa
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/hashicorp/errwrap"
+)
+
+func (c *Config) Clone() (*Config, error) {
+	if c == nil {
+		return nil, fmt.Errorf("nil config")
+	}
+
+	marshaledConfig, err := proto.Marshal(c)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to marshal config: {{err}}", err)
+	}
+
+	var clonedConfig Config
+	err = proto.Unmarshal(marshaledConfig, &clonedConfig)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to unmarshal config: {{err}}", err)
+	}
+
+	return &clonedConfig, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/identity/mfa/sentinel.go b/vendor/github.com/hashicorp/vault/helper/identity/mfa/sentinel.go
new file mode 100644
index 00000000..f6d8c7b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/identity/mfa/sentinel.go
@@ -0,0 +1,25 @@
+package mfa
+
+func (c *Config) SentinelGet(key string) (interface{}, error) {
+	if c == nil {
+		return nil, nil
+	}
+	switch key {
+	case "type":
+		return c.Type, nil
+	case "name":
+		return c.Name, nil
+	case "mount_accessor":
+		return c.MountAccessor, nil
+	}
+
+	return nil, nil
+}
+
+func (c *Config) SentinelKeys() []string {
+	return []string{
+		"type",
+		"name",
+		"mount_accessor",
+	}
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/identity/mfa/types.pb.go b/vendor/github.com/hashicorp/vault/helper/identity/mfa/types.pb.go
new file mode 100644
index 00000000..89f48e54
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/identity/mfa/types.pb.go
@@ -0,0 +1,731 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: helper/identity/mfa/types.proto
+
+package mfa
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Config represents the configuration information used *along with* the MFA
+// secret tied to the caller's identity, to verify the MFA credentials
+// supplied. Configuration information differs by type. Handler of each type
+// should know what to expect from the Config field.
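+// For example, a Config whose Type is "totp" would be expected to carry its
+// parameters in the TOTPConfig member of the oneof below.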
+type Config struct { + Type string `sentinel:"" protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Name string `sentinel:"" protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + ID string `sentinel:"" protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` + MountAccessor string `sentinel:"" protobuf:"bytes,4,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` + UsernameFormat string `sentinel:"" protobuf:"bytes,5,opt,name=username_format,json=usernameFormat,proto3" json:"username_format,omitempty"` + // Types that are valid to be assigned to Config: + // *Config_TOTPConfig + // *Config_OktaConfig + // *Config_DuoConfig + // *Config_PingIDConfig + Config isConfig_Config `protobuf_oneof:"config"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Config) Reset() { *m = Config{} } +func (m *Config) String() string { return proto.CompactTextString(m) } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { + return fileDescriptor_2eb73493aac0ba29, []int{0} +} + +func (m *Config) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Config.Unmarshal(m, b) +} +func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Config.Marshal(b, m, deterministic) +} +func (m *Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config.Merge(m, src) +} +func (m *Config) XXX_Size() int { + return xxx_messageInfo_Config.Size(m) +} +func (m *Config) XXX_DiscardUnknown() { + xxx_messageInfo_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Config proto.InternalMessageInfo + +func (m *Config) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Config) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Config) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *Config) GetMountAccessor() string { + if m != nil { + return m.MountAccessor + } + return "" +} + +func (m *Config) GetUsernameFormat() string { + if m != nil { + return m.UsernameFormat + } + return "" +} + +type isConfig_Config interface { + isConfig_Config() +} + +type Config_TOTPConfig struct { + TOTPConfig *TOTPConfig `sentinel:"" protobuf:"bytes,6,opt,name=totp_config,json=totpConfig,proto3,oneof"` +} + +type Config_OktaConfig struct { + OktaConfig *OktaConfig `sentinel:"" protobuf:"bytes,7,opt,name=okta_config,json=oktaConfig,proto3,oneof"` +} + +type Config_DuoConfig struct { + DuoConfig *DuoConfig `sentinel:"" protobuf:"bytes,8,opt,name=duo_config,json=duoConfig,proto3,oneof"` +} + +type Config_PingIDConfig struct { + PingIDConfig *PingIDConfig `sentinel:"" protobuf:"bytes,9,opt,name=pingid_config,json=pingidConfig,proto3,oneof"` +} + +func (*Config_TOTPConfig) isConfig_Config() {} + +func (*Config_OktaConfig) isConfig_Config() {} + +func (*Config_DuoConfig) isConfig_Config() {} + +func (*Config_PingIDConfig) isConfig_Config() {} + +func (m *Config) GetConfig() isConfig_Config { + if m != nil { + return m.Config + } + return nil +} + +func (m *Config) GetTOTPConfig() *TOTPConfig { + if x, ok := m.GetConfig().(*Config_TOTPConfig); ok { + return x.TOTPConfig + } + return nil +} + +func (m *Config) GetOktaConfig() *OktaConfig { + if x, ok := m.GetConfig().(*Config_OktaConfig); ok { + return x.OktaConfig + } + return nil +} + +func (m *Config) GetDuoConfig() *DuoConfig { + if x, ok := m.GetConfig().(*Config_DuoConfig); ok { + return x.DuoConfig + } + 
return nil +} + +func (m *Config) GetPingIDConfig() *PingIDConfig { + if x, ok := m.GetConfig().(*Config_PingIDConfig); ok { + return x.PingIDConfig + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Config) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Config_TOTPConfig)(nil), + (*Config_OktaConfig)(nil), + (*Config_DuoConfig)(nil), + (*Config_PingIDConfig)(nil), + } +} + +// TOTPConfig represents the configuration information required to generate +// a TOTP key. The generated key will be stored in the entity along with these +// options. Validation of credentials supplied over the API will be validated +// by the information stored in the entity and not from the values in the +// configuration. +type TOTPConfig struct { + Issuer string `sentinel:"" protobuf:"bytes,1,opt,name=issuer,proto3" json:"issuer,omitempty"` + Period uint32 `sentinel:"" protobuf:"varint,2,opt,name=period,proto3" json:"period,omitempty"` + Algorithm int32 `sentinel:"" protobuf:"varint,3,opt,name=algorithm,proto3" json:"algorithm,omitempty"` + Digits int32 `sentinel:"" protobuf:"varint,4,opt,name=digits,proto3" json:"digits,omitempty"` + Skew uint32 `sentinel:"" protobuf:"varint,5,opt,name=skew,proto3" json:"skew,omitempty"` + KeySize uint32 `sentinel:"" protobuf:"varint,6,opt,name=key_size,json=keySize,proto3" json:"key_size,omitempty"` + QRSize int32 `sentinel:"" protobuf:"varint,7,opt,name=qr_size,json=qrSize,proto3" json:"qr_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TOTPConfig) Reset() { *m = TOTPConfig{} } +func (m *TOTPConfig) String() string { return proto.CompactTextString(m) } +func (*TOTPConfig) ProtoMessage() {} +func (*TOTPConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2eb73493aac0ba29, []int{1} +} + +func (m *TOTPConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TOTPConfig.Unmarshal(m, b) +} +func (m *TOTPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TOTPConfig.Marshal(b, m, deterministic) +} +func (m *TOTPConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TOTPConfig.Merge(m, src) +} +func (m *TOTPConfig) XXX_Size() int { + return xxx_messageInfo_TOTPConfig.Size(m) +} +func (m *TOTPConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TOTPConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TOTPConfig proto.InternalMessageInfo + +func (m *TOTPConfig) GetIssuer() string { + if m != nil { + return m.Issuer + } + return "" +} + +func (m *TOTPConfig) GetPeriod() uint32 { + if m != nil { + return m.Period + } + return 0 +} + +func (m *TOTPConfig) GetAlgorithm() int32 { + if m != nil { + return m.Algorithm + } + return 0 +} + +func (m *TOTPConfig) GetDigits() int32 { + if m != nil { + return m.Digits + } + return 0 +} + +func (m *TOTPConfig) GetSkew() uint32 { + if m != nil { + return m.Skew + } + return 0 +} + +func (m *TOTPConfig) GetKeySize() uint32 { + if m != nil { + return m.KeySize + } + return 0 +} + +func (m *TOTPConfig) GetQRSize() int32 { + if m != nil { + return m.QRSize + } + return 0 +} + +// DuoConfig represents the configuration information required to perform +// Duo authentication. 
+type DuoConfig struct { + IntegrationKey string `sentinel:"" protobuf:"bytes,1,opt,name=integration_key,json=integrationKey,proto3" json:"integration_key,omitempty"` + SecretKey string `sentinel:"" protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"` + APIHostname string `sentinel:"" protobuf:"bytes,3,opt,name=api_hostname,json=apiHostname,proto3" json:"api_hostname,omitempty"` + PushInfo string `sentinel:"" protobuf:"bytes,4,opt,name=push_info,json=pushInfo,proto3" json:"push_info,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DuoConfig) Reset() { *m = DuoConfig{} } +func (m *DuoConfig) String() string { return proto.CompactTextString(m) } +func (*DuoConfig) ProtoMessage() {} +func (*DuoConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2eb73493aac0ba29, []int{2} +} + +func (m *DuoConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DuoConfig.Unmarshal(m, b) +} +func (m *DuoConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DuoConfig.Marshal(b, m, deterministic) +} +func (m *DuoConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_DuoConfig.Merge(m, src) +} +func (m *DuoConfig) XXX_Size() int { + return xxx_messageInfo_DuoConfig.Size(m) +} +func (m *DuoConfig) XXX_DiscardUnknown() { + xxx_messageInfo_DuoConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_DuoConfig proto.InternalMessageInfo + +func (m *DuoConfig) GetIntegrationKey() string { + if m != nil { + return m.IntegrationKey + } + return "" +} + +func (m *DuoConfig) GetSecretKey() string { + if m != nil { + return m.SecretKey + } + return "" +} + +func (m *DuoConfig) GetAPIHostname() string { + if m != nil { + return m.APIHostname + } + return "" +} + +func (m *DuoConfig) GetPushInfo() string { + if m != nil { + return m.PushInfo + } + return "" +} + +// OktaConfig contains Okta configuration parameters required to perform Okta +// authentication. 
+type OktaConfig struct { + OrgName string `sentinel:"" protobuf:"bytes,1,opt,name=org_name,json=orgName,proto3" json:"org_name,omitempty"` + APIToken string `sentinel:"" protobuf:"bytes,2,opt,name=api_token,json=apiToken,proto3" json:"api_token,omitempty"` + Production bool `sentinel:"" protobuf:"varint,3,opt,name=production,proto3" json:"production,omitempty"` + BaseURL string `sentinel:"" protobuf:"bytes,4,opt,name=base_url,json=baseUrl,proto3" json:"base_url,omitempty"` + PrimaryEmail bool `sentinel:"" protobuf:"varint,5,opt,name=primary_email,json=primaryEmail,proto3" json:"primary_email,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OktaConfig) Reset() { *m = OktaConfig{} } +func (m *OktaConfig) String() string { return proto.CompactTextString(m) } +func (*OktaConfig) ProtoMessage() {} +func (*OktaConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2eb73493aac0ba29, []int{3} +} + +func (m *OktaConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OktaConfig.Unmarshal(m, b) +} +func (m *OktaConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OktaConfig.Marshal(b, m, deterministic) +} +func (m *OktaConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_OktaConfig.Merge(m, src) +} +func (m *OktaConfig) XXX_Size() int { + return xxx_messageInfo_OktaConfig.Size(m) +} +func (m *OktaConfig) XXX_DiscardUnknown() { + xxx_messageInfo_OktaConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_OktaConfig proto.InternalMessageInfo + +func (m *OktaConfig) GetOrgName() string { + if m != nil { + return m.OrgName + } + return "" +} + +func (m *OktaConfig) GetAPIToken() string { + if m != nil { + return m.APIToken + } + return "" +} + +func (m *OktaConfig) GetProduction() bool { + if m != nil { + return m.Production + } + return false +} + +func (m *OktaConfig) GetBaseURL() string { + if m != nil { + return m.BaseURL + } + return "" +} + +func (m *OktaConfig) GetPrimaryEmail() bool { + if m != nil { + return m.PrimaryEmail + } + return false +} + +// PingIDConfig contains PingID configuration information +type PingIDConfig struct { + UseBase64Key string `sentinel:"" protobuf:"bytes,1,opt,name=use_base64_key,json=useBase64Key,proto3" json:"use_base64_key,omitempty"` + UseSignature bool `sentinel:"" protobuf:"varint,2,opt,name=use_signature,json=useSignature,proto3" json:"use_signature,omitempty"` + Token string `sentinel:"" protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` + IDPURL string `sentinel:"" protobuf:"bytes,4,opt,name=idp_url,json=idpUrl,proto3" json:"idp_url,omitempty"` + OrgAlias string `sentinel:"" protobuf:"bytes,5,opt,name=org_alias,json=orgAlias,proto3" json:"org_alias,omitempty"` + AdminURL string `sentinel:"" protobuf:"bytes,6,opt,name=admin_url,json=adminUrl,proto3" json:"admin_url,omitempty"` + AuthenticatorURL string `sentinel:"" protobuf:"bytes,7,opt,name=authenticator_url,json=authenticatorUrl,proto3" json:"authenticator_url,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PingIDConfig) Reset() { *m = PingIDConfig{} } +func (m *PingIDConfig) String() string { return proto.CompactTextString(m) } +func (*PingIDConfig) ProtoMessage() {} +func (*PingIDConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2eb73493aac0ba29, []int{4} +} + +func (m *PingIDConfig) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_PingIDConfig.Unmarshal(m, b) +} +func (m *PingIDConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PingIDConfig.Marshal(b, m, deterministic) +} +func (m *PingIDConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingIDConfig.Merge(m, src) +} +func (m *PingIDConfig) XXX_Size() int { + return xxx_messageInfo_PingIDConfig.Size(m) +} +func (m *PingIDConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PingIDConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PingIDConfig proto.InternalMessageInfo + +func (m *PingIDConfig) GetUseBase64Key() string { + if m != nil { + return m.UseBase64Key + } + return "" +} + +func (m *PingIDConfig) GetUseSignature() bool { + if m != nil { + return m.UseSignature + } + return false +} + +func (m *PingIDConfig) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +func (m *PingIDConfig) GetIDPURL() string { + if m != nil { + return m.IDPURL + } + return "" +} + +func (m *PingIDConfig) GetOrgAlias() string { + if m != nil { + return m.OrgAlias + } + return "" +} + +func (m *PingIDConfig) GetAdminURL() string { + if m != nil { + return m.AdminURL + } + return "" +} + +func (m *PingIDConfig) GetAuthenticatorURL() string { + if m != nil { + return m.AuthenticatorURL + } + return "" +} + +// Secret represents all the types of secrets which the entity can hold. +// Each MFA type should add a secret type to the oneof block in this message. +type Secret struct { + MethodName string `sentinel:"" protobuf:"bytes,1,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + // Types that are valid to be assigned to Value: + // *Secret_TOTPSecret + Value isSecret_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Secret) Reset() { *m = Secret{} } +func (m *Secret) String() string { return proto.CompactTextString(m) } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { + return fileDescriptor_2eb73493aac0ba29, []int{5} +} + +func (m *Secret) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Secret.Unmarshal(m, b) +} +func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Secret.Marshal(b, m, deterministic) +} +func (m *Secret) XXX_Merge(src proto.Message) { + xxx_messageInfo_Secret.Merge(m, src) +} +func (m *Secret) XXX_Size() int { + return xxx_messageInfo_Secret.Size(m) +} +func (m *Secret) XXX_DiscardUnknown() { + xxx_messageInfo_Secret.DiscardUnknown(m) +} + +var xxx_messageInfo_Secret proto.InternalMessageInfo + +func (m *Secret) GetMethodName() string { + if m != nil { + return m.MethodName + } + return "" +} + +type isSecret_Value interface { + isSecret_Value() +} + +type Secret_TOTPSecret struct { + TOTPSecret *TOTPSecret `sentinel:"" protobuf:"bytes,2,opt,name=totp_secret,json=totpSecret,proto3,oneof"` +} + +func (*Secret_TOTPSecret) isSecret_Value() {} + +func (m *Secret) GetValue() isSecret_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Secret) GetTOTPSecret() *TOTPSecret { + if x, ok := m.GetValue().(*Secret_TOTPSecret); ok { + return x.TOTPSecret + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Secret) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Secret_TOTPSecret)(nil), + } +} + +// TOTPSecret represents the secret that gets stored in the entity about a +// particular MFA method. 
This information is used to validate the MFA +// credential supplied over the API during request time. +type TOTPSecret struct { + Issuer string `sentinel:"" protobuf:"bytes,1,opt,name=issuer,proto3" json:"issuer,omitempty"` + Period uint32 `sentinel:"" protobuf:"varint,2,opt,name=period,proto3" json:"period,omitempty"` + Algorithm int32 `sentinel:"" protobuf:"varint,3,opt,name=algorithm,proto3" json:"algorithm,omitempty"` + Digits int32 `sentinel:"" protobuf:"varint,4,opt,name=digits,proto3" json:"digits,omitempty"` + Skew uint32 `sentinel:"" protobuf:"varint,5,opt,name=skew,proto3" json:"skew,omitempty"` + KeySize uint32 `sentinel:"" protobuf:"varint,6,opt,name=key_size,json=keySize,proto3" json:"key_size,omitempty"` + // reserving 7 here just to keep parity with the config message above + AccountName string `sentinel:"" protobuf:"bytes,8,opt,name=account_name,json=accountName,proto3" json:"account_name,omitempty"` + Key string `sentinel:"" protobuf:"bytes,9,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TOTPSecret) Reset() { *m = TOTPSecret{} } +func (m *TOTPSecret) String() string { return proto.CompactTextString(m) } +func (*TOTPSecret) ProtoMessage() {} +func (*TOTPSecret) Descriptor() ([]byte, []int) { + return fileDescriptor_2eb73493aac0ba29, []int{6} +} + +func (m *TOTPSecret) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TOTPSecret.Unmarshal(m, b) +} +func (m *TOTPSecret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TOTPSecret.Marshal(b, m, deterministic) +} +func (m *TOTPSecret) XXX_Merge(src proto.Message) { + xxx_messageInfo_TOTPSecret.Merge(m, src) +} +func (m *TOTPSecret) XXX_Size() int { + return xxx_messageInfo_TOTPSecret.Size(m) +} +func (m *TOTPSecret) XXX_DiscardUnknown() { + xxx_messageInfo_TOTPSecret.DiscardUnknown(m) +} + +var xxx_messageInfo_TOTPSecret proto.InternalMessageInfo + +func (m *TOTPSecret) GetIssuer() string { + if m != nil { + return m.Issuer + } + return "" +} + +func (m *TOTPSecret) GetPeriod() uint32 { + if m != nil { + return m.Period + } + return 0 +} + +func (m *TOTPSecret) GetAlgorithm() int32 { + if m != nil { + return m.Algorithm + } + return 0 +} + +func (m *TOTPSecret) GetDigits() int32 { + if m != nil { + return m.Digits + } + return 0 +} + +func (m *TOTPSecret) GetSkew() uint32 { + if m != nil { + return m.Skew + } + return 0 +} + +func (m *TOTPSecret) GetKeySize() uint32 { + if m != nil { + return m.KeySize + } + return 0 +} + +func (m *TOTPSecret) GetAccountName() string { + if m != nil { + return m.AccountName + } + return "" +} + +func (m *TOTPSecret) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func init() { + proto.RegisterType((*Config)(nil), "mfa.Config") + proto.RegisterType((*TOTPConfig)(nil), "mfa.TOTPConfig") + proto.RegisterType((*DuoConfig)(nil), "mfa.DuoConfig") + proto.RegisterType((*OktaConfig)(nil), "mfa.OktaConfig") + proto.RegisterType((*PingIDConfig)(nil), "mfa.PingIDConfig") + proto.RegisterType((*Secret)(nil), "mfa.Secret") + proto.RegisterType((*TOTPSecret)(nil), "mfa.TOTPSecret") +} + +func init() { proto.RegisterFile("helper/identity/mfa/types.proto", fileDescriptor_2eb73493aac0ba29) } + +var fileDescriptor_2eb73493aac0ba29 = []byte{ + // 762 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xdd, 0xae, 0xdc, 0x34, + 0x10, 0x66, 0xf7, 0x74, 0xf3, 0x33, 
0xfb, 0xd3, 0xd6, 0x42, 0xb0, 0xa8, 0x40, 0xcb, 0x16, 0x44, + 0x25, 0xa4, 0x5d, 0x74, 0x40, 0x88, 0xdb, 0x2e, 0x05, 0xb5, 0x42, 0xa2, 0x55, 0xce, 0xe9, 0x0d, + 0x37, 0x91, 0x4f, 0xe2, 0x24, 0xd6, 0x26, 0x71, 0x6a, 0x3b, 0x45, 0x7b, 0x5e, 0x83, 0x57, 0xe0, + 0x29, 0x78, 0x10, 0xde, 0x02, 0xf1, 0x0a, 0x68, 0xc6, 0xce, 0x6e, 0x90, 0x78, 0x80, 0xde, 0x79, + 0xbe, 0x99, 0x6f, 0x3c, 0xfe, 0x66, 0xc6, 0xf0, 0xb0, 0x12, 0x75, 0x27, 0xf4, 0x4e, 0xe6, 0xa2, + 0xb5, 0xd2, 0x1e, 0x77, 0x4d, 0xc1, 0x77, 0xf6, 0xd8, 0x09, 0xb3, 0xed, 0xb4, 0xb2, 0x8a, 0x5d, + 0x34, 0x05, 0xdf, 0xfc, 0x3d, 0x85, 0xe0, 0x07, 0xd5, 0x16, 0xb2, 0x64, 0x0c, 0xee, 0xa0, 0x7b, + 0x3d, 0x79, 0x34, 0x79, 0x12, 0x27, 0x74, 0x46, 0xac, 0xe5, 0x8d, 0x58, 0x4f, 0x1d, 0x86, 0x67, + 0xb6, 0x82, 0xa9, 0xcc, 0xd7, 0x17, 0x84, 0x4c, 0x65, 0xce, 0xbe, 0x80, 0x55, 0xa3, 0xfa, 0xd6, + 0xa6, 0x3c, 0xcb, 0x84, 0x31, 0x4a, 0xaf, 0xef, 0x90, 0x6f, 0x49, 0xe8, 0x53, 0x0f, 0xb2, 0x2f, + 0xe1, 0x6e, 0x6f, 0x84, 0xc6, 0x14, 0x69, 0xa1, 0x74, 0xc3, 0xed, 0x7a, 0x46, 0x71, 0xab, 0x01, + 0xfe, 0x89, 0x50, 0x76, 0x09, 0x73, 0xab, 0x6c, 0x97, 0x66, 0x54, 0xd6, 0x3a, 0x78, 0x34, 0x79, + 0x32, 0xbf, 0xbc, 0xbb, 0x6d, 0x0a, 0xbe, 0xbd, 0x7e, 0x79, 0xfd, 0xca, 0x55, 0xfb, 0xfc, 0xbd, + 0x04, 0x30, 0xca, 0xd7, 0x7e, 0x09, 0x73, 0x75, 0xb0, 0x7c, 0xe0, 0x84, 0x23, 0xce, 0xcb, 0x83, + 0xe5, 0x67, 0x8e, 0x3a, 0x59, 0x6c, 0x07, 0x90, 0xf7, 0x6a, 0xa0, 0x44, 0x44, 0x59, 0x11, 0xe5, + 0x59, 0xaf, 0x4e, 0x8c, 0x38, 0x1f, 0x0c, 0xf6, 0x3d, 0x2c, 0x3b, 0xd9, 0x96, 0x32, 0x1f, 0x38, + 0x31, 0x71, 0xee, 0x13, 0xe7, 0x95, 0x6c, 0xcb, 0x17, 0xcf, 0x4e, 0xb4, 0x85, 0x8b, 0x74, 0xf6, + 0x3e, 0x82, 0xc0, 0x51, 0x36, 0x7f, 0x4e, 0x00, 0xce, 0xaf, 0x60, 0x1f, 0x40, 0x20, 0x8d, 0xe9, + 0x85, 0xf6, 0xaa, 0x7b, 0x0b, 0xf1, 0x4e, 0x68, 0xa9, 0x72, 0x52, 0x7e, 0x99, 0x78, 0x8b, 0x7d, + 0x0c, 0x31, 0xaf, 0x4b, 0xa5, 0xa5, 0xad, 0x1a, 0x6a, 0xc1, 0x2c, 0x39, 0x03, 0xc8, 0xca, 0x65, + 0x29, 0xad, 0xa1, 0x0e, 0xcc, 0x12, 0x6f, 0x61, 0x17, 0xcd, 0x41, 0xfc, 0x46, 0x7a, 0x2f, 0x13, + 0x3a, 0xb3, 0x8f, 0x20, 0x3a, 0x88, 0x63, 0x6a, 0xe4, 0xad, 0x20, 0x89, 0x97, 0x49, 0x78, 0x10, + 0xc7, 0x2b, 0x79, 0x2b, 0xd8, 0x87, 0x10, 0xbe, 0xd1, 0xce, 0x13, 0xba, 0x3c, 0x6f, 0x34, 0x3a, + 0x36, 0xbf, 0x4f, 0x20, 0x3e, 0x69, 0x83, 0x0d, 0x95, 0xad, 0x15, 0xa5, 0xe6, 0x56, 0xaa, 0x36, + 0x3d, 0x88, 0xa3, 0x7f, 0xc4, 0x6a, 0x04, 0xff, 0x2c, 0x8e, 0xec, 0x13, 0x00, 0x23, 0x32, 0x2d, + 0x2c, 0xc5, 0xb8, 0x51, 0x8a, 0x1d, 0x82, 0xee, 0xcf, 0x60, 0xc1, 0x3b, 0x99, 0x56, 0xca, 0x58, + 0x9a, 0x35, 0x37, 0x59, 0x73, 0xde, 0xc9, 0xe7, 0x1e, 0x62, 0x0f, 0x20, 0xee, 0x7a, 0x53, 0xa5, + 0xb2, 0x2d, 0x94, 0x9f, 0xae, 0x08, 0x81, 0x17, 0x6d, 0xa1, 0x36, 0x7f, 0x4c, 0x00, 0xce, 0x4d, + 0xc6, 0x87, 0x29, 0x5d, 0xa6, 0x94, 0xca, 0xd5, 0x13, 0x2a, 0x5d, 0xfe, 0xe2, 0xd3, 0xe0, 0x4d, + 0x56, 0x1d, 0x44, 0xeb, 0xeb, 0x88, 0x78, 0x27, 0xaf, 0xd1, 0x66, 0x9f, 0x02, 0x74, 0x5a, 0xe5, + 0x7d, 0x86, 0x65, 0x53, 0x11, 0x51, 0x32, 0x42, 0x30, 0xef, 0x0d, 0x37, 0x22, 0xed, 0x75, 0xed, + 0x4b, 0x08, 0xd1, 0x7e, 0xad, 0x6b, 0xf6, 0x18, 0x96, 0x9d, 0x96, 0x0d, 0xd7, 0xc7, 0x54, 0x34, + 0x5c, 0xd6, 0x24, 0x74, 0x94, 0x2c, 0x3c, 0xf8, 0x23, 0x62, 0x9b, 0x7f, 0x26, 0xb0, 0x18, 0x0f, + 0x09, 0xfb, 0x1c, 0x70, 0xf2, 0x53, 0x4c, 0xf2, 0xdd, 0xb7, 0x23, 0xf9, 0x16, 0xbd, 0x11, 0x7b, + 0x02, 0x51, 0x9d, 0xc7, 0xb0, 0xc4, 0x28, 0x23, 0xcb, 0x96, 0xdb, 0x5e, 0xbb, 0x55, 0x8c, 0x28, + 0xe8, 0x6a, 0xc0, 0xd8, 0xfb, 0x30, 0x73, 0x8f, 0x72, 0xda, 0x39, 0x03, 0xfb, 0x28, 0xf3, 0x6e, + 0x54, 0x70, 0x20, 0xf3, 0x0e, 0xeb, 0x7d, 0x00, 0x31, 0x4a, 
0xc4, 0x6b, 0xc9, 0x8d, 0x5f, 0x42, + 0xd4, 0xec, 0x29, 0xda, 0x24, 0x52, 0xde, 0xc8, 0x96, 0x78, 0x81, 0x17, 0x09, 0x01, 0x64, 0x7e, + 0x05, 0xf7, 0x79, 0x6f, 0x2b, 0xfc, 0x51, 0x32, 0x6e, 0x95, 0xa6, 0xa0, 0x90, 0x82, 0xee, 0xfd, + 0xc7, 0xf1, 0x5a, 0xd7, 0x9b, 0x02, 0x82, 0x2b, 0xea, 0x32, 0x7b, 0x08, 0xf3, 0x46, 0xd8, 0x4a, + 0xe5, 0xe3, 0xb6, 0x80, 0x83, 0xa8, 0x33, 0xc3, 0xce, 0xbb, 0xa9, 0xa0, 0x37, 0x8e, 0x77, 0xde, + 0xa5, 0x19, 0x76, 0xde, 0x59, 0xfb, 0x10, 0x66, 0x6f, 0x79, 0xdd, 0x8b, 0xcd, 0x5f, 0x7e, 0xa7, + 0xfc, 0x65, 0xef, 0xe4, 0x4e, 0xe1, 0x90, 0x67, 0x19, 0x7d, 0x93, 0x24, 0x41, 0xe4, 0x87, 0xdc, + 0x61, 0xa4, 0xc1, 0x3d, 0xb8, 0xc0, 0x21, 0x88, 0xc9, 0x83, 0xc7, 0xfd, 0xd7, 0xbf, 0x6e, 0x4b, + 0x69, 0xab, 0xfe, 0x66, 0x9b, 0xa9, 0x66, 0x57, 0x71, 0x53, 0xc9, 0x4c, 0xe9, 0x6e, 0xf7, 0x96, + 0xf7, 0xb5, 0xdd, 0xfd, 0xcf, 0xff, 0x7e, 0x13, 0xd0, 0xd7, 0xfe, 0xcd, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0xc7, 0x93, 0x21, 0xaa, 0xfd, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/vault/helper/identity/mfa/types.proto b/vendor/github.com/hashicorp/vault/helper/identity/mfa/types.proto new file mode 100644 index 00000000..69ec7cb6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/identity/mfa/types.proto @@ -0,0 +1,92 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/helper/identity/mfa"; + +package mfa; + +// Config represents the configuration information used *along with* the MFA +// secret tied to caller's identity, to verify the MFA credentials supplied. +// Configuration information differs by type. Handler of each type should know +// what to expect from the Config field. +message Config { + string type = 1; + string name = 2; + string id = 3; + string mount_accessor = 4; + string username_format = 5; + oneof config { + TOTPConfig totp_config = 6; + OktaConfig okta_config = 7; + DuoConfig duo_config = 8; + PingIDConfig pingid_config = 9; + } +} + +// TOTPConfig represents the configuration information required to generate +// a TOTP key. The generated key will be stored in the entity along with these +// options. Validation of credentials supplied over the API will be validated +// by the information stored in the entity and not from the values in the +// configuration. +message TOTPConfig { + string issuer = 1; + uint32 period = 2; + int32 algorithm = 3; + int32 digits = 4; + uint32 skew = 5; + uint32 key_size = 6; + int32 qr_size = 7; +} + +// DuoConfig represents the configuration information required to perform +// Duo authentication. +message DuoConfig { + string integration_key = 1; + string secret_key = 2; + string api_hostname = 3; + string push_info = 4; +} + +// OktaConfig contains Okta configuration parameters required to perform Okta +// authentication. +message OktaConfig { + string org_name = 1; + string api_token = 2; + bool production = 3; + string base_url = 4; + bool primary_email = 5; +} + +// PingIDConfig contains PingID configuration information +message PingIDConfig { + string use_base64_key = 1; + bool use_signature = 2; + string token = 3; + string idp_url = 4; + string org_alias = 5; + string admin_url = 6; + string authenticator_url = 7; +} + +// Secret represents all the types of secrets which the entity can hold. +// Each MFA type should add a secret type to the oneof block in this message. +message Secret { + string method_name = 1; + oneof value { + TOTPSecret totp_secret = 2; + } +} + +// TOTPSecret represents the secret that gets stored in the entity about a +// particular MFA method. 
This information is used to validate the MFA
+// credential supplied over the API during request time.
+message TOTPSecret {
+  string issuer = 1;
+  uint32 period = 2;
+  int32 algorithm = 3;
+  int32 digits = 4;
+  uint32 skew = 5;
+  uint32 key_size = 6;
+  // reserving 7 here just to keep parity with the config message above
+  string account_name = 8;
+  string key = 9;
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/identity/sentinel.go b/vendor/github.com/hashicorp/vault/helper/identity/sentinel.go
new file mode 100644
index 00000000..bf3cfff5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/identity/sentinel.go
@@ -0,0 +1,125 @@
+package identity
+
+import "github.com/golang/protobuf/ptypes"
+
+func (e *Entity) SentinelGet(key string) (interface{}, error) {
+	if e == nil {
+		return nil, nil
+	}
+	switch key {
+	case "aliases":
+		return e.Aliases, nil
+	case "id":
+		return e.ID, nil
+	case "meta", "metadata":
+		return e.Metadata, nil
+	case "name":
+		return e.Name, nil
+	case "creation_time":
+		return ptypes.TimestampString(e.CreationTime), nil
+	case "last_update_time":
+		return ptypes.TimestampString(e.LastUpdateTime), nil
+	case "merged_entity_ids":
+		return e.MergedEntityIDs, nil
+	case "policies":
+		return e.Policies, nil
+	}
+
+	return nil, nil
+}
+
+func (e *Entity) SentinelKeys() []string {
+	return []string{
+		"id",
+		"aliases",
+		"metadata",
+		"meta",
+		"name",
+		"creation_time",
+		"last_update_time",
+		"merged_entity_ids",
+		"policies",
+	}
+}
+
+func (a *Alias) SentinelGet(key string) (interface{}, error) {
+	if a == nil {
+		return nil, nil
+	}
+	switch key {
+	case "id":
+		return a.ID, nil
+	case "mount_type":
+		return a.MountType, nil
+	case "mount_accessor":
+		return a.MountAccessor, nil
+	case "mount_path":
+		return a.MountPath, nil
+	case "meta", "metadata":
+		return a.Metadata, nil
+	case "name":
+		return a.Name, nil
+	case "creation_time":
+		return ptypes.TimestampString(a.CreationTime), nil
+	case "last_update_time":
+		return ptypes.TimestampString(a.LastUpdateTime), nil
+	case "merged_from_entity_ids":
+		return a.MergedFromCanonicalIDs, nil
+	}
+
+	return nil, nil
+}
+
+func (a *Alias) SentinelKeys() []string {
+	return []string{
+		"id",
+		"mount_type",
+		"mount_path",
+		"meta",
+		"metadata",
+		"name",
+		"creation_time",
+		"last_update_time",
+		"merged_from_entity_ids",
+	}
+}
+
+func (g *Group) SentinelGet(key string) (interface{}, error) {
+	if g == nil {
+		return nil, nil
+	}
+	switch key {
+	case "id":
+		return g.ID, nil
+	case "name":
+		return g.Name, nil
+	case "policies":
+		return g.Policies, nil
+	case "parent_group_ids":
+		return g.ParentGroupIDs, nil
+	case "member_entity_ids":
+		return g.MemberEntityIDs, nil
+	case "meta", "metadata":
+		return g.Metadata, nil
+	case "creation_time":
+		return ptypes.TimestampString(g.CreationTime), nil
+	case "last_update_time":
+		return ptypes.TimestampString(g.LastUpdateTime), nil
+	}
+
+	return nil, nil
+}
+
+func (g *Group) SentinelKeys() []string {
+	return []string{
+		"id",
+		"name",
+		"policies",
+		"parent_group_ids",
+		"member_entity_ids",
+		"metadata",
+		"meta",
+		"creation_time",
+		"last_update_time",
+	}
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/identity/templating.go b/vendor/github.com/hashicorp/vault/helper/identity/templating.go
new file mode 100644
index 00000000..aea81d3f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/identity/templating.go
@@ -0,0 +1,359 @@
+package identity
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
"github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/namespace" +) + +var ( + ErrUnbalancedTemplatingCharacter = errors.New("unbalanced templating characters") + ErrNoEntityAttachedToToken = errors.New("string contains entity template directives but no entity was provided") + ErrNoGroupsAttachedToToken = errors.New("string contains groups template directives but no groups were provided") + ErrTemplateValueNotFound = errors.New("no value could be found for one of the template directives") +) + +const ( + ACLTemplating = iota // must be the first value for backwards compatibility + JSONTemplating +) + +type PopulateStringInput struct { + String string + ValidityCheckOnly bool + Entity *Entity + Groups []*Group + Namespace *namespace.Namespace + Mode int // processing mode, ACLTemplate or JSONTemplating + Now time.Time // optional, defaults to current time + + templateHandler templateHandlerFunc + groupIDs []string + groupNames []string +} + +// templateHandlerFunc allows generating string outputs based on data type, and +// different handlers can be used based on mode. For example in ACL mode, strings +// are emitted verbatim, but they're wrapped in double quotes for JSON mode. And +// some structures, like slices, might be rendered in one mode but prohibited in +// another. +type templateHandlerFunc func(interface{}, ...string) (string, error) + +// aclTemplateHandler processes known parameter data types when operating +// in ACL mode. +func aclTemplateHandler(v interface{}, keys ...string) (string, error) { + switch t := v.(type) { + case string: + if t == "" { + return "", ErrTemplateValueNotFound + } + return t, nil + case []string: + return "", ErrTemplateValueNotFound + case map[string]string: + if len(keys) > 0 { + val, ok := t[keys[0]] + if ok { + return val, nil + } + } + return "", ErrTemplateValueNotFound + } + + return "", fmt.Errorf("unknown type: %T", v) +} + +// jsonTemplateHandler processes known parameter data types when operating +// in JSON mode. 
+func jsonTemplateHandler(v interface{}, keys ...string) (string, error) { + jsonMarshaller := func(v interface{}) (string, error) { + enc, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(enc), nil + } + + switch t := v.(type) { + case string: + return strconv.Quote(t), nil + case []string: + return jsonMarshaller(t) + case map[string]string: + if len(keys) > 0 { + return strconv.Quote(t[keys[0]]), nil + } + if t == nil { + return "{}", nil + } + return jsonMarshaller(t) + } + + return "", fmt.Errorf("unknown type: %T", v) +} + +func PopulateString(p PopulateStringInput) (bool, string, error) { + if p.String == "" { + return false, "", nil + } + + // preprocess groups + for _, g := range p.Groups { + p.groupNames = append(p.groupNames, g.Name) + p.groupIDs = append(p.groupIDs, g.ID) + } + + // set up mode-specific handler + switch p.Mode { + case ACLTemplating: + p.templateHandler = aclTemplateHandler + case JSONTemplating: + p.templateHandler = jsonTemplateHandler + default: + return false, "", fmt.Errorf("unknown mode %q", p.Mode) + } + + var subst bool + splitStr := strings.Split(p.String, "{{") + + if len(splitStr) >= 1 { + if strings.Contains(splitStr[0], "}}") { + return false, "", ErrUnbalancedTemplatingCharacter + } + if len(splitStr) == 1 { + return false, p.String, nil + } + } + + var b strings.Builder + if !p.ValidityCheckOnly { + b.Grow(2 * len(p.String)) + } + + for i, str := range splitStr { + if i == 0 { + if !p.ValidityCheckOnly { + b.WriteString(str) + } + continue + } + splitPiece := strings.Split(str, "}}") + switch len(splitPiece) { + case 2: + subst = true + if !p.ValidityCheckOnly { + tmplStr, err := performTemplating(strings.TrimSpace(splitPiece[0]), &p) + if err != nil { + return false, "", err + } + b.WriteString(tmplStr) + b.WriteString(splitPiece[1]) + } + default: + return false, "", ErrUnbalancedTemplatingCharacter + } + } + + return subst, b.String(), nil +} + +func performTemplating(input string, p *PopulateStringInput) (string, error) { + + performAliasTemplating := func(trimmed string, alias *Alias) (string, error) { + switch { + case trimmed == "id": + return p.templateHandler(alias.ID) + + case trimmed == "name": + return p.templateHandler(alias.Name) + + case trimmed == "metadata": + return p.templateHandler(alias.Metadata) + + case strings.HasPrefix(trimmed, "metadata."): + split := strings.SplitN(trimmed, ".", 2) + return p.templateHandler(alias.Metadata, split[1]) + } + + return "", ErrTemplateValueNotFound + } + + performEntityTemplating := func(trimmed string) (string, error) { + switch { + case trimmed == "id": + return p.templateHandler(p.Entity.ID) + + case trimmed == "name": + return p.templateHandler(p.Entity.Name) + + case trimmed == "metadata": + return p.templateHandler(p.Entity.Metadata) + + case strings.HasPrefix(trimmed, "metadata."): + split := strings.SplitN(trimmed, ".", 2) + return p.templateHandler(p.Entity.Metadata, split[1]) + + case trimmed == "groups.names": + return p.templateHandler(p.groupNames) + + case trimmed == "groups.ids": + return p.templateHandler(p.groupIDs) + + case strings.HasPrefix(trimmed, "aliases."): + split := strings.SplitN(strings.TrimPrefix(trimmed, "aliases."), ".", 2) + if len(split) != 2 { + return "", errors.New("invalid alias selector") + } + var alias *Alias + for _, a := range p.Entity.Aliases { + if split[0] == a.MountAccessor { + alias = a + break + } + } + if alias == nil { + if p.Mode == ACLTemplating { + return "", errors.New("alias not found") + } + + // An empty 
alias is sufficient for generating defaults + alias = &Alias{Metadata: make(map[string]string)} + } + return performAliasTemplating(split[1], alias) + } + + return "", ErrTemplateValueNotFound + } + + performGroupsTemplating := func(trimmed string) (string, error) { + var ids bool + + selectorSplit := strings.SplitN(trimmed, ".", 2) + + switch { + case len(selectorSplit) != 2: + return "", errors.New("invalid groups selector") + + case selectorSplit[0] == "ids": + ids = true + + case selectorSplit[0] == "names": + + default: + return "", errors.New("invalid groups selector") + } + trimmed = selectorSplit[1] + + accessorSplit := strings.SplitN(trimmed, ".", 2) + if len(accessorSplit) != 2 { + return "", errors.New("invalid groups accessor") + } + var found *Group + for _, group := range p.Groups { + var compare string + if ids { + compare = group.ID + } else { + if p.Namespace != nil && group.NamespaceID == p.Namespace.ID { + compare = group.Name + } else { + continue + } + } + + if compare == accessorSplit[0] { + found = group + break + } + } + + if found == nil { + return "", fmt.Errorf("entity is not a member of group %q", accessorSplit[0]) + } + + trimmed = accessorSplit[1] + + switch { + case trimmed == "id": + return found.ID, nil + + case trimmed == "name": + if found.Name == "" { + return "", ErrTemplateValueNotFound + } + return found.Name, nil + + case strings.HasPrefix(trimmed, "metadata."): + val, ok := found.Metadata[strings.TrimPrefix(trimmed, "metadata.")] + if !ok { + return "", ErrTemplateValueNotFound + } + return val, nil + } + + return "", ErrTemplateValueNotFound + } + + performTimeTemplating := func(trimmed string) (string, error) { + now := p.Now + if now.IsZero() { + now = time.Now() + } + + opsSplit := strings.SplitN(trimmed, ".", 3) + + if opsSplit[0] != "now" { + return "", fmt.Errorf("invalid time selector %q", opsSplit[0]) + } + + result := now + switch len(opsSplit) { + case 1: + // return current time + case 2: + return "", errors.New("missing time operand") + + case 3: + duration, err := time.ParseDuration(opsSplit[2]) + if err != nil { + return "", errwrap.Wrapf("invalid duration: {{err}}", err) + } + + switch opsSplit[1] { + case "plus": + result = result.Add(duration) + case "minus": + result = result.Add(-duration) + default: + return "", fmt.Errorf("invalid time operator %q", opsSplit[1]) + } + } + + return strconv.FormatInt(result.Unix(), 10), nil + } + + switch { + case strings.HasPrefix(input, "identity.entity."): + if p.Entity == nil { + return "", ErrNoEntityAttachedToToken + } + return performEntityTemplating(strings.TrimPrefix(input, "identity.entity.")) + + case strings.HasPrefix(input, "identity.groups."): + if len(p.Groups) == 0 { + return "", ErrNoGroupsAttachedToToken + } + return performGroupsTemplating(strings.TrimPrefix(input, "identity.groups.")) + + case strings.HasPrefix(input, "time."): + return performTimeTemplating(strings.TrimPrefix(input, "time.")) + } + + return "", ErrTemplateValueNotFound +} diff --git a/vendor/github.com/hashicorp/vault/helper/identity/types.pb.go b/vendor/github.com/hashicorp/vault/helper/identity/types.pb.go new file mode 100644 index 00000000..71d5267a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/identity/types.pb.go @@ -0,0 +1,793 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: helper/identity/types.proto + +package identity + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + mfa "github.com/hashicorp/vault/helper/identity/mfa" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Group represents an identity group. +type Group struct { + // ID is the unique identifier for this group + ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Name is the unique name for this group + Name string `sentinel:"" protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Policies are the vault policies to be granted to members of this group + Policies []string `sentinel:"" protobuf:"bytes,3,rep,name=policies,proto3" json:"policies,omitempty"` + // ParentGroupIDs are the identifiers of those groups to which this group is a + // member of. These will serve as references to the parent group in the + // hierarchy. + ParentGroupIDs []string `sentinel:"" protobuf:"bytes,4,rep,name=parent_group_ids,json=parentGroupIds,proto3" json:"parent_group_ids,omitempty"` + // MemberEntityIDs are the identifiers of entities which are members of this + // group + MemberEntityIDs []string `sentinel:"" protobuf:"bytes,5,rep,name=member_entity_ids,json=memberEntityIDs,proto3" json:"member_entity_ids,omitempty"` + // Metadata represents the custom data tied with this group + Metadata map[string]string `sentinel:"" protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CreationTime is the time at which this group was created + CreationTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,7,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + // LastUpdateTime is the time at which this group was last modified + LastUpdateTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,8,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + // ModifyIndex tracks the number of updates to the group. It is useful to detect + // updates to the groups. + ModifyIndex uint64 `sentinel:"" protobuf:"varint,9,opt,name=modify_index,json=modifyIndex,proto3" json:"modify_index,omitempty"` + // BucketKey is the path of the storage packer key into which this group is + // stored. + BucketKey string `sentinel:"" protobuf:"bytes,10,opt,name=bucket_key,json=bucketKey,proto3" json:"bucket_key,omitempty"` + // Alias is used to mark this group as an internal mapping of a group that + // is external to the identity store. Alias can only be set if the 'type' + // is set to 'external'. + Alias *Alias `sentinel:"" protobuf:"bytes,11,opt,name=alias,proto3" json:"alias,omitempty"` + // Type indicates if this group is an internal group or an external group. + // Memberships of the internal groups can be managed over the API whereas + // the memberships on the external group --for which a corresponding alias + // will be set-- will be managed automatically. 
+ Type string `sentinel:"" protobuf:"bytes,12,opt,name=type,proto3" json:"type,omitempty"` + // NamespaceID is the identifier of the namespace to which this group + // belongs to. Do not return this value over the API when reading the + // group. + NamespaceID string `sentinel:"" protobuf:"bytes,13,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Group) Reset() { *m = Group{} } +func (m *Group) String() string { return proto.CompactTextString(m) } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { + return fileDescriptor_319efdc71a5d7416, []int{0} +} + +func (m *Group) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Group.Unmarshal(m, b) +} +func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Group.Marshal(b, m, deterministic) +} +func (m *Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Group.Merge(m, src) +} +func (m *Group) XXX_Size() int { + return xxx_messageInfo_Group.Size(m) +} +func (m *Group) XXX_DiscardUnknown() { + xxx_messageInfo_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_Group proto.InternalMessageInfo + +func (m *Group) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *Group) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Group) GetPolicies() []string { + if m != nil { + return m.Policies + } + return nil +} + +func (m *Group) GetParentGroupIDs() []string { + if m != nil { + return m.ParentGroupIDs + } + return nil +} + +func (m *Group) GetMemberEntityIDs() []string { + if m != nil { + return m.MemberEntityIDs + } + return nil +} + +func (m *Group) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Group) GetCreationTime() *timestamp.Timestamp { + if m != nil { + return m.CreationTime + } + return nil +} + +func (m *Group) GetLastUpdateTime() *timestamp.Timestamp { + if m != nil { + return m.LastUpdateTime + } + return nil +} + +func (m *Group) GetModifyIndex() uint64 { + if m != nil { + return m.ModifyIndex + } + return 0 +} + +func (m *Group) GetBucketKey() string { + if m != nil { + return m.BucketKey + } + return "" +} + +func (m *Group) GetAlias() *Alias { + if m != nil { + return m.Alias + } + return nil +} + +func (m *Group) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Group) GetNamespaceID() string { + if m != nil { + return m.NamespaceID + } + return "" +} + +// Entity represents an entity that gets persisted and indexed. +// Entity is fundamentally composed of zero or many aliases. +type Entity struct { + // Aliases are the identities that this entity is made of. This can be + // empty as well to favor being able to create the entity first and then + // incrementally adding aliases. + Aliases []*Alias `sentinel:"" protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty"` + // ID is the unique identifier of the entity which always be a UUID. This + // should never be allowed to be updated. + ID string `sentinel:"" protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // Name is a unique identifier of the entity which is intended to be + // human-friendly. The default name might not be human friendly since it + // gets suffixed by a UUID, but it can optionally be updated, unlike the ID + // field. 
+ Name string `sentinel:"" protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Metadata represents the explicit metadata which is set by the + // clients. This is useful to tie any information pertaining to the + // aliases. This is a non-unique field of entity, meaning multiple + // entities can have the same metadata set. Entities will be indexed based + // on this explicit metadata. This enables virtual groupings of entities + // based on its metadata. + Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CreationTime is the time at which this entity is first created. + CreationTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,5,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + // LastUpdateTime is the most recent time at which the properties of this + // entity got modified. This is helpful in filtering out entities based on + // its age and to take action on them, if desired. + LastUpdateTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,6,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + // MergedEntityIDs are the entities which got merged to this one. Entities + // will be indexed based on all the entities that got merged into it. This + // helps to apply the actions on this entity on the tokens that are merged + // to the merged entities. Merged entities will be deleted entirely and + // this is the only trackable trail of its earlier presence. + MergedEntityIDs []string `sentinel:"" protobuf:"bytes,7,rep,name=merged_entity_ids,json=mergedEntityIDs,proto3" json:"merged_entity_ids,omitempty"` + // Policies the entity is entitled to + Policies []string `sentinel:"" protobuf:"bytes,8,rep,name=policies,proto3" json:"policies,omitempty"` + // BucketKey is the path of the storage packer key into which this entity is + // stored. + BucketKey string `sentinel:"" protobuf:"bytes,9,opt,name=bucket_key,json=bucketKey,proto3" json:"bucket_key,omitempty"` + // MFASecrets holds the MFA secrets indexed by the identifier of the MFA + // method configuration. + MFASecrets map[string]*mfa.Secret `sentinel:"" protobuf:"bytes,10,rep,name=mfa_secrets,json=mfaSecrets,proto3" json:"mfa_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Disabled indicates whether tokens associated with the account should not + // be able to be used + Disabled bool `sentinel:"" protobuf:"varint,11,opt,name=disabled,proto3" json:"disabled,omitempty"` + // NamespaceID is the identifier of the namespace to which this entity + // belongs to. Do not return this value over the API when reading the + // entity. 
+ NamespaceID string `sentinel:"" protobuf:"bytes,12,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { + return fileDescriptor_319efdc71a5d7416, []int{1} +} + +func (m *Entity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Entity.Unmarshal(m, b) +} +func (m *Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Entity.Marshal(b, m, deterministic) +} +func (m *Entity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Entity.Merge(m, src) +} +func (m *Entity) XXX_Size() int { + return xxx_messageInfo_Entity.Size(m) +} +func (m *Entity) XXX_DiscardUnknown() { + xxx_messageInfo_Entity.DiscardUnknown(m) +} + +var xxx_messageInfo_Entity proto.InternalMessageInfo + +func (m *Entity) GetAliases() []*Alias { + if m != nil { + return m.Aliases + } + return nil +} + +func (m *Entity) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *Entity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Entity) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Entity) GetCreationTime() *timestamp.Timestamp { + if m != nil { + return m.CreationTime + } + return nil +} + +func (m *Entity) GetLastUpdateTime() *timestamp.Timestamp { + if m != nil { + return m.LastUpdateTime + } + return nil +} + +func (m *Entity) GetMergedEntityIDs() []string { + if m != nil { + return m.MergedEntityIDs + } + return nil +} + +func (m *Entity) GetPolicies() []string { + if m != nil { + return m.Policies + } + return nil +} + +func (m *Entity) GetBucketKey() string { + if m != nil { + return m.BucketKey + } + return "" +} + +func (m *Entity) GetMFASecrets() map[string]*mfa.Secret { + if m != nil { + return m.MFASecrets + } + return nil +} + +func (m *Entity) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +func (m *Entity) GetNamespaceID() string { + if m != nil { + return m.NamespaceID + } + return "" +} + +// Alias represents the alias that gets stored inside of the +// entity object in storage and also represents in an in-memory index of an +// alias object. +type Alias struct { + // ID is the unique identifier that represents this alias + ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // CanonicalID is the entity identifier to which this alias belongs to + CanonicalID string `sentinel:"" protobuf:"bytes,2,opt,name=canonical_id,json=canonicalId,proto3" json:"canonical_id,omitempty"` + // MountType is the backend mount's type to which this alias belongs to. + // This enables categorically querying aliases of specific backend types. + MountType string `sentinel:"" protobuf:"bytes,3,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` + // MountAccessor is the backend mount's accessor to which this alias + // belongs to. + MountAccessor string `sentinel:"" protobuf:"bytes,4,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` + // MountPath is the backend mount's path to which the Maccessor belongs to. This + // field is not used for any operational purposes. This is only returned when + // alias is read, only as a nicety. 
+ MountPath string `sentinel:"" protobuf:"bytes,5,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty"` + // Metadata is the explicit metadata that clients set against an entity + // which enables virtual grouping of aliases. Aliases will be indexed + // against their metadata. + Metadata map[string]string `sentinel:"" protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Name is the identifier of this alias in its authentication source. + // This does not uniquely identify an alias in Vault. This in conjunction + // with MountAccessor form to be the factors that represent an alias in a + // unique way. Aliases will be indexed based on this combined uniqueness + // factor. + Name string `sentinel:"" protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // CreationTime is the time at which this alias was first created + CreationTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,8,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + // LastUpdateTime is the most recent time at which the properties of this + // alias got modified. This is helpful in filtering out aliases based + // on its age and to take action on them, if desired. + LastUpdateTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,9,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + // MergedFromCanonicalIDs is the FIFO history of merging activity + MergedFromCanonicalIDs []string `sentinel:"" protobuf:"bytes,10,rep,name=merged_from_canonical_ids,json=mergedFromCanonicalIds,proto3" json:"merged_from_canonical_ids,omitempty"` + // NamespaceID is the identifier of the namespace to which this alias + // belongs. 
+ NamespaceID string `sentinel:"" protobuf:"bytes,11,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Alias) Reset() { *m = Alias{} } +func (m *Alias) String() string { return proto.CompactTextString(m) } +func (*Alias) ProtoMessage() {} +func (*Alias) Descriptor() ([]byte, []int) { + return fileDescriptor_319efdc71a5d7416, []int{2} +} + +func (m *Alias) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Alias.Unmarshal(m, b) +} +func (m *Alias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Alias.Marshal(b, m, deterministic) +} +func (m *Alias) XXX_Merge(src proto.Message) { + xxx_messageInfo_Alias.Merge(m, src) +} +func (m *Alias) XXX_Size() int { + return xxx_messageInfo_Alias.Size(m) +} +func (m *Alias) XXX_DiscardUnknown() { + xxx_messageInfo_Alias.DiscardUnknown(m) +} + +var xxx_messageInfo_Alias proto.InternalMessageInfo + +func (m *Alias) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *Alias) GetCanonicalID() string { + if m != nil { + return m.CanonicalID + } + return "" +} + +func (m *Alias) GetMountType() string { + if m != nil { + return m.MountType + } + return "" +} + +func (m *Alias) GetMountAccessor() string { + if m != nil { + return m.MountAccessor + } + return "" +} + +func (m *Alias) GetMountPath() string { + if m != nil { + return m.MountPath + } + return "" +} + +func (m *Alias) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Alias) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Alias) GetCreationTime() *timestamp.Timestamp { + if m != nil { + return m.CreationTime + } + return nil +} + +func (m *Alias) GetLastUpdateTime() *timestamp.Timestamp { + if m != nil { + return m.LastUpdateTime + } + return nil +} + +func (m *Alias) GetMergedFromCanonicalIDs() []string { + if m != nil { + return m.MergedFromCanonicalIDs + } + return nil +} + +func (m *Alias) GetNamespaceID() string { + if m != nil { + return m.NamespaceID + } + return "" +} + +// Deprecated. Retained for backwards compatibility. 
+type EntityStorageEntry struct { + Personas []*PersonaIndexEntry `sentinel:"" protobuf:"bytes,1,rep,name=personas,proto3" json:"personas,omitempty"` + ID string `sentinel:"" protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Name string `sentinel:"" protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CreationTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,5,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + LastUpdateTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,6,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + MergedEntityIDs []string `sentinel:"" protobuf:"bytes,7,rep,name=merged_entity_ids,json=mergedEntityIDs,proto3" json:"merged_entity_ids,omitempty"` + Policies []string `sentinel:"" protobuf:"bytes,8,rep,name=policies,proto3" json:"policies,omitempty"` + BucketKeyHash string `sentinel:"" protobuf:"bytes,9,opt,name=bucket_key_hash,json=bucketKeyHash,proto3" json:"bucket_key_hash,omitempty"` + MFASecrets map[string]*mfa.Secret `sentinel:"" protobuf:"bytes,10,rep,name=mfa_secrets,json=mfaSecrets,proto3" json:"mfa_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EntityStorageEntry) Reset() { *m = EntityStorageEntry{} } +func (m *EntityStorageEntry) String() string { return proto.CompactTextString(m) } +func (*EntityStorageEntry) ProtoMessage() {} +func (*EntityStorageEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_319efdc71a5d7416, []int{3} +} + +func (m *EntityStorageEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EntityStorageEntry.Unmarshal(m, b) +} +func (m *EntityStorageEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EntityStorageEntry.Marshal(b, m, deterministic) +} +func (m *EntityStorageEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntityStorageEntry.Merge(m, src) +} +func (m *EntityStorageEntry) XXX_Size() int { + return xxx_messageInfo_EntityStorageEntry.Size(m) +} +func (m *EntityStorageEntry) XXX_DiscardUnknown() { + xxx_messageInfo_EntityStorageEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_EntityStorageEntry proto.InternalMessageInfo + +func (m *EntityStorageEntry) GetPersonas() []*PersonaIndexEntry { + if m != nil { + return m.Personas + } + return nil +} + +func (m *EntityStorageEntry) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *EntityStorageEntry) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EntityStorageEntry) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *EntityStorageEntry) GetCreationTime() *timestamp.Timestamp { + if m != nil { + return m.CreationTime + } + return nil +} + +func (m *EntityStorageEntry) GetLastUpdateTime() *timestamp.Timestamp { + if m != nil { + return m.LastUpdateTime + } + return nil +} + +func (m *EntityStorageEntry) GetMergedEntityIDs() []string { + if m != nil { + return m.MergedEntityIDs + } + return nil +} + +func (m *EntityStorageEntry) GetPolicies() []string { + if m != nil { + return m.Policies + } + return nil +} + +func (m 
*EntityStorageEntry) GetBucketKeyHash() string { + if m != nil { + return m.BucketKeyHash + } + return "" +} + +func (m *EntityStorageEntry) GetMFASecrets() map[string]*mfa.Secret { + if m != nil { + return m.MFASecrets + } + return nil +} + +// Deprecated. Retained for backwards compatibility. +type PersonaIndexEntry struct { + ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + EntityID string `sentinel:"" protobuf:"bytes,2,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + MountType string `sentinel:"" protobuf:"bytes,3,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` + MountAccessor string `sentinel:"" protobuf:"bytes,4,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` + MountPath string `sentinel:"" protobuf:"bytes,5,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty"` + Metadata map[string]string `sentinel:"" protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Name string `sentinel:"" protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + CreationTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,8,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + LastUpdateTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,9,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + MergedFromEntityIDs []string `sentinel:"" protobuf:"bytes,10,rep,name=merged_from_entity_ids,json=mergedFromEntityIDs,proto3" json:"merged_from_entity_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PersonaIndexEntry) Reset() { *m = PersonaIndexEntry{} } +func (m *PersonaIndexEntry) String() string { return proto.CompactTextString(m) } +func (*PersonaIndexEntry) ProtoMessage() {} +func (*PersonaIndexEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_319efdc71a5d7416, []int{4} +} + +func (m *PersonaIndexEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PersonaIndexEntry.Unmarshal(m, b) +} +func (m *PersonaIndexEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PersonaIndexEntry.Marshal(b, m, deterministic) +} +func (m *PersonaIndexEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersonaIndexEntry.Merge(m, src) +} +func (m *PersonaIndexEntry) XXX_Size() int { + return xxx_messageInfo_PersonaIndexEntry.Size(m) +} +func (m *PersonaIndexEntry) XXX_DiscardUnknown() { + xxx_messageInfo_PersonaIndexEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_PersonaIndexEntry proto.InternalMessageInfo + +func (m *PersonaIndexEntry) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *PersonaIndexEntry) GetEntityID() string { + if m != nil { + return m.EntityID + } + return "" +} + +func (m *PersonaIndexEntry) GetMountType() string { + if m != nil { + return m.MountType + } + return "" +} + +func (m *PersonaIndexEntry) GetMountAccessor() string { + if m != nil { + return m.MountAccessor + } + return "" +} + +func (m *PersonaIndexEntry) GetMountPath() string { + if m != nil { + return m.MountPath + } + return "" +} + +func (m *PersonaIndexEntry) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PersonaIndexEntry) GetName() string { + if m != nil { + return m.Name + } + return "" +} + 
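+
+// NOTE (editor's illustrative aside, not part of the generated upstream
+// file): these generated types are what the identity templating at the top
+// of this change resolves selectors against. Assuming an entity such as
+//
+//	e := &identity.Entity{
+//		ID:       "entity-123",
+//		Metadata: map[string]string{"team": "payments"},
+//	}
+//
+// the selector "identity.entity.id" would render to "entity-123" and
+// "identity.entity.metadata.team" to "payments", while "time.now.plus.720h"
+// renders to the current Unix time plus 30 days, as a base-10 string.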
+func (m *PersonaIndexEntry) GetCreationTime() *timestamp.Timestamp { + if m != nil { + return m.CreationTime + } + return nil +} + +func (m *PersonaIndexEntry) GetLastUpdateTime() *timestamp.Timestamp { + if m != nil { + return m.LastUpdateTime + } + return nil +} + +func (m *PersonaIndexEntry) GetMergedFromEntityIDs() []string { + if m != nil { + return m.MergedFromEntityIDs + } + return nil +} + +func init() { + proto.RegisterType((*Group)(nil), "identity.Group") + proto.RegisterMapType((map[string]string)(nil), "identity.Group.MetadataEntry") + proto.RegisterType((*Entity)(nil), "identity.Entity") + proto.RegisterMapType((map[string]string)(nil), "identity.Entity.MetadataEntry") + proto.RegisterMapType((map[string]*mfa.Secret)(nil), "identity.Entity.MFASecretsEntry") + proto.RegisterType((*Alias)(nil), "identity.Alias") + proto.RegisterMapType((map[string]string)(nil), "identity.Alias.MetadataEntry") + proto.RegisterType((*EntityStorageEntry)(nil), "identity.EntityStorageEntry") + proto.RegisterMapType((map[string]string)(nil), "identity.EntityStorageEntry.MetadataEntry") + proto.RegisterMapType((map[string]*mfa.Secret)(nil), "identity.EntityStorageEntry.MFASecretsEntry") + proto.RegisterType((*PersonaIndexEntry)(nil), "identity.PersonaIndexEntry") + proto.RegisterMapType((map[string]string)(nil), "identity.PersonaIndexEntry.MetadataEntry") +} + +func init() { proto.RegisterFile("helper/identity/types.proto", fileDescriptor_319efdc71a5d7416) } + +var fileDescriptor_319efdc71a5d7416 = []byte{ + // 867 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x56, 0x5d, 0x8f, 0xdb, 0x44, + 0x14, 0x55, 0x3e, 0x9c, 0xd8, 0xd7, 0xf9, 0xd8, 0x0e, 0xa8, 0x32, 0x59, 0x95, 0x66, 0x2b, 0x15, + 0xa5, 0xab, 0xca, 0x91, 0xb6, 0x0f, 0xd0, 0xf2, 0x80, 0x16, 0xd8, 0x42, 0x40, 0x95, 0x2a, 0xb7, + 0xbc, 0xf0, 0x62, 0x4d, 0xec, 0x49, 0x32, 0xaa, 0xed, 0xb1, 0x3c, 0xe3, 0x8a, 0xfc, 0x03, 0x1e, + 0x78, 0xe0, 0x07, 0xf1, 0xc7, 0x78, 0x43, 0x33, 0xe3, 0xaf, 0xc6, 0xed, 0xd2, 0x15, 0x11, 0x02, + 0xf5, 0xcd, 0x73, 0xee, 0x9d, 0x3b, 0x37, 0xe7, 0x9e, 0x7b, 0x14, 0x38, 0xdd, 0x91, 0x28, 0x25, + 0xd9, 0x92, 0x86, 0x24, 0x11, 0x54, 0xec, 0x97, 0x62, 0x9f, 0x12, 0xee, 0xa6, 0x19, 0x13, 0x0c, + 0x99, 0x25, 0x3a, 0xbb, 0xbb, 0x65, 0x6c, 0x1b, 0x91, 0xa5, 0xc2, 0xd7, 0xf9, 0x66, 0x29, 0x68, + 0x4c, 0xb8, 0xc0, 0x71, 0xaa, 0x53, 0x67, 0x77, 0x0f, 0xeb, 0xc4, 0x1b, 0xdc, 0xac, 0x75, 0xef, + 0x8f, 0x3e, 0x18, 0xdf, 0x65, 0x2c, 0x4f, 0xd1, 0x04, 0xba, 0x34, 0x74, 0x3a, 0xf3, 0xce, 0xc2, + 0xf2, 0xba, 0x34, 0x44, 0x08, 0xfa, 0x09, 0x8e, 0x89, 0xd3, 0x55, 0x88, 0xfa, 0x46, 0x33, 0x30, + 0x53, 0x16, 0xd1, 0x80, 0x12, 0xee, 0xf4, 0xe6, 0xbd, 0x85, 0xe5, 0x55, 0x67, 0xb4, 0x80, 0x93, + 0x14, 0x67, 0x24, 0x11, 0xfe, 0x56, 0xd6, 0xf3, 0x69, 0xc8, 0x9d, 0xbe, 0xca, 0x99, 0x68, 0x5c, + 0x3d, 0xb3, 0x0a, 0x39, 0x3a, 0x87, 0x5b, 0x31, 0x89, 0xd7, 0x24, 0xf3, 0x75, 0x53, 0x2a, 0xd5, + 0x50, 0xa9, 0x53, 0x1d, 0xb8, 0x52, 0xb8, 0xcc, 0x7d, 0x0c, 0x66, 0x4c, 0x04, 0x0e, 0xb1, 0xc0, + 0xce, 0x60, 0xde, 0x5b, 0xd8, 0x17, 0x77, 0xdc, 0xf2, 0xc7, 0xb8, 0xaa, 0xa2, 0xfb, 0xac, 0x88, + 0x5f, 0x25, 0x22, 0xdb, 0x7b, 0x55, 0x3a, 0xfa, 0x0a, 0xc6, 0x41, 0x46, 0xb0, 0xa0, 0x2c, 0xf1, + 0x25, 0x2f, 0xce, 0x70, 0xde, 0x59, 0xd8, 0x17, 0x33, 0x57, 0x93, 0xe6, 0x96, 0xa4, 0xb9, 0x2f, + 0x4b, 0xd2, 0xbc, 0x51, 0x79, 0x41, 0x42, 0xe8, 0x5b, 0x38, 0x89, 0x30, 0x17, 0x7e, 0x9e, 0x86, + 0x58, 0x10, 0x5d, 0xc3, 0xfc, 0xdb, 0x1a, 0x13, 0x79, 0xe7, 0x27, 0x75, 0x45, 0x55, 0x39, 0x83, + 0x51, 0xcc, 0x42, 0xba, 
0xd9, 0xfb, 0x34, 0x09, 0xc9, 0x2f, 0x8e, 0x35, 0xef, 0x2c, 0xfa, 0x9e, + 0xad, 0xb1, 0x95, 0x84, 0xd0, 0x1d, 0x80, 0x75, 0x1e, 0xbc, 0x22, 0xc2, 0x7f, 0x45, 0xf6, 0x0e, + 0x28, 0xc2, 0x2d, 0x8d, 0xfc, 0x48, 0xf6, 0xe8, 0x3e, 0x18, 0x38, 0xa2, 0x98, 0x3b, 0xb6, 0x7a, + 0x7c, 0x5a, 0x13, 0x70, 0x29, 0x61, 0x4f, 0x47, 0xe5, 0xc0, 0xe4, 0x64, 0x9d, 0x91, 0x1e, 0x98, + 0xfc, 0x96, 0x8f, 0xcb, 0xc1, 0xf1, 0x14, 0x07, 0xc4, 0xa7, 0xa1, 0x33, 0x56, 0x31, 0xbb, 0xc2, + 0x56, 0xe1, 0xec, 0x4b, 0x18, 0xbf, 0xc1, 0x20, 0x3a, 0x81, 0x9e, 0x6c, 0x43, 0x2b, 0x41, 0x7e, + 0xa2, 0x8f, 0xc1, 0x78, 0x8d, 0xa3, 0xbc, 0xd4, 0x82, 0x3e, 0x3c, 0xe9, 0x7e, 0xd1, 0xb9, 0xf7, + 0x9b, 0x01, 0x03, 0x3d, 0x2c, 0xf4, 0x00, 0x86, 0xaa, 0x0f, 0xc2, 0x9d, 0x8e, 0x1a, 0x54, 0xab, + 0xcf, 0x32, 0x5e, 0x48, 0xad, 0xdb, 0x92, 0x5a, 0xaf, 0x21, 0xb5, 0x27, 0x8d, 0xc1, 0xf7, 0x55, + 0xbd, 0x4f, 0xeb, 0x7a, 0xfa, 0xc9, 0xf7, 0x9f, 0xbc, 0x71, 0x84, 0xc9, 0x0f, 0x6e, 0x3c, 0x79, + 0xa5, 0xf3, 0x6c, 0x4b, 0xc2, 0xa6, 0xce, 0x87, 0xa5, 0xce, 0x65, 0xa0, 0xd6, 0x79, 0x73, 0xb3, + 0xcc, 0x83, 0xcd, 0x7a, 0x53, 0x1e, 0xd6, 0xa1, 0x3c, 0x2e, 0xc1, 0x8e, 0x37, 0xd8, 0xe7, 0x24, + 0xc8, 0x88, 0xe0, 0x0e, 0x28, 0xb2, 0xe6, 0x6d, 0xb2, 0x36, 0xf8, 0x85, 0x4e, 0xd1, 0x74, 0x41, + 0x5c, 0x01, 0xf2, 0xf5, 0x90, 0x72, 0xbc, 0x8e, 0x48, 0xa8, 0x44, 0x66, 0x7a, 0xd5, 0xb9, 0x25, + 0xa1, 0xd1, 0x71, 0x25, 0x34, 0xfb, 0x01, 0xa6, 0x07, 0xad, 0xbd, 0xe5, 0xfa, 0x59, 0xf3, 0xba, + 0x7d, 0x61, 0xbb, 0xf1, 0x06, 0xbb, 0xfa, 0x4e, 0x53, 0x8e, 0xbf, 0xf7, 0xc1, 0x50, 0x5a, 0x6b, + 0xb9, 0xd9, 0x19, 0x8c, 0x02, 0x9c, 0xb0, 0x84, 0x06, 0x38, 0xf2, 0x2b, 0xf1, 0xd9, 0x15, 0xb6, + 0x0a, 0x25, 0xcd, 0x31, 0xcb, 0x13, 0xe1, 0xab, 0x2d, 0xd2, 0x5a, 0xb4, 0x14, 0xf2, 0x52, 0xae, + 0xd2, 0x7d, 0x98, 0xe8, 0x30, 0x0e, 0x02, 0xc2, 0x39, 0xcb, 0x9c, 0xbe, 0x4a, 0x19, 0x2b, 0xf4, + 0xb2, 0x00, 0xeb, 0x2a, 0x29, 0x16, 0x3b, 0x25, 0xbc, 0xb2, 0xca, 0x73, 0x2c, 0x76, 0xd7, 0xfb, + 0x99, 0x6a, 0xfd, 0x9d, 0xaa, 0x2e, 0xb7, 0x64, 0xd8, 0xd8, 0x92, 0x96, 0xd2, 0xcd, 0x23, 0x28, + 0xdd, 0xba, 0xb1, 0xd2, 0x1f, 0xc3, 0x27, 0x85, 0xd2, 0x37, 0x19, 0x8b, 0xfd, 0x26, 0xd3, 0x5a, + 0x90, 0x96, 0x77, 0x5b, 0x27, 0x3c, 0xcd, 0x58, 0xfc, 0x4d, 0x4d, 0x3a, 0x6f, 0xc9, 0xcb, 0x3e, + 0xb2, 0x43, 0xfd, 0x6a, 0x00, 0xd2, 0x1b, 0xf0, 0x42, 0xb0, 0x0c, 0x6f, 0x89, 0x2e, 0xf1, 0x39, + 0x98, 0x29, 0xc9, 0x38, 0x4b, 0x70, 0x69, 0x57, 0xa7, 0xf5, 0x1c, 0x9e, 0xeb, 0x88, 0x32, 0xe7, + 0x62, 0x0a, 0x65, 0xf2, 0x7b, 0x79, 0xd7, 0xd3, 0x96, 0x77, 0x9d, 0x1f, 0xae, 0x63, 0xb3, 0x99, + 0x0f, 0xc5, 0xc7, 0x3e, 0x83, 0x69, 0xed, 0x63, 0xfe, 0x0e, 0xf3, 0x5d, 0x61, 0x66, 0xe3, 0xca, + 0xcc, 0xbe, 0xc7, 0x7c, 0x87, 0x9e, 0xbd, 0xcd, 0xd0, 0x1e, 0x5e, 0xcf, 0xe0, 0xbb, 0xcd, 0xed, + 0xbf, 0xe3, 0x4e, 0x7f, 0xf6, 0xe0, 0x56, 0x4b, 0x5a, 0x2d, 0xa7, 0x3a, 0x05, 0xab, 0xa2, 0xb9, + 0xe8, 0xc7, 0x24, 0x05, 0xbf, 0xff, 0x8e, 0x47, 0x5d, 0xb5, 0x3c, 0xea, 0xc1, 0x35, 0xbb, 0xf1, + 0x7f, 0xf4, 0xab, 0x47, 0x70, 0xbb, 0xe9, 0x57, 0x0d, 0x59, 0x6b, 0xb3, 0xfa, 0xa8, 0x36, 0xab, + 0x4a, 0xda, 0xff, 0x48, 0x47, 0x5f, 0x3f, 0xfc, 0xf9, 0x7c, 0x4b, 0xc5, 0x2e, 0x5f, 0xbb, 0x01, + 0x8b, 0x97, 0x52, 0xfb, 0x34, 0x60, 0x59, 0xba, 0x7c, 0x8d, 0xf3, 0x48, 0x2c, 0x0f, 0xfe, 0xa5, + 0xaf, 0x07, 0xea, 0x37, 0x3c, 0xfa, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x54, 0x4e, 0x98, 0x4a, 0x07, + 0x0c, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/vault/helper/identity/types.proto b/vendor/github.com/hashicorp/vault/helper/identity/types.proto new file mode 100644 index 00000000..8fed7334 --- /dev/null +++ 
b/vendor/github.com/hashicorp/vault/helper/identity/types.proto
@@ -0,0 +1,203 @@
+syntax = "proto3";
+
+option go_package = "github.com/hashicorp/vault/helper/identity";
+
+package identity;
+
+import "google/protobuf/timestamp.proto";
+import "helper/identity/mfa/types.proto";
+
+// Group represents an identity group.
+message Group {
+  // ID is the unique identifier for this group
+  string id = 1;
+
+  // Name is the unique name for this group
+  string name = 2;
+
+  // Policies are the vault policies to be granted to members of this group
+  repeated string policies = 3;
+
+  // ParentGroupIDs are the identifiers of those groups to which this group is a
+  // member of. These will serve as references to the parent group in the
+  // hierarchy.
+  repeated string parent_group_ids = 4;
+
+  // MemberEntityIDs are the identifiers of entities which are members of this
+  // group
+  repeated string member_entity_ids = 5;
+
+  // Metadata represents the custom data tied with this group
+  map<string, string> metadata = 6;
+
+  // CreationTime is the time at which this group was created
+  google.protobuf.Timestamp creation_time = 7;
+
+  // LastUpdateTime is the time at which this group was last modified
+  google.protobuf.Timestamp last_update_time = 8;
+
+  // ModifyIndex tracks the number of updates to the group. It is useful to detect
+  // updates to the groups.
+  uint64 modify_index = 9;
+
+  // BucketKey is the path of the storage packer key into which this group is
+  // stored.
+  string bucket_key = 10;
+
+  // Alias is used to mark this group as an internal mapping of a group that
+  // is external to the identity store. Alias can only be set if the 'type'
+  // is set to 'external'.
+  Alias alias = 11;
+
+  // Type indicates if this group is an internal group or an external group.
+  // Memberships of the internal groups can be managed over the API whereas
+  // the memberships on the external group --for which a corresponding alias
+  // will be set-- will be managed automatically.
+  string type = 12;
+
+  // NamespaceID is the identifier of the namespace to which this group
+  // belongs to. Do not return this value over the API when reading the
+  // group.
+  string namespace_id = 13;
+}
+
+// Entity represents an entity that gets persisted and indexed.
+// Entity is fundamentally composed of zero or many aliases.
+message Entity {
+  // Aliases are the identities that this entity is made of. This can be
+  // empty as well to favor being able to create the entity first and then
+  // incrementally adding aliases.
+  repeated Alias aliases = 1;
+
+  // ID is the unique identifier of the entity which always be a UUID. This
+  // should never be allowed to be updated.
+  string id = 2;
+
+  // Name is a unique identifier of the entity which is intended to be
+  // human-friendly. The default name might not be human friendly since it
+  // gets suffixed by a UUID, but it can optionally be updated, unlike the ID
+  // field.
+  string name = 3;
+
+  // Metadata represents the explicit metadata which is set by the
+  // clients. This is useful to tie any information pertaining to the
+  // aliases. This is a non-unique field of entity, meaning multiple
+  // entities can have the same metadata set. Entities will be indexed based
+  // on this explicit metadata. This enables virtual groupings of entities
+  // based on its metadata.
+  map<string, string> metadata = 4;
+
+  // CreationTime is the time at which this entity is first created.
+  google.protobuf.Timestamp creation_time = 5;
+
+  // LastUpdateTime is the most recent time at which the properties of this
+  // entity got modified. This is helpful in filtering out entities based on
+  // its age and to take action on them, if desired.
+  google.protobuf.Timestamp last_update_time = 6;
+
+  // MergedEntityIDs are the entities which got merged to this one. Entities
+  // will be indexed based on all the entities that got merged into it. This
+  // helps to apply the actions on this entity on the tokens that are merged
+  // to the merged entities. Merged entities will be deleted entirely and
+  // this is the only trackable trail of its earlier presence.
+  repeated string merged_entity_ids = 7;
+
+  // Policies the entity is entitled to
+  repeated string policies = 8;
+
+  // BucketKey is the path of the storage packer key into which this entity is
+  // stored.
+  string bucket_key = 9;
+
+  // MFASecrets holds the MFA secrets indexed by the identifier of the MFA
+  // method configuration.
+  map<string, mfa.Secret> mfa_secrets = 10;
+
+  // Disabled indicates whether tokens associated with the account should not
+  // be able to be used
+  bool disabled = 11;
+
+  // NamespaceID is the identifier of the namespace to which this entity
+  // belongs to. Do not return this value over the API when reading the
+  // entity.
+  string namespace_id = 12;
+}
+
+// Alias represents the alias that gets stored inside of the
+// entity object in storage and also represents in an in-memory index of an
+// alias object.
+message Alias {
+  // ID is the unique identifier that represents this alias
+  string id = 1;
+
+  // CanonicalID is the entity identifier to which this alias belongs to
+  string canonical_id = 2;
+
+  // MountType is the backend mount's type to which this alias belongs to.
+  // This enables categorically querying aliases of specific backend types.
+  string mount_type = 3;
+
+  // MountAccessor is the backend mount's accessor to which this alias
+  // belongs to.
+  string mount_accessor = 4;
+
+  // MountPath is the backend mount's path to which the Maccessor belongs to. This
+  // field is not used for any operational purposes. This is only returned when
+  // alias is read, only as a nicety.
+  string mount_path = 5;
+
+  // Metadata is the explicit metadata that clients set against an entity
+  // which enables virtual grouping of aliases. Aliases will be indexed
+  // against their metadata.
+  map<string, string> metadata = 6;
+
+  // Name is the identifier of this alias in its authentication source.
+  // This does not uniquely identify an alias in Vault. This in conjunction
+  // with MountAccessor form to be the factors that represent an alias in a
+  // unique way. Aliases will be indexed based on this combined uniqueness
+  // factor.
+  string name = 7;
+
+  // CreationTime is the time at which this alias was first created
+  google.protobuf.Timestamp creation_time = 8;
+
+  // LastUpdateTime is the most recent time at which the properties of this
+  // alias got modified. This is helpful in filtering out aliases based
+  // on its age and to take action on them, if desired.
+  google.protobuf.Timestamp last_update_time = 9;
+
+  // MergedFromCanonicalIDs is the FIFO history of merging activity
+  repeated string merged_from_canonical_ids = 10;
+
+  // NamespaceID is the identifier of the namespace to which this alias
+  // belongs.
+  string namespace_id = 11;
+}
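+
+// NOTE (editor's illustrative aside, not part of the upstream proto): the
+// metadata maps above are plain string-to-string pairs, and they are what
+// the identity templating earlier in this change reads. For a group named
+// "admins" in the caller's namespace carrying metadata {"region": "us-west"},
+// the selector "identity.groups.names.admins.metadata.region" resolves to
+// "us-west"; the "ids" form matches on the group's ID instead and does not
+// filter by namespace.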
+
+// Deprecated. Retained for backwards compatibility.
+message EntityStorageEntry {
+  repeated PersonaIndexEntry personas = 1;
+  string id = 2;
+  string name = 3;
+  map<string, string> metadata = 4;
+  google.protobuf.Timestamp creation_time = 5;
+  google.protobuf.Timestamp last_update_time = 6;
+  repeated string merged_entity_ids = 7;
+  repeated string policies = 8;
+  string bucket_key_hash = 9;
+  map<string, mfa.Secret> mfa_secrets = 10;
+}
+
+// Deprecated. Retained for backwards compatibility.
+message PersonaIndexEntry {
+  string id = 1;
+  string entity_id = 2;
+  string mount_type = 3;
+  string mount_accessor = 4;
+  string mount_path = 5;
+  map<string, string> metadata = 6;
+  string name = 7;
+  google.protobuf.Timestamp creation_time = 8;
+  google.protobuf.Timestamp last_update_time = 9;
+  repeated string merged_from_entity_ids = 10;
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/listenerutil/listener.go b/vendor/github.com/hashicorp/vault/helper/listenerutil/listener.go
new file mode 100644
index 00000000..8f17b723
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/listenerutil/listener.go
@@ -0,0 +1,271 @@
+package listenerutil
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	osuser "os/user"
+	"strconv"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/helper/reload"
+	"github.com/hashicorp/vault/sdk/helper/parseutil"
+	"github.com/hashicorp/vault/sdk/helper/tlsutil"
+	"github.com/jefferai/isbadcipher"
+	"github.com/mitchellh/cli"
+)
+
+type UnixSocketsConfig struct {
+	User  string `hcl:"user"`
+	Mode  string `hcl:"mode"`
+	Group string `hcl:"group"`
+}
+
+// rmListener is an implementation of net.Listener that forwards most
+// calls to the listener but also removes a file as part of the close. We
+// use this to cleanup the unix domain socket on close.
+type rmListener struct {
+	net.Listener
+	Path string
+}
+
+func (l *rmListener) Close() error {
+	// Close the listener itself
+	if err := l.Listener.Close(); err != nil {
+		return err
+	}
+
+	// Remove the file
+	return os.Remove(l.Path)
+}
+
+func UnixSocketListener(path string, unixSocketsConfig *UnixSocketsConfig) (net.Listener, error) {
+	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+		return nil, fmt.Errorf("failed to remove socket file: %v", err)
+	}
+
+	ln, err := net.Listen("unix", path)
+	if err != nil {
+		return nil, err
+	}
+
+	if unixSocketsConfig != nil {
+		err = setFilePermissions(path, unixSocketsConfig.User, unixSocketsConfig.Group, unixSocketsConfig.Mode)
+		if err != nil {
+			return nil, fmt.Errorf("failed to set file system permissions on the socket file: %s", err)
+		}
+	}
+
+	// Wrap the listener in rmListener so that the Unix domain socket file is
+	// removed on close.
+ return &rmListener{ + Listener: ln, + Path: path, + }, nil +} + +func WrapTLS( + ln net.Listener, + props map[string]string, + config map[string]interface{}, + ui cli.Ui) (net.Listener, map[string]string, reload.ReloadFunc, *tls.Config, error) { + props["tls"] = "disabled" + + if v, ok := config["tls_disable"]; ok { + disabled, err := parseutil.ParseBool(v) + if err != nil { + return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_disable': {{err}}", err) + } + if disabled { + return ln, props, nil, nil, nil + } + } + + certFileRaw, ok := config["tls_cert_file"] + if !ok { + return nil, nil, nil, nil, fmt.Errorf("'tls_cert_file' must be set") + } + certFile := certFileRaw.(string) + keyFileRaw, ok := config["tls_key_file"] + if !ok { + return nil, nil, nil, nil, fmt.Errorf("'tls_key_file' must be set") + } + keyFile := keyFileRaw.(string) + + cg := reload.NewCertificateGetter(certFile, keyFile, "") + if err := cg.Reload(config); err != nil { + // We try the key without a passphrase first and if we get an incorrect + // passphrase response, try again after prompting for a passphrase + if errwrap.Contains(err, x509.IncorrectPasswordError.Error()) { + var passphrase string + passphrase, err = ui.AskSecret(fmt.Sprintf("Enter passphrase for %s:", keyFile)) + if err == nil { + cg = reload.NewCertificateGetter(certFile, keyFile, passphrase) + if err = cg.Reload(config); err == nil { + goto PASSPHRASECORRECT + } + } + } + return nil, nil, nil, nil, errwrap.Wrapf("error loading TLS cert: {{err}}", err) + } + +PASSPHRASECORRECT: + var tlsvers string + tlsversRaw, ok := config["tls_min_version"] + if !ok { + tlsvers = "tls12" + } else { + tlsvers = tlsversRaw.(string) + } + + tlsConf := &tls.Config{} + tlsConf.GetCertificate = cg.GetCertificate + tlsConf.NextProtos = []string{"h2", "http/1.1"} + tlsConf.MinVersion, ok = tlsutil.TLSLookup[tlsvers] + if !ok { + return nil, nil, nil, nil, fmt.Errorf("'tls_min_version' value %q not supported, please specify one of [tls10,tls11,tls12]", tlsvers) + } + tlsConf.ClientAuth = tls.RequestClientCert + + if v, ok := config["tls_cipher_suites"]; ok { + ciphers, err := tlsutil.ParseCiphers(v.(string)) + if err != nil { + return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_cipher_suites': {{err}}", err) + } + + // HTTP/2 with TLS 1.2 blacklists several cipher suites. + // https://tools.ietf.org/html/rfc7540#appendix-A + // + // Since the CLI (net/http) automatically uses HTTP/2 with TLS 1.2, + // we check here if all or some specified cipher suites are blacklisted. + badCiphers := []string{} + for _, cipher := range ciphers { + if isbadcipher.IsBadCipher(cipher) { + // Get the name of the current cipher. + cipherStr, err := tlsutil.GetCipherName(cipher) + if err != nil { + return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_cipher_suites': {{err}}", err) + } + badCiphers = append(badCiphers, cipherStr) + } + } + if len(badCiphers) == len(ciphers) { + ui.Warn(`WARNING! All cipher suites defined by 'tls_cipher_suites' are blacklisted by the +HTTP/2 specification. HTTP/2 communication with TLS 1.2 will not work as intended +and Vault will be unavailable via the CLI. +Please see https://tools.ietf.org/html/rfc7540#appendix-A for further information.`) + } else if len(badCiphers) > 0 { + ui.Warn(fmt.Sprintf(`WARNING! 
The following cipher suites defined by 'tls_cipher_suites' are
+blacklisted by the HTTP/2 specification:
+%v
+Please see https://tools.ietf.org/html/rfc7540#appendix-A for further information.`, badCiphers))
+		}
+		tlsConf.CipherSuites = ciphers
+	}
+	if v, ok := config["tls_prefer_server_cipher_suites"]; ok {
+		preferServer, err := parseutil.ParseBool(v)
+		if err != nil {
+			return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_prefer_server_cipher_suites': {{err}}", err)
+		}
+		tlsConf.PreferServerCipherSuites = preferServer
+	}
+	var requireVerifyCerts bool
+	var err error
+	if v, ok := config["tls_require_and_verify_client_cert"]; ok {
+		requireVerifyCerts, err = parseutil.ParseBool(v)
+		if err != nil {
+			return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_require_and_verify_client_cert': {{err}}", err)
+		}
+		if requireVerifyCerts {
+			tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
+		}
+		if tlsClientCaFile, ok := config["tls_client_ca_file"]; ok {
+			caPool := x509.NewCertPool()
+			data, err := ioutil.ReadFile(tlsClientCaFile.(string))
+			if err != nil {
+				return nil, nil, nil, nil, errwrap.Wrapf("failed to read tls_client_ca_file: {{err}}", err)
+			}
+
+			if !caPool.AppendCertsFromPEM(data) {
+				return nil, nil, nil, nil, fmt.Errorf("failed to parse CA certificate in tls_client_ca_file")
+			}
+			tlsConf.ClientCAs = caPool
+		}
+	}
+	if v, ok := config["tls_disable_client_certs"]; ok {
+		disableClientCerts, err := parseutil.ParseBool(v)
+		if err != nil {
+			return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_disable_client_certs': {{err}}", err)
+		}
+		if disableClientCerts && requireVerifyCerts {
+			return nil, nil, nil, nil, fmt.Errorf("'tls_disable_client_certs' and 'tls_require_and_verify_client_cert' are mutually exclusive")
+		}
+		if disableClientCerts {
+			tlsConf.ClientAuth = tls.NoClientCert
+		}
+	}
+
+	ln = tls.NewListener(ln, tlsConf)
+	props["tls"] = "enabled"
+	return ln, props, cg.Reload, tlsConf, nil
+}
+
+// setFilePermissions handles configuring ownership and permissions
+// settings on a given file. All permission/ownership settings are
+// optional. If no user or group is specified, the current user/group
+// will be used. Mode is optional, and has no default (the operation is
+// not performed if absent). User and group may each be specified by
+// name or ID.
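+//
+// For example (editor's illustrative sketch, not part of the upstream file):
+//
+//	err := setFilePermissions("/var/run/vault.sock", "vault", "114", "0660")
+//
+// resolves user "vault" by name, uses gid 114 directly, and chmods the
+// socket to 0660 (the mode string is parsed as octal). Empty user/group
+// values keep the calling process's uid/gid, and an empty mode skips the
+// chmod entirely.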
+func setFilePermissions(path string, user, group, mode string) error {
+	var err error
+	uid, gid := os.Getuid(), os.Getgid()
+
+	if user != "" {
+		if uid, err = strconv.Atoi(user); err == nil {
+			goto GROUP
+		}
+
+		// Try looking up the user by name
+		u, err := osuser.Lookup(user)
+		if err != nil {
+			return fmt.Errorf("failed to look up user %q: %v", user, err)
+		}
+		uid, _ = strconv.Atoi(u.Uid)
+	}
+
+GROUP:
+	if group != "" {
+		if gid, err = strconv.Atoi(group); err == nil {
+			goto OWN
+		}
+
+		// Try looking up the group by name
+		g, err := osuser.LookupGroup(group)
+		if err != nil {
+			return fmt.Errorf("failed to look up group %q: %v", group, err)
+		}
+		gid, _ = strconv.Atoi(g.Gid)
+	}
+
+OWN:
+	if err := os.Chown(path, uid, gid); err != nil {
+		return fmt.Errorf("failed setting ownership to %d:%d on %q: %v",
+			uid, gid, path, err)
+	}
+
+	if mode != "" {
+		modeVal, err := strconv.ParseUint(mode, 8, 32)
+		if err != nil {
+			return fmt.Errorf("invalid mode specified: %v", mode)
+		}
+		if err := os.Chmod(path, os.FileMode(modeVal)); err != nil {
+			return fmt.Errorf("failed setting permissions to %d on %q: %v",
+				modeVal, path, err)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/metricsutil/metricsutil.go b/vendor/github.com/hashicorp/vault/helper/metricsutil/metricsutil.go
new file mode 100644
index 00000000..83ca85a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/metricsutil/metricsutil.go
@@ -0,0 +1,131 @@
+package metricsutil
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/armon/go-metrics"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/expfmt"
+)
+
+const (
+	OpenMetricsMIMEType = "application/openmetrics-text"
+
+	PrometheusSchemaMIMEType = "prometheus/telemetry"
+
+	// ErrorContentType is the content type returned by an error response.
+ ErrorContentType = "text/plain" +) + +const ( + PrometheusMetricFormat = "prometheus" +) + +type MetricsHelper struct { + inMemSink *metrics.InmemSink + PrometheusEnabled bool +} + +func NewMetricsHelper(inMem *metrics.InmemSink, enablePrometheus bool) *MetricsHelper { + return &MetricsHelper{inMem, enablePrometheus} +} + +func FormatFromRequest(req *logical.Request) string { + acceptHeaders := req.Headers["Accept"] + if len(acceptHeaders) > 0 { + acceptHeader := acceptHeaders[0] + if strings.HasPrefix(acceptHeader, OpenMetricsMIMEType) { + return PrometheusMetricFormat + } + + // Look for prometheus accept header + for _, header := range acceptHeaders { + if strings.Contains(header, PrometheusSchemaMIMEType) { + return PrometheusMetricFormat + } + } + } + return "" +} + +func (m *MetricsHelper) ResponseForFormat(format string) *logical.Response { + switch format { + case PrometheusMetricFormat: + return m.PrometheusResponse() + case "": + return m.GenericResponse() + default: + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ErrorContentType, + logical.HTTPRawBody: fmt.Sprintf("metric response format \"%s\" unknown", format), + logical.HTTPStatusCode: http.StatusBadRequest, + }, + } + } +} + +func (m *MetricsHelper) PrometheusResponse() *logical.Response { + resp := &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ErrorContentType, + logical.HTTPStatusCode: http.StatusBadRequest, + }, + } + + if !m.PrometheusEnabled { + resp.Data[logical.HTTPRawBody] = "prometheus is not enabled" + return resp + } + metricsFamilies, err := prometheus.DefaultGatherer.Gather() + if err != nil && len(metricsFamilies) == 0 { + resp.Data[logical.HTTPRawBody] = fmt.Sprintf("no prometheus metrics could be decoded: %s", err) + return resp + } + + // Initialize a byte buffer. 
+ buf := &bytes.Buffer{} + defer buf.Reset() + + e := expfmt.NewEncoder(buf, expfmt.FmtText) + for _, mf := range metricsFamilies { + err := e.Encode(mf) + if err != nil { + resp.Data[logical.HTTPRawBody] = fmt.Sprintf("error during the encoding of metrics: %s", err) + return resp + } + } + resp.Data[logical.HTTPContentType] = string(expfmt.FmtText) + resp.Data[logical.HTTPRawBody] = buf.Bytes() + resp.Data[logical.HTTPStatusCode] = http.StatusOK + return resp +} + +func (m *MetricsHelper) GenericResponse() *logical.Response { + resp := &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ErrorContentType, + logical.HTTPStatusCode: http.StatusBadRequest, + }, + } + + summary, err := m.inMemSink.DisplayMetrics(nil, nil) + if err != nil { + resp.Data[logical.HTTPRawBody] = fmt.Sprintf("error while fetching the in-memory metrics: %s", err) + return resp + } + content, err := json.Marshal(summary) + if err != nil { + resp.Data[logical.HTTPRawBody] = fmt.Sprintf("error while marshalling the in-memory metrics: %s", err) + return resp + } + resp.Data[logical.HTTPContentType] = "application/json" + resp.Data[logical.HTTPRawBody] = content + resp.Data[logical.HTTPStatusCode] = http.StatusOK + return resp +} diff --git a/vendor/github.com/hashicorp/vault/helper/namespace/namespace.go b/vendor/github.com/hashicorp/vault/helper/namespace/namespace.go new file mode 100644 index 00000000..e47a27f3 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/namespace/namespace.go @@ -0,0 +1,127 @@ +package namespace + +import ( + "context" + "errors" + "strings" +) + +type contextValues struct{} + +type Namespace struct { + ID string `json:"id"` + Path string `json:"path"` +} + +const ( + RootNamespaceID = "root" +) + +var ( + contextNamespace contextValues = struct{}{} + ErrNoNamespace error = errors.New("no namespace") + RootNamespace *Namespace = &Namespace{ + ID: RootNamespaceID, + Path: "", + } +) + +func (n *Namespace) HasParent(possibleParent *Namespace) bool { + switch { + case n.Path == "": + return false + case possibleParent.Path == "": + return true + default: + return strings.HasPrefix(n.Path, possibleParent.Path) + } +} + +func (n *Namespace) TrimmedPath(path string) string { + return strings.TrimPrefix(path, n.Path) +} + +func ContextWithNamespace(ctx context.Context, ns *Namespace) context.Context { + return context.WithValue(ctx, contextNamespace, ns) +} + +func RootContext(ctx context.Context) context.Context { + if ctx == nil { + return ContextWithNamespace(context.Background(), RootNamespace) + } + return ContextWithNamespace(ctx, RootNamespace) +} + +// This function caches the ns to avoid doing a .Value lookup over and over, +// because it's called a *lot* in the request critical path. .Value is +// concurrency-safe so uses some kind of locking/atomicity, but it should never +// be read before first write, plus we don't believe this will be called from +// different goroutines, so it should be safe. 
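+//
+// For example (editor's illustrative sketch, not part of the upstream file):
+//
+//	ctx := ContextWithNamespace(context.Background(), RootNamespace)
+//	ns, err := FromContext(ctx)
+//	// ns.ID == "root", err == nil
+//
+// whereas calling FromContext on a context without a namespace returns
+// ErrNoNamespace.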
+func FromContext(ctx context.Context) (*Namespace, error) { + if ctx == nil { + return nil, errors.New("context was nil") + } + + nsRaw := ctx.Value(contextNamespace) + if nsRaw == nil { + return nil, ErrNoNamespace + } + + ns := nsRaw.(*Namespace) + if ns == nil { + return nil, ErrNoNamespace + } + + return ns, nil +} + +// Canonicalize trims any prefix '/' and adds a trailing '/' to the +// provided string +func Canonicalize(nsPath string) string { + if nsPath == "" { + return "" + } + + // Canonicalize the path to not have a '/' prefix + nsPath = strings.TrimPrefix(nsPath, "/") + + // Canonicalize the path to always having a '/' suffix + if !strings.HasSuffix(nsPath, "/") { + nsPath += "/" + } + + return nsPath +} + +func SplitIDFromString(input string) (string, string) { + prefix := "" + slashIdx := strings.LastIndex(input, "/") + + switch { + case strings.HasPrefix(input, "b."): + prefix = "b." + input = input[2:] + + case strings.HasPrefix(input, "s."): + prefix = "s." + input = input[2:] + + case slashIdx > 0: + // Leases will never have a b./s. to start + if slashIdx == len(input)-1 { + return input, "" + } + prefix = input[:slashIdx+1] + input = input[slashIdx+1:] + } + + idx := strings.LastIndex(input, ".") + if idx == -1 { + return prefix + input, "" + } + if idx == len(input)-1 { + return prefix + input, "" + } + + return prefix + input[:idx], input[idx+1:] +} diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go new file mode 100644 index 00000000..eef4c5ed --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go @@ -0,0 +1,118 @@ +package pgpkeys + +import ( + "bytes" + "encoding/base64" + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" +) + +// EncryptShares takes an ordered set of byte slices to encrypt and the +// corresponding base64-encoded public keys to encrypt them with, encrypts each +// byte slice with the corresponding public key. +// +// Note: There is no corresponding test function; this functionality is +// thoroughly tested in the init and rekey command unit tests +func EncryptShares(input [][]byte, pgpKeys []string) ([]string, [][]byte, error) { + if len(input) != len(pgpKeys) { + return nil, nil, fmt.Errorf("mismatch between number items to encrypt and number of PGP keys") + } + encryptedShares := make([][]byte, 0, len(pgpKeys)) + entities, err := GetEntities(pgpKeys) + if err != nil { + return nil, nil, err + } + for i, entity := range entities { + ctBuf := bytes.NewBuffer(nil) + pt, err := openpgp.Encrypt(ctBuf, []*openpgp.Entity{entity}, nil, nil, nil) + if err != nil { + return nil, nil, errwrap.Wrapf("error setting up encryption for PGP message: {{err}}", err) + } + _, err = pt.Write(input[i]) + if err != nil { + return nil, nil, errwrap.Wrapf("error encrypting PGP message: {{err}}", err) + } + pt.Close() + encryptedShares = append(encryptedShares, ctBuf.Bytes()) + } + + fingerprints, err := GetFingerprints(nil, entities) + if err != nil { + return nil, nil, err + } + + return fingerprints, encryptedShares, nil +} + +// GetFingerprints takes in a list of openpgp Entities and returns the +// fingerprints. If entities is nil, it will instead parse both entities and +// fingerprints from the pgpKeys string slice. 
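+//
+// For example (editor's illustrative sketch, not part of the upstream file):
+//
+//	fps, err := GetFingerprints([]string{pgpKeyB64}, nil)
+//
+// where pgpKeyB64 holds one base64-encoded public key, returns a single
+// lowercase hex fingerprint of that key's primary key.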
+func GetFingerprints(pgpKeys []string, entities []*openpgp.Entity) ([]string, error) {
+	if entities == nil {
+		var err error
+		entities, err = GetEntities(pgpKeys)
+
+		if err != nil {
+			return nil, err
+		}
+	}
+	ret := make([]string, 0, len(entities))
+	for _, entity := range entities {
+		ret = append(ret, fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))
+	}
+	return ret, nil
+}
+
+// GetEntities takes in a string array of base64-encoded PGP keys and returns
+// the openpgp Entities
+func GetEntities(pgpKeys []string) ([]*openpgp.Entity, error) {
+	ret := make([]*openpgp.Entity, 0, len(pgpKeys))
+	for _, keystring := range pgpKeys {
+		data, err := base64.StdEncoding.DecodeString(keystring)
+		if err != nil {
+			return nil, errwrap.Wrapf("error decoding given PGP key: {{err}}", err)
+		}
+		entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
+		if err != nil {
+			return nil, errwrap.Wrapf("error parsing given PGP key: {{err}}", err)
+		}
+		ret = append(ret, entity)
+	}
+	return ret, nil
+}
+
+// DecryptBytes takes in base64-encoded encrypted bytes and the base64-encoded
+// private key and decrypts it. A bytes.Buffer is returned to allow the caller
+// to do useful things with it (get it as a []byte, get it as a string, use it
+// as an io.Reader, etc), and also because this function doesn't know if what
+// comes out is binary data or a string, so let the caller decide.
+func DecryptBytes(encodedCrypt, privKey string) (*bytes.Buffer, error) {
+	privKeyBytes, err := base64.StdEncoding.DecodeString(privKey)
+	if err != nil {
+		return nil, errwrap.Wrapf("error decoding base64 private key: {{err}}", err)
+	}
+
+	cryptBytes, err := base64.StdEncoding.DecodeString(encodedCrypt)
+	if err != nil {
+		return nil, errwrap.Wrapf("error decoding base64 crypted bytes: {{err}}", err)
+	}
+
+	entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes)))
+	if err != nil {
+		return nil, errwrap.Wrapf("error parsing private key: {{err}}", err)
+	}
+
+	entityList := &openpgp.EntityList{entity}
+	md, err := openpgp.ReadMessage(bytes.NewBuffer(cryptBytes), entityList, nil, nil)
+	if err != nil {
+		return nil, errwrap.Wrapf("error decrypting the messages: {{err}}", err)
+	}
+
+	ptBuf := bytes.NewBuffer(nil)
+	ptBuf.ReadFrom(md.UnverifiedBody)
+
+	return ptBuf, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go
new file mode 100644
index 00000000..4d5fd969
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go
@@ -0,0 +1,143 @@
+package pgpkeys
+
+import (
+	"bytes"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/keybase/go-crypto/openpgp"
+)
+
+// PubKeyFileFlag implements flag.Value and command.Example to receive exactly
+// one PGP or keybase key via a flag.
+type PubKeyFileFlag string
+
+func (p *PubKeyFileFlag) String() string { return string(*p) }
+
+func (p *PubKeyFileFlag) Set(val string) error {
+	if p != nil && *p != "" {
+		return errors.New("can only be specified once")
+	}
+
+	keys, err := ParsePGPKeys(strings.Split(val, ","))
+	if err != nil {
+		return err
+	}
+
+	if len(keys) > 1 {
+		return errors.New("can only specify one pgp key")
+	}
+
+	*p = PubKeyFileFlag(keys[0])
+	return nil
+}
+
+func (p *PubKeyFileFlag) Example() string { return "keybase:user" }
+
+// PubKeyFilesFlag implements the flag.Value interface and allows parsing and
+// reading a list of PGP public key files.
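+//
+// For example (editor's illustrative sketch, not part of the upstream file):
+//
+//	var keys PubKeyFilesFlag
+//	flag.Var(&keys, "pgp-keys", "comma-separated PGP key files or keybase:user entries")
+//
+// lets a value such as "keybase:alice,@/tmp/bob.asc" expand, in order, to
+// alice's key fetched from Keybase and the key read from /tmp/bob.asc.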
+type PubKeyFilesFlag []string + +func (p *PubKeyFilesFlag) String() string { + return fmt.Sprint(*p) +} + +func (p *PubKeyFilesFlag) Set(val string) error { + if len(*p) > 0 { + return errors.New("can only be specified once") + } + + keys, err := ParsePGPKeys(strings.Split(val, ",")) + if err != nil { + return err + } + + *p = PubKeyFilesFlag(keys) + return nil +} + +func (p *PubKeyFilesFlag) Example() string { return "keybase:user1, keybase:user2, ..." } + +// ParsePGPKeys takes a list of PGP keys and parses them either using keybase +// or reading them from disk and returns the "expanded" list of pgp keys in +// the same order. +func ParsePGPKeys(keyfiles []string) ([]string, error) { + keys := make([]string, len(keyfiles)) + + keybaseMap, err := FetchKeybasePubkeys(keyfiles) + if err != nil { + return nil, err + } + + for i, keyfile := range keyfiles { + keyfile = strings.TrimSpace(keyfile) + + if strings.HasPrefix(keyfile, kbPrefix) { + key, ok := keybaseMap[keyfile] + if !ok || key == "" { + return nil, fmt.Errorf("keybase user %q not found", strings.TrimPrefix(keyfile, kbPrefix)) + } + keys[i] = key + continue + } + + pgpStr, err := ReadPGPFile(keyfile) + if err != nil { + return nil, err + } + keys[i] = pgpStr + } + + return keys, nil +} + +// ReadPGPFile reads the given PGP file from disk. +func ReadPGPFile(path string) (string, error) { + if len(path) <= 0 { + return "", errors.New("empty path") + } + if path[0] == '@' { + path = path[1:] + } + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + buf := bytes.NewBuffer(nil) + _, err = buf.ReadFrom(f) + if err != nil { + return "", err + } + + // First parse as an armored keyring file, if that doesn't work, treat it as a straight binary/b64 string + keyReader := bytes.NewReader(buf.Bytes()) + entityList, err := openpgp.ReadArmoredKeyRing(keyReader) + if err == nil { + if len(entityList) != 1 { + return "", fmt.Errorf("more than one key found in file %q", path) + } + if entityList[0] == nil { + return "", fmt.Errorf("primary key was nil for file %q", path) + } + + serializedEntity := bytes.NewBuffer(nil) + err = entityList[0].Serialize(serializedEntity) + if err != nil { + return "", errwrap.Wrapf(fmt.Sprintf("error serializing entity for file %q: {{err}}", path), err) + } + + return base64.StdEncoding.EncodeToString(serializedEntity.Bytes()), nil + } + + _, err = base64.StdEncoding.DecodeString(buf.String()) + if err == nil { + return buf.String(), nil + } + return base64.StdEncoding.EncodeToString(buf.Bytes()), nil + +} diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go new file mode 100644 index 00000000..21572350 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go @@ -0,0 +1,117 @@ +package pgpkeys + +import ( + "bytes" + "encoding/base64" + "fmt" + "strings" + + "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/keybase/go-crypto/openpgp" +) + +const ( + kbPrefix = "keybase:" +) + +// FetchKeybasePubkeys fetches public keys from Keybase given a set of +// usernames, which are derived from correctly formatted input entries. It +// doesn't use their client code due to both the API and the fact that it is +// considered alpha and probably best not to rely on it. The keys are returned +// as base64-encoded strings. 
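Since both flag types above satisfy flag.Value, they register directly with the standard flag package. A sketch with hypothetical inputs (my-key.asc is a placeholder file, and a keybase: entry triggers a live lookup through FetchKeybasePubkeys below, so this fails offline):

package main

import (
	"flag"
	"fmt"

	"github.com/hashicorp/vault/helper/pgpkeys"
)

func main() {
	var pubKeys pgpkeys.PubKeyFilesFlag
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Var(&pubKeys, "pgp-keys", "comma-separated PGP key files or keybase:<user> entries")

	// Bare paths and @file entries are read from disk by ReadPGPFile;
	// keybase:<user> entries are resolved via the Keybase API.
	if err := fs.Parse([]string{"-pgp-keys", "@my-key.asc,keybase:somebody"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("parsed %d key(s)\n", len(pubKeys))
}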
+func FetchKeybasePubkeys(input []string) (map[string]string, error) { + client := cleanhttp.DefaultClient() + if client == nil { + return nil, fmt.Errorf("unable to create an http client") + } + + if len(input) == 0 { + return nil, nil + } + + usernames := make([]string, 0, len(input)) + for _, v := range input { + if strings.HasPrefix(v, kbPrefix) { + usernames = append(usernames, strings.TrimPrefix(v, kbPrefix)) + } + } + + if len(usernames) == 0 { + return nil, nil + } + + ret := make(map[string]string, len(usernames)) + url := fmt.Sprintf("https://keybase.io/_/api/1.0/user/lookup.json?usernames=%s&fields=public_keys", strings.Join(usernames, ",")) + resp, err := client.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + type PublicKeys struct { + Primary struct { + Bundle string + } + } + + type LThem struct { + PublicKeys `json:"public_keys"` + } + + type KbResp struct { + Status struct { + Name string + } + Them []LThem + } + + out := &KbResp{ + Them: []LThem{}, + } + + if err := jsonutil.DecodeJSONFromReader(resp.Body, out); err != nil { + return nil, err + } + + if out.Status.Name != "OK" { + return nil, fmt.Errorf("got non-OK response: %q", out.Status.Name) + } + + missingNames := make([]string, 0, len(usernames)) + var keyReader *bytes.Reader + serializedEntity := bytes.NewBuffer(nil) + for i, themVal := range out.Them { + if themVal.Primary.Bundle == "" { + missingNames = append(missingNames, usernames[i]) + continue + } + keyReader = bytes.NewReader([]byte(themVal.Primary.Bundle)) + entityList, err := openpgp.ReadArmoredKeyRing(keyReader) + if err != nil { + return nil, err + } + if len(entityList) != 1 { + return nil, fmt.Errorf("primary key could not be parsed for user %q", usernames[i]) + } + if entityList[0] == nil { + return nil, fmt.Errorf("primary key was nil for user %q", usernames[i]) + } + + serializedEntity.Reset() + err = entityList[0].Serialize(serializedEntity) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error serializing entity for user %q: {{err}}", usernames[i]), err) + } + + // The API returns values in the same ordering requested, so this should properly match + ret[kbPrefix+usernames[i]] = base64.StdEncoding.EncodeToString(serializedEntity.Bytes()) + } + + if len(missingNames) > 0 { + return nil, fmt.Errorf("unable to fetch keys for user(s) %q from keybase", strings.Join(missingNames, ",")) + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go new file mode 100644 index 00000000..c10a9055 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go @@ -0,0 +1,271 @@ +package pgpkeys + +const ( + TestPrivKey1 = `lQOYBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da +rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/ +063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f +sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg +8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAEAB/wL+KX0mdeISEpX +oDgt766Key1Kthe8nbEs5dOXIsP7OR7ZPcnE2hy6gftgVFnBGEZnWVN70vmJd6Z5y9d1mI+GecXj +UL0EpI0EmohyYDJsHUnght/5ecRNFA+VeNmGPYNQGCeHJyZOiFunGGENpHU7BbubAht8delz37Mx +JQgvMyR6AKvg8HKBoQeqV1uMWNJE/vKwV/z1dh1sjK/GFxu05Qaq0GTfAjVLuFOyJTS95yq6gblD +jUdbHLp7tBeqIKo9voWCJF5mGOlq3973vVoWETy9b0YYPCE/M7fXmK9dJITHqkROLMW6TgcFeIw4 +yL5KOBCHk+QGPSvyQN7R7Fd5BADwuT1HZmvg7Y9GjarKXDjxdNemUiHtba2rUzfH6uNmKNQvwQek 
+nma5palNUJ4/dz1aPB21FUBXJF5yWwXEdApl+lIDU0J5m4UD26rqEVRq9Kx3GsX+yfcwObkrSzW6 +kmnQSB5KI0fIuegMTM+Jxo3pB/mIRwDTMmk+vfzIGyW+7QQA8aFwFLMdKdfLgSGbl5Z6etmOAVQ2 +Oe2ebegU9z/ewi/Rdt2s9yQiAdGVM8+q15Saz8a+kyS/l1CjNPzr3VpYx1OdZ3gb7i2xoy9GdMYR +ZpTq3TuST95kx/9DqA97JrP23G47U0vwF/cg8ixCYF8Fz5dG4DEsxgMwKqhGdW58wMMD/iytkfMk +Vk6Z958Rpy7lhlC6L3zpO38767bSeZ8gRRi/NMFVOSGYepKFarnfxcTiNa+EoSVA6hUo1N64nALE +sJBpyOoTfKIpz7WwTF1+WogkiYrfM6lHon1+3qlziAcRW0IohM3g2C1i3GWdON4Cl8/PDO3R0E52 +N6iG/ctNNeMiPe60EFZhdWx0IFRlc3QgS2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUI +AgkKCwQWAgMBAh4BAheAAAoJEOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d +4hIHsG7kmJRTJfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C +Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF3 +9jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poe +o+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeUR +BRWdA5gEVduM9QEIAL53hJ5bZJ7oEDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkf +Rqnv981fFwGnh2+I1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a +9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu +9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/z +bfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOleYwxk+FoTqXEAEQEAAQAH+wVyQXaNwnjQ +xfW+M8SJNo0C7e+0d7HsuBTA/d/eP4bj6+X8RaRFVwiMvSAoxsqBNCLJP00qzzKfRQWJseD1H35z +UjM7rNVUEL2k1yppyp61S0qj0TdhVUfJDYZqRYonVgRMvzfDTB1ryKrefKenQYL/jGd9VYMnKmWZ +6GVk4WWXXx61iOt2HNcmSXKetMM1Mg67woPZkA3fJaXZ+zW0zMu4lTSB7yl3+vLGIFYILkCFnREr +drQ+pmIMwozUAt+pBq8dylnkHh6g/FtRfWmLIMDqM1NlyuHRp3dyLDFdTA93osLG0QJblfX54W34 +byX7a4HASelGi3nPjjOAsTFDkuEEANV2viaWk1CV4ryDrXGmy4Xo32Md+laGPRcVfbJ0mjZjhQsO +gWC1tjMs1qZMPhcrKIBCjjdAcAIrGV9h3CXc0uGuez4XxLO+TPBKaS0B8rKhnKph1YZuf+HrOhzS +astDnOjNIT+qucCL/qSbdYpj9of3yY61S59WphPOBjoVM3BFBADka6ZCk81gx8jA2E1e9UqQDmdM +FZaVA1E7++kqVSFRDJGnq+5GrBTwCJ+sevi+Rvf8Nx4AXvpCdtMBPX9RogsUFcR0pMrKBrgRo/Vg +EpuodY2Ef1VtqXR24OxtRf1UwvHKydIsU05rzMAy5uGgQvTzRTXxZFLGUY31wjWqmo9VPQP+PnwA +K83EV2kk2bsXwZ9MXg05iXqGQYR4bEc/12v04BtaNaDS53hBDO4JIa3Bnz+5oUoYhb8FgezUKA9I +n6RdKTTP1BLAu8titeozpNF07V++dPiSE2wrIVsaNHL1pUwW0ql50titVwe+EglWiCKPtJBcCPUA +3oepSPchiDjPqrNCYIkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZAQIABgUCVduM +9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYulEimOPzLUX/Z +XZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr +9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEc +ZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4 +EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY+XsKVYRf +NIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcIPXFv3m3WfUln +G/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O9uK3lQozbw2g +H9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKf +PRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h +7u2CfYyFPu3AlUaGNMBlvy6PEpU=` + + TestPrivKey2 = `lQOYBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG +Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4 +0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e +Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk +Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAEAB/oCBqTIsxlUgLtz +HRpWW5MJ+93xvmVV0JHhRK/ygKghq+zpC6S+cn7dwrEj1JTPh+17lyemYQK+RMeiBEduoWNKuHUd 
+WX353w2411rrc/VuGTglzhd8Ir2BdJlPesCzw4JQnrWqcBqN52W+iwhnE7PWVhnvItWnx6APK5Se +q7dzFWy8Z8tNIHm0pBQbeyo6x2rHHSWkr2fs7V02qFQhii1ayFRMcgdOWSNX6CaZJuYhk/DyjApN +9pVhi3P1pNMpFeV0Pt8Gl1f/9o6/HpAYYEt/6vtVRhFUGgtNi95oc0oyzIJxliRvd6+Z236osigQ +QEBwj1ImRK8TKyWPlykiJWc5BADfldgOCA55o3Qz/z/oVE1mm+a3FmPPTQlHBXotNEsrWV2wmJHe +lNQPI6ZwMtLrBSg8PUpG2Rvao6XJ4ZBl/VcDwfcLgCnALPCcL0L0Z3vH3Sc9Ta/bQWJODG7uSaI1 +iVJ7ArKNtVzTqRQWK967mol9CCqh4A0jRrH0aVEFbrqQ/QQA58iEJaFhzFZjufjC9N8Isn3Ky7xu +h+dk001RNCb1GnNZcx4Ld2IB+uXyYjtg7dNaUhGgGuCBo9nax89bMsBzzUukx3SHq1pxopMg6Dm8 +ImBoIAicuQWgEkaP2T0rlwCozUalJZaG1gyrzkPhkeY7CglpJycHLHfY2MIb46c8+58D/iJ83Q5j +Y4x+sqW2QeUYFwqCcOW8Urg64UxEkgXZXiNMwTAJCaxp/Pz7cgeUDwgv+6CXEdnT1910+byzK9ha +V1Q/65+/JYuCeyHxcoAb4Wtpdl7GALGd/1G0UAmq47yrefEr/b00uS35i1qUUhOzo1NmEZch/bvF +kmJ+WtAHunZcOCu0EFZhdWx0IFRlc3QgS2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUI +AgkKCwQWAgMBAh4BAheAAAoJEOuDLGfrXolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHip +ZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABq +hb5ojexdnAYRswaHV201ZCclj9rnJN1PAg0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmG +kdrg8K8ARmRILjmwuBAgJM0eXBZHNGWXelk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0 +vDttB+ZXqF88W9jAYlvdgbTtajNF5IDYDjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlx +k4edA5gEVduQkQEIAOjZV5tbpfIh5QefpIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe +4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg+YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/t +GF5xE3e5CoZRsHV/c92h3t1LdJNOnC5mUKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBH +yt0tdHtIWuQv6joTJzujqViRhlCwQYzQSKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1r +ENO8JOuPu6tMS+znFu67skq2gFFZwCQWIjdHm+2ukE+PE580WAWudyMAEQEAAQAH/i7ndRPI+t0T +AdEu0dTIdyrrg3g7gd471kQtIVZZwTYSy2yhNY/Ciu72s3ab8QNCxY8dNL5bRk8FKjHslAoNSFdO +8iZSLiDgIHOZOcjYe6pqdgQaeTHodm1Otrn2SbB+K/3oX6W/y1xe18aSojGba/nHMj5PeJbIN9Pi +jmh0WMLD/0rmkTTxR7qQ5+kMV4O29xY4qjdYRD5O0adeZX0mNncmlmQ+rX9yxrtSgFROu1jwVtfP +hcNetifTTshJnTwND8hux5ECEadlIVBHypW28Hth9TRBXmddTmv7L7mdtUO6DybgkpWpw4k4LPsk +uZ6aY4wcGRp7EVfWGr9NHbq/n+0EAOlhDXIGdylkQsndjBMyhPsXZa5fFBmOyHjXj733195Jgr1v +ZjaIomrA9cvYrmN75oKrG1jJsMEl6HfC/ZPzEj6E51/p1PRdHP7CdUUA+DG8x4M3jn+e43psVuAR +a1XbN+8/bOa0ubt7ljVPjAEvWRSvU9dRaQz93w3fduAuM07dBAD/ayK3e0d6JMJMrU50lNOXQBgL +rFbg4rWzPO9BJQdhjOhmOZQiUa1Q+EV+s95yIUg1OAfaMP9KRIljr5RCdGNS6WoMNBAQOSrZpelf +jW4NpzphNfWDGVkUoPoskVtJz/nu9d860dGd3Al0kSmtUpMu5QKlo+sSxXUPbWLUn8V9/wP/ScCW +H+0gtL4R7SFazPeTIP+Cu5oR7A/DlFVLJKa3vo+atkhSvwxHGbg04vb/W4mKhGGVtMBtlhRmaWOe +PhUulU5FdaYsdlpN/Yd+hhgU6NHlyImPGVEHWD8c6CG8qoZfpR33j2sqshs4i/MtJZeBvl62vxPn +9bDN7KAjFNll9axAjIkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZAQIABgUCVduQ +kQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDDhnV3bXQsCvn/ +6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQe3l4CqJvkn6j +ybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4KBIrp/bhG6Pdn +igKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eYENtyOmEMWOFC +LLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H/1trYUtJjXQK +HmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7PkUZTfpaP/L6 +DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0UPEnjvtZTp5yO +hTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQdw/2epIewH0L/ +FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4MFOMVRn1dc3q +dXlg3mimA+iK7tABQfG0RJ9YzWs=` + + TestPrivKey3 = `lQOXBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj +6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4 
+Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH +CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy +resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAEAB/dQbElFIa0VklZa +39ZLhtbBxACSWH3ql3EtRZaB2Mh4zSALbFyJDQfScOy8AZHmv66Ozxit9X9WsYr9OzcHujgl/2da +A3lybF6iLw1YDNaL11G6kuyn5sFP6lYGMRGOIWSik9oSVF6slo8m8ujRLdBsdMXVcElHKzCJiWmt +JZHEnUkl9X96fIPajMBfWjHHwcaeMOc77nvjwqy5wC4EY8TSVYzxeZHL7DADQ0EHBcThlmfizpCq +26LMVb6ju8STH7uDDFyKmhr/hC2vOkt+PKsvBCmW8/ESanO1zKPD9cvSsOWr2rZWNnkDRftqzOU5 +OCrI+3o9E74+toNb07bPntEEAMEStOzSvqZ6NKdh7EZYPA4mkkFC+EiHYIoinP1sd9V8O2Hq+dzx +yFHtWu0LmP6uWXk45vsP9y1UMJcEa33ew5JJa7zgucI772/BNvd/Oys/PqwIAl6uNIY8uYLgmn4L +1IPatp7vDiXzZSivPZd4yN4S4zCypZp9cnpO3qv8q7CtBADW87IA0TabdoxiN+m4XL7sYDRIfglr +MRPAlfrkAUaGDBx/t1xb6IaKk7giFdwHpTI6+g9XNkqKqogMe4Fp+nsd1xtfsNUBn6iKZavm5kXe +Lp9QgE+K6mvIreOTe2PKQqXqgPRG6+SRGatoKeY76fIpd8AxOJyWERxcq2lUHLn45QP/UXDTcYB7 +gzJtZrfpXN0GqQ0lYXMzbQfLnkUsu3mYzArfNy0otzEmKTkwmKclNY1/EJSzSdHfgmeA260a0nLK +64C0wPgSmOqw90qwi5odAYSjSFBapDbyGF86JpHrLxyEEpGoXanRPwWfbiWp19Nwg6nknA87AtaM +3+AHjbWzwCpHL7QQVmF1bHQgVGVzdCBLZXkgM4kBOAQTAQIAIgUCVduSIwIbLwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQ9HlLVvwtxt1aMQf/aaGoL1rRWTUjM6DEShXFhWpV29rEjSdNk5N+ +ZwVifgdCVD5IsSjI1Z7mO2SHHiTm4eKnHAofM6/TZgzXg1YLpu8rDYJARMsM8bgK/xgxSamGjm2c +wN220jOnwePIlG0drNTW5N6zb/K6qHoscJ6NUkjS5JPdGJuq7B0bdCM8/xSbG75gL34U5bYqK38B +DwmW4UMl2rf/BJfxV9hmsZ2Cat4TspgyiWEKTMZI+PugXKDDwuoqgm+320K4EqFkwG4y/WwHkKgk +hZ0+io5lzhTsvVd2p8q8VlH9GG5eA3WWQj0yqucsOmKQvcuT5y0vFY6NQJbyuioqgdlgEXtc+p0B ++Z0DmARV25IjAQgA49yN3hCBsuWoiTezoE9FHJXOCVOBR1/4jStQPJtoMl8mhtl3xTp7iGQ+9GhD +y0l5+fP+qcP/rfBq0BslhxVOZ7jQjdUoM6ZUZzJoPGIo/V2KwqpwQl3tdCIjvagCJeYQfTL7lTCc +4ySz+XBoAYMwZVGMcRcjp+JE8Wx9Ovzuq8wnelbU6I5dVJ7O4E1OWbIkLuytDX+fDEvfft6/oPXN +Bl3cm6FzEuQetQQss3DOG9xnvS+DrjmMCbPwR2a++ioQ8+geoqA/kB4cAI6xOb3ncoeGDHc1i4Y9 +T9Ggi+6Aq3girmfDtNYVOM8cZUXcZNCvLkJn8DNeIvnuFUSEO+a5PwARAQABAAf/TPd98CmRNdV/ +VUI8aYT9Kkervdi4DVzsfvrHcoFn88PSJrCkVTmI6qw526Kwa6VZD0YMmll7LszLt5nD1lorDrwN +rir3FmMzlVwge20IvXRwX4rkunYxtA2oFvL+LsEEhtXGx0ERbWRDapk+eGxQ15hxIO4Y/Cdg9E+a +CWfQUrTSnC6qMVfVYMGfnM1yNX3OWattEFfmxQas5XqQk/0FgjCZALixdanjN/r1tjp5/2MiSD8N +Wkemzsr6yPicnc3+BOZc5YOOnH8FqBvVHcDlSJI6pCOCEiO3Pq2QEk/1evONulbF116mLnQoGrpp +W77l+5O42VUpZfjROCPd5DYyMQQA492CFXZpDIJ2emB9/nK8X6IzdVRK3oof8btbSNnme5afIzhs +wR1ruX30O7ThfB+5ezpbgK1C988CWkr9SNSTy43omarafGig6/Y1RzdiITILuIGfbChoSpc70jXx +U0nzJ/1i9yZ/vDgP3EC2miRhlDcp5w0Bu0oMBlgG/1uhj0cEAP/+7aFGP0fo2MZPhyl5feHKWj4k +85XoAIpMBnzF6HTGU3ljAE56a+4sVw3bWB755DPhvpZvDkX60I9iIJxio8TK5ITdfjlLhxuskXyt +ycwWI/4J+soeq4meoxK9jxZJuDl/qvoGfyzNg1oy2OBehX8+6erW46kr6Z/MQutS3zJJBACmJHrK +VR40qD7a8KbvfuM3ruwlm5JqT/Ykq1gfKKxHjWDIUIeyBX/axGQvAGNYeuuQCzZ0+QsEWur3C4kN +U+Pb5K1WGyOKkhJzivSI56AG3d8TA/Q0JhqST6maY0fvUoahWSCcpd7MULa3n1zx5Wsvi8mkVtup +Js/IDi/kqneqM0XviQI+BBgBAgAJBQJV25IjAhsuASkJEPR5S1b8LcbdwF0gBBkBAgAGBQJV25Ij +AAoJEAUj/03Hcrkg84UIAKxn9nizYtwSgDnVNb5PnD5h6+Ui6r7ffYm2o0im4YhakbFTHIPI9PRh +BavRI5sE5Fg2vtE/x38jattoUrJoNoq9Gh9iv5PBfL3amEGjul0RRqYGl+ub+yv7YGAAHbHcdZen +4gx15VWGpB7y3hycWbdzV8h3EAPKIm5XmB7YyXmArnI3CoJA+HtTZGoL6WZWUwka9YichGfaZ/oD +umENg1l87Pp2RqvjLKHmv2tGCtnDzyv/IiWur9zopFQiCc8ysVgRq6CA5x5nzbv6MqRspYUS4e2I +LFbuREA3blR+caw9oX41IYzarW8IbgeIXJ3HqUyhczRKF/z5nDKtX/kHMCqlbAgAnfu0TALnwVuj +KeXLo4Y7OA9LTEqfORcw62q5OjSoQf/VsRSwGSefv3kGZk5N/igELluU3qpG/twZI/TSL6zGqXU2 +FOMlyMm1849TOB9b4B//4dHrjzPhztzowKMMUqeTxmSgYtFTshKN6eQ0XO+7ZuOXEmSKXS4kOUs9 +ttfzSiPNXUZL2D5nFU9H7rw3VAuXYVTrOx+Dfi6mYsscbxUbi8THODI2Q7B9Ni92DJE1OOe4+57o 
+fXZ9ln24I14bna/uVHd6hBwLEE6eLCCKkHxQnnZFZduXDHMK0a0OL8RYHfMtNSem4pyC5wDQui1u +KFIzGEPKVoBF9U7VBXpyxpsz+A==` + + TestPubKey1 = `mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da +rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/ +063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f +sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg +8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOfLr44B +HbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRTJfjECi+AuTGeDwBy84TD +cRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3CEe8cMwIPqPT2kajJVdOyrvkyuFOdPFOE +A7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlB +C0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXa +QKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7oEDCn +aY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I1Ktm698UAZS9Jt8y +jak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb +6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5N +ZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu +9p315E87DOleYwxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ +AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYu +lEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHN +C1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0 +YwKoz3h9+QEcZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJi +oPn2jVMnXCm4EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH +/AtY+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcI +PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O +9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx +8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd +OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=` + + TestPubKey2 = `mQENBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG +Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4 +0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e +Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk +Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOuDLGfr +XolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHipZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO +2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABqhb5ojexdnAYRswaHV201ZCclj9rnJN1P +Ag0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmGkdrg8K8ARmRILjmwuBAgJM0eXBZHNGWX +elk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0vDttB+ZXqF88W9jAYlvdgbTtajNF5IDY +DjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlxk4e5AQ0EVduQkQEIAOjZV5tbpfIh5Qef +pIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg ++YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/tGF5xE3e5CoZRsHV/c92h3t1LdJNOnC5m +UKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBHyt0tdHtIWuQv6joTJzujqViRhlCwQYzQ +SKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1rENO8JOuPu6tMS+znFu67skq2gFFZwCQW +IjdHm+2ukE+PE580WAWudyMAEQEAAYkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZ +AQIABgUCVduQkQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDD +hnV3bXQsCvn/6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQ 
+e3l4CqJvkn6jybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4K +BIrp/bhG6PdnigKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eY +ENtyOmEMWOFCLLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H +/1trYUtJjXQKHmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7 +PkUZTfpaP/L6DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0U +PEnjvtZTp5yOhTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQd +w/2epIewH0L/FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4 +MFOMVRn1dc3qdXlg3mimA+iK7tABQfG0RJ9YzWs=` + + TestPubKey3 = `mQENBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj +6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4 +Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH +CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy +resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDOJATgEEwECACIFAlXbkiMCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEPR5S1b8 +LcbdWjEH/2mhqC9a0Vk1IzOgxEoVxYVqVdvaxI0nTZOTfmcFYn4HQlQ+SLEoyNWe5jtkhx4k5uHi +pxwKHzOv02YM14NWC6bvKw2CQETLDPG4Cv8YMUmpho5tnMDdttIzp8HjyJRtHazU1uTes2/yuqh6 +LHCejVJI0uST3RibquwdG3QjPP8Umxu+YC9+FOW2Kit/AQ8JluFDJdq3/wSX8VfYZrGdgmreE7KY +MolhCkzGSPj7oFygw8LqKoJvt9tCuBKhZMBuMv1sB5CoJIWdPoqOZc4U7L1XdqfKvFZR/RhuXgN1 +lkI9MqrnLDpikL3Lk+ctLxWOjUCW8roqKoHZYBF7XPqdAfm5AQ0EVduSIwEIAOPcjd4QgbLlqIk3 +s6BPRRyVzglTgUdf+I0rUDybaDJfJobZd8U6e4hkPvRoQ8tJefnz/qnD/63watAbJYcVTme40I3V +KDOmVGcyaDxiKP1disKqcEJd7XQiI72oAiXmEH0y+5UwnOMks/lwaAGDMGVRjHEXI6fiRPFsfTr8 +7qvMJ3pW1OiOXVSezuBNTlmyJC7srQ1/nwxL337ev6D1zQZd3JuhcxLkHrUELLNwzhvcZ70vg645 +jAmz8EdmvvoqEPPoHqKgP5AeHACOsTm953KHhgx3NYuGPU/RoIvugKt4Iq5nw7TWFTjPHGVF3GTQ +ry5CZ/AzXiL57hVEhDvmuT8AEQEAAYkCPgQYAQIACQUCVduSIwIbLgEpCRD0eUtW/C3G3cBdIAQZ +AQIABgUCVduSIwAKCRAFI/9Nx3K5IPOFCACsZ/Z4s2LcEoA51TW+T5w+YevlIuq+332JtqNIpuGI +WpGxUxyDyPT0YQWr0SObBORYNr7RP8d/I2rbaFKyaDaKvRofYr+TwXy92phBo7pdEUamBpfrm/sr ++2BgAB2x3HWXp+IMdeVVhqQe8t4cnFm3c1fIdxADyiJuV5ge2Ml5gK5yNwqCQPh7U2RqC+lmVlMJ +GvWInIRn2mf6A7phDYNZfOz6dkar4yyh5r9rRgrZw88r/yIlrq/c6KRUIgnPMrFYEauggOceZ827 ++jKkbKWFEuHtiCxW7kRAN25UfnGsPaF+NSGM2q1vCG4HiFydx6lMoXM0Shf8+ZwyrV/5BzAqpWwI +AJ37tEwC58Fboynly6OGOzgPS0xKnzkXMOtquTo0qEH/1bEUsBknn795BmZOTf4oBC5blN6qRv7c +GSP00i+sxql1NhTjJcjJtfOPUzgfW+Af/+HR648z4c7c6MCjDFKnk8ZkoGLRU7ISjenkNFzvu2bj +lxJkil0uJDlLPbbX80ojzV1GS9g+ZxVPR+68N1QLl2FU6zsfg34upmLLHG8VG4vExzgyNkOwfTYv +dgyRNTjnuPue6H12fZZ9uCNeG52v7lR3eoQcCxBOniwgipB8UJ52RWXblwxzCtGtDi/EWB3zLTUn +puKcgucA0LotbihSMxhDylaARfVO1QV6csabM/g=` + + TestAAPubKey1 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzz +wiMwBS5cD0darGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7 +H+/mhfFvKmgr0Y5kDCF1j0T/063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX +1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0fsF5St9jhO7mbZU9EFkv9O3t3EaUR +fHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg8hQssKeVGpuskTdz +5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3QgS2V5 +IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJ +EOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRT +JfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C +Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1Z +mumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4z +J2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+ +7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7o 
+EDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I +1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okj +h5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTj +OleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2o +P/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOle +Ywxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ +AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVh +EGipBmpDGRYulEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHk +GRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRd +tPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEcZHvsjSZjgydKvfLY +cm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4EKc7 +fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY ++XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7 +moViAAcIPXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWko +jHqyob3cyLgy6z9Q557O9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJ +iEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKfPRENiLOOc19MmS+phmUy +rbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h7u2CfYyF +Pu3AlUaGNMBlvy6PEpU= +=NUTS +-----END PGP PUBLIC KEY BLOCK-----` +) diff --git a/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go b/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go new file mode 100644 index 00000000..4c373402 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go @@ -0,0 +1,80 @@ +package proxyutil + +import ( + "fmt" + "net" + "sync" + "time" + + proxyproto "github.com/armon/go-proxyproto" + "github.com/hashicorp/errwrap" + sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/sdk/helper/parseutil" +) + +// ProxyProtoConfig contains configuration for the PROXY protocol +type ProxyProtoConfig struct { + sync.RWMutex + Behavior string + AuthorizedAddrs []*sockaddr.SockAddrMarshaler `json:"authorized_addrs"` +} + +func (p *ProxyProtoConfig) SetAuthorizedAddrs(addrs interface{}) error { + aa, err := parseutil.ParseAddrs(addrs) + if err != nil { + return err + } + + p.AuthorizedAddrs = aa + return nil +} + +// WrapInProxyProto wraps the given listener in the PROXY protocol. If behavior +// is "use_if_authorized" or "deny_if_unauthorized" it also configures a +// SourceCheck based on the given ProxyProtoConfig. In an error case it returns +// the original listener and the error. 
+func WrapInProxyProto(listener net.Listener, config *ProxyProtoConfig) (net.Listener, error) { + config.Lock() + defer config.Unlock() + + var newLn *proxyproto.Listener + + switch config.Behavior { + case "use_always": + newLn = &proxyproto.Listener{ + Listener: listener, + ProxyHeaderTimeout: 10 * time.Second, + } + + case "allow_authorized", "deny_unauthorized": + newLn = &proxyproto.Listener{ + Listener: listener, + ProxyHeaderTimeout: 10 * time.Second, + SourceCheck: func(addr net.Addr) (bool, error) { + config.RLock() + defer config.RUnlock() + + sa, err := sockaddr.NewSockAddr(addr.String()) + if err != nil { + return false, errwrap.Wrapf("error parsing remote address: {{err}}", err) + } + + for _, authorizedAddr := range config.AuthorizedAddrs { + if authorizedAddr.Contains(sa) { + return true, nil + } + } + + if config.Behavior == "allow_authorized" { + return false, nil + } + + return false, proxyproto.ErrInvalidUpstream + }, + } + default: + return listener, fmt.Errorf("unknown behavior type for proxy proto config") + } + + return newLn, nil +} diff --git a/vendor/github.com/hashicorp/vault/helper/reload/reload.go b/vendor/github.com/hashicorp/vault/helper/reload/reload.go new file mode 100644 index 00000000..44526c08 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/reload/reload.go @@ -0,0 +1,85 @@ +package reload + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "sync" + + "github.com/hashicorp/errwrap" +) + +// ReloadFunc are functions that are called when a reload is requested +type ReloadFunc func(map[string]interface{}) error + +// CertificateGetter satisfies ReloadFunc and its GetCertificate method +// satisfies the tls.GetCertificate function signature. Currently it does not +// allow changing paths after the fact. 
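A sketch of plugging the getter into a TLS server, assuming the vendored import path; server.crt and server.key are placeholder paths, and the signal handling that would normally trigger Reload on certificate rotation is omitted:

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/hashicorp/vault/helper/reload"
)

func main() {
	cg := reload.NewCertificateGetter("server.crt", "server.key", "")

	// Prime the certificate once; later calls to Reload (e.g. from a SIGHUP
	// handler) swap in rotated files without restarting the listener.
	if err := cg.Reload(nil); err != nil {
		log.Fatal(err)
	}

	srv := &http.Server{
		Addr:      ":8443",
		TLSConfig: &tls.Config{GetCertificate: cg.GetCertificate},
	}
	// Empty cert/key paths: GetCertificate supplies the pair instead.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}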
+type CertificateGetter struct { + sync.RWMutex + + cert *tls.Certificate + + certFile string + keyFile string + passphrase string +} + +func NewCertificateGetter(certFile, keyFile, passphrase string) *CertificateGetter { + return &CertificateGetter{ + certFile: certFile, + keyFile: keyFile, + passphrase: passphrase, + } +} + +func (cg *CertificateGetter) Reload(_ map[string]interface{}) error { + certPEMBlock, err := ioutil.ReadFile(cg.certFile) + if err != nil { + return err + } + keyPEMBlock, err := ioutil.ReadFile(cg.keyFile) + if err != nil { + return err + } + + // Check for encrypted pem block + keyBlock, _ := pem.Decode(keyPEMBlock) + if keyBlock == nil { + return errors.New("decoded PEM is blank") + } + + if x509.IsEncryptedPEMBlock(keyBlock) { + keyBlock.Bytes, err = x509.DecryptPEMBlock(keyBlock, []byte(cg.passphrase)) + if err != nil { + return errwrap.Wrapf("Decrypting PEM block failed {{err}}", err) + } + keyPEMBlock = pem.EncodeToMemory(keyBlock) + } + + cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return err + } + + cg.Lock() + defer cg.Unlock() + + cg.cert = &cert + + return nil +} + +func (cg *CertificateGetter) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + cg.RLock() + defer cg.RUnlock() + + if cg.cert == nil { + return nil, fmt.Errorf("nil certificate") + } + + return cg.cert, nil +} diff --git a/vendor/github.com/hashicorp/vault/helper/storagepacker/storagepacker.go b/vendor/github.com/hashicorp/vault/helper/storagepacker/storagepacker.go new file mode 100644 index 00000000..5bb7a725 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/storagepacker/storagepacker.go @@ -0,0 +1,403 @@ +package storagepacker + +import ( + "context" + "crypto/md5" + "fmt" + "strconv" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/compressutil" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + bucketCount = 256 + // StoragePackerBucketsPrefix is the default storage key prefix under which + // bucket data will be stored. + StoragePackerBucketsPrefix = "packer/buckets/" +) + +// StoragePacker packs items into a specific number of buckets by hashing +// its identifier and indexing on it. Currently this supports only 256 bucket entries and +// hence relies on the first byte of the hash value for indexing. 
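A put/get round trip, assuming the vendored import paths; logical.InmemStorage stands in for real storage and the item ID is arbitrary:

package main

import (
	"context"
	"fmt"
	"log"

	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/helper/storagepacker"
	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	// An empty prefix falls back to StoragePackerBucketsPrefix.
	packer, err := storagepacker.NewStoragePacker(&logical.InmemStorage{}, hclog.NewNullLogger(), "")
	if err != nil {
		log.Fatal(err)
	}

	if err := packer.PutItem(context.Background(), &storagepacker.Item{ID: "entity-42"}); err != nil {
		log.Fatal(err)
	}

	got, err := packer.GetItem("entity-42")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("stored in", packer.BucketKey("entity-42"), "found:", got != nil)
}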
+type StoragePacker struct { + view logical.Storage + logger log.Logger + storageLocks []*locksutil.LockEntry + viewPrefix string +} + +// View returns the storage view configured to be used by the packer +func (s *StoragePacker) View() logical.Storage { + return s.view +} + +// GetBucket returns a bucket for a given key +func (s *StoragePacker) GetBucket(key string) (*Bucket, error) { + if key == "" { + return nil, fmt.Errorf("missing bucket key") + } + + lock := locksutil.LockForKey(s.storageLocks, key) + lock.RLock() + defer lock.RUnlock() + + // Read from storage + storageEntry, err := s.view.Get(context.Background(), key) + if err != nil { + return nil, errwrap.Wrapf("failed to read packed storage entry: {{err}}", err) + } + if storageEntry == nil { + return nil, nil + } + + uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) + if err != nil { + return nil, errwrap.Wrapf("failed to decompress packed storage entry: {{err}}", err) + } + if notCompressed { + uncompressedData = storageEntry.Value + } + + var bucket Bucket + err = proto.Unmarshal(uncompressedData, &bucket) + if err != nil { + return nil, errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err) + } + + return &bucket, nil +} + +// upsert either inserts a new item into the bucket or updates an existing one +// if an item with a matching key is already present. +func (s *Bucket) upsert(item *Item) error { + if s == nil { + return fmt.Errorf("nil storage bucket") + } + + if item == nil { + return fmt.Errorf("nil item") + } + + if item.ID == "" { + return fmt.Errorf("missing item ID") + } + + // Look for an item with matching key and don't modify the collection while + // iterating + foundIdx := -1 + for itemIdx, bucketItems := range s.Items { + if bucketItems.ID == item.ID { + foundIdx = itemIdx + break + } + } + + // If there is no match, append the item, otherwise update it + if foundIdx == -1 { + s.Items = append(s.Items, item) + } else { + s.Items[foundIdx] = item + } + + return nil +} + +// BucketKey returns the storage key of the bucket where the given item will be +// stored. 
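The key derivation below needs only the standard library to reproduce: the first byte of the item ID's MD5 digest selects one of the 256 buckets under the view prefix. A standalone sketch:

package main

import (
	"crypto/md5"
	"fmt"
	"strconv"
)

func main() {
	sum := md5.Sum([]byte("entity-42"))
	// One of 256 buckets, e.g. "packer/buckets/137".
	fmt.Println("packer/buckets/" + strconv.Itoa(int(sum[0])))
}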
+func (s *StoragePacker) BucketKey(itemID string) string { + hf := md5.New() + input := []byte(itemID) + n, err := hf.Write(input) + // Make linter happy + if err != nil || n != len(input) { + return "" + } + index := uint8(hf.Sum(nil)[0]) + return s.viewPrefix + strconv.Itoa(int(index)) +} + +// DeleteItem removes the item from the respective bucket +func (s *StoragePacker) DeleteItem(_ context.Context, itemID string) error { + return s.DeleteMultipleItems(context.Background(), nil, itemID) +} + +func (s *StoragePacker) DeleteMultipleItems(ctx context.Context, logger hclog.Logger, itemIDs ...string) error { + var err error + switch len(itemIDs) { + case 0: + // Nothing + return nil + + case 1: + logger = hclog.NewNullLogger() + fallthrough + + default: + lockIndexes := make(map[string]struct{}, len(s.storageLocks)) + for _, itemID := range itemIDs { + bucketKey := s.BucketKey(itemID) + if _, ok := lockIndexes[bucketKey]; !ok { + lockIndexes[bucketKey] = struct{}{} + } + } + + lockKeys := make([]string, 0, len(lockIndexes)) + for k := range lockIndexes { + lockKeys = append(lockKeys, k) + } + + locks := locksutil.LocksForKeys(s.storageLocks, lockKeys) + for _, lock := range locks { + lock.Lock() + defer lock.Unlock() + } + } + + if logger == nil { + logger = hclog.NewNullLogger() + } + + bucketCache := make(map[string]*Bucket, len(s.storageLocks)) + + logger.Debug("deleting multiple items from storagepacker; caching and deleting from buckets", "total_items", len(itemIDs)) + + var pctDone int + for idx, itemID := range itemIDs { + bucketKey := s.BucketKey(itemID) + + bucket, bucketFound := bucketCache[bucketKey] + if !bucketFound { + // Read from storage + storageEntry, err := s.view.Get(context.Background(), bucketKey) + if err != nil { + return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) + } + if storageEntry == nil { + return nil + } + + uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) + if err != nil { + return errwrap.Wrapf("failed to decompress packed storage value: {{err}}", err) + } + if notCompressed { + uncompressedData = storageEntry.Value + } + + bucket = new(Bucket) + err = proto.Unmarshal(uncompressedData, bucket) + if err != nil { + return errwrap.Wrapf("failed decoding packed storage entry: {{err}}", err) + } + } + + // Look for a matching storage entry + foundIdx := -1 + for itemIdx, item := range bucket.Items { + if item.ID == itemID { + foundIdx = itemIdx + break + } + } + + // If there is a match, remove it from the collection and persist the + // resulting collection + if foundIdx != -1 { + bucket.Items[foundIdx] = bucket.Items[len(bucket.Items)-1] + bucket.Items = bucket.Items[:len(bucket.Items)-1] + if !bucketFound { + bucketCache[bucketKey] = bucket + } + } + + newPctDone := idx * 100.0 / len(itemIDs) + if int(newPctDone) > pctDone { + pctDone = int(newPctDone) + logger.Trace("bucket item removal progress", "percent", pctDone, "items_removed", idx) + } + } + + logger.Debug("persisting buckets", "total_buckets", len(bucketCache)) + + // Persist all buckets in the cache; these will be the ones that had + // deletions + pctDone = 0 + idx := 0 + for _, bucket := range bucketCache { + // Fail if the context is canceled, the storage calls will fail anyways + if ctx.Err() != nil { + return ctx.Err() + } + + err = s.putBucket(ctx, bucket) + if err != nil { + return err + } + + newPctDone := idx * 100.0 / len(bucketCache) + if int(newPctDone) > pctDone { + pctDone = int(newPctDone) + logger.Trace("bucket persistence 
progress", "percent", pctDone, "buckets_persisted", idx) + } + + idx++ + } + + return nil +} + +func (s *StoragePacker) putBucket(ctx context.Context, bucket *Bucket) error { + if bucket == nil { + return fmt.Errorf("nil bucket entry") + } + + if bucket.Key == "" { + return fmt.Errorf("missing key") + } + + if !strings.HasPrefix(bucket.Key, s.viewPrefix) { + return fmt.Errorf("incorrect prefix; bucket entry key should have %q prefix", s.viewPrefix) + } + + marshaledBucket, err := proto.Marshal(bucket) + if err != nil { + return errwrap.Wrapf("failed to marshal bucket: {{err}}", err) + } + + compressedBucket, err := compressutil.Compress(marshaledBucket, &compressutil.CompressionConfig{ + Type: compressutil.CompressionTypeSnappy, + }) + if err != nil { + return errwrap.Wrapf("failed to compress packed bucket: {{err}}", err) + } + + // Store the compressed value + err = s.view.Put(ctx, &logical.StorageEntry{ + Key: bucket.Key, + Value: compressedBucket, + }) + if err != nil { + return errwrap.Wrapf("failed to persist packed storage entry: {{err}}", err) + } + + return nil +} + +// GetItem fetches the storage entry for a given key from its corresponding +// bucket. +func (s *StoragePacker) GetItem(itemID string) (*Item, error) { + if itemID == "" { + return nil, fmt.Errorf("empty item ID") + } + + bucketKey := s.BucketKey(itemID) + + // Fetch the bucket entry + bucket, err := s.GetBucket(bucketKey) + if err != nil { + return nil, errwrap.Wrapf("failed to read packed storage item: {{err}}", err) + } + if bucket == nil { + return nil, nil + } + + // Look for a matching storage entry in the bucket items + for _, item := range bucket.Items { + if item.ID == itemID { + return item, nil + } + } + + return nil, nil +} + +// PutItem stores the given item in its respective bucket +func (s *StoragePacker) PutItem(_ context.Context, item *Item) error { + if item == nil { + return fmt.Errorf("nil item") + } + + if item.ID == "" { + return fmt.Errorf("missing ID in item") + } + + var err error + bucketKey := s.BucketKey(item.ID) + + bucket := &Bucket{ + Key: bucketKey, + } + + // In this case, we persist the storage entry regardless of the read + // storageEntry below is nil or not. Hence, directly acquire write lock + // even to read the entry. + lock := locksutil.LockForKey(s.storageLocks, bucketKey) + lock.Lock() + defer lock.Unlock() + + // Check if there is an existing bucket for a given key + storageEntry, err := s.view.Get(context.Background(), bucketKey) + if err != nil { + return errwrap.Wrapf("failed to read packed storage bucket entry: {{err}}", err) + } + + if storageEntry == nil { + // If the bucket entry does not exist, this will be the only item the + // bucket that is going to be persisted. 
+ bucket.Items = []*Item{ + item, + } + } else { + uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) + if err != nil { + return errwrap.Wrapf("failed to decompress packed storage entry: {{err}}", err) + } + if notCompressed { + uncompressedData = storageEntry.Value + } + + err = proto.Unmarshal(uncompressedData, bucket) + if err != nil { + return errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err) + } + + err = bucket.upsert(item) + if err != nil { + return errwrap.Wrapf("failed to update entry in packed storage entry: {{err}}", err) + } + } + + return s.putBucket(context.Background(), bucket) +} + +// NewStoragePacker creates a new storage packer for a given view +func NewStoragePacker(view logical.Storage, logger log.Logger, viewPrefix string) (*StoragePacker, error) { + if view == nil { + return nil, fmt.Errorf("nil view") + } + + if viewPrefix == "" { + viewPrefix = StoragePackerBucketsPrefix + } + + if !strings.HasSuffix(viewPrefix, "/") { + viewPrefix = viewPrefix + "/" + } + + // Create a new packer object for the given view + packer := &StoragePacker{ + view: view, + viewPrefix: viewPrefix, + logger: logger, + storageLocks: locksutil.CreateLocks(), + } + + return packer, nil +} diff --git a/vendor/github.com/hashicorp/vault/helper/storagepacker/types.pb.go b/vendor/github.com/hashicorp/vault/helper/storagepacker/types.pb.go new file mode 100644 index 00000000..8d42ec07 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/storagepacker/types.pb.go @@ -0,0 +1,170 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: helper/storagepacker/types.proto + +package storagepacker + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Item represents an entry that gets inserted into the storage packer +type Item struct { + // ID must be provided by the caller; the same value, if used with GetItem, + // can be used to fetch the item. However, when iterating through a bucket, + // this ID will be an internal ID. In other words, outside of the use-case + // described above, the caller *must not* rely on this value to be + // consistent with what they passed in. 
+	ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// message is the contents of the item
+	Message              *any.Any `sentinel:"" protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Item) Reset()         { *m = Item{} }
+func (m *Item) String() string { return proto.CompactTextString(m) }
+func (*Item) ProtoMessage()    {}
+func (*Item) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c0e98c66c4f51b7f, []int{0}
+}
+
+func (m *Item) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Item.Unmarshal(m, b)
+}
+func (m *Item) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Item.Marshal(b, m, deterministic)
+}
+func (m *Item) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Item.Merge(m, src)
+}
+func (m *Item) XXX_Size() int {
+	return xxx_messageInfo_Item.Size(m)
+}
+func (m *Item) XXX_DiscardUnknown() {
+	xxx_messageInfo_Item.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Item proto.InternalMessageInfo
+
+func (m *Item) GetID() string {
+	if m != nil {
+		return m.ID
+	}
+	return ""
+}
+
+func (m *Item) GetMessage() *any.Any {
+	if m != nil {
+		return m.Message
+	}
+	return nil
+}
+
+// Bucket is a construct to hold multiple items within itself. This
+// abstraction contains multiple buckets of the same kind within itself and
+// shares among them the items that get inserted. When the bucket as a whole
+// gets too big to hold more items, the contained buckets get pushed out only
+// to become independent buckets. Hence, this can grow infinitely in terms of
+// storage space for items that get inserted.
+type Bucket struct {
+	// Key is the storage path where the bucket gets stored
+	Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// Items holds the items contained within this bucket. Used by v1.
+	Items []*Item `sentinel:"" protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"`
+	// ItemMap stores a mapping of item ID to message. Used by v2.
+ ItemMap map[string]*any.Any `sentinel:"" protobuf:"bytes,3,rep,name=item_map,json=itemMap,proto3" json:"item_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_c0e98c66c4f51b7f, []int{1} +} + +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Bucket) GetItems() []*Item { + if m != nil { + return m.Items + } + return nil +} + +func (m *Bucket) GetItemMap() map[string]*any.Any { + if m != nil { + return m.ItemMap + } + return nil +} + +func init() { + proto.RegisterType((*Item)(nil), "storagepacker.Item") + proto.RegisterType((*Bucket)(nil), "storagepacker.Bucket") + proto.RegisterMapType((map[string]*any.Any)(nil), "storagepacker.Bucket.ItemMapEntry") +} + +func init() { proto.RegisterFile("helper/storagepacker/types.proto", fileDescriptor_c0e98c66c4f51b7f) } + +var fileDescriptor_c0e98c66c4f51b7f = []byte{ + // 276 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xcf, 0x4b, 0xc3, 0x30, + 0x14, 0xc7, 0x69, 0xeb, 0x36, 0x7d, 0x53, 0x91, 0xe8, 0xa1, 0xee, 0x54, 0x7a, 0xaa, 0x1e, 0x12, + 0x9c, 0x17, 0x11, 0x3c, 0x38, 0x50, 0xf0, 0x20, 0x48, 0x8f, 0x5e, 0x24, 0xed, 0x9e, 0x6d, 0xe8, + 0x8f, 0x84, 0x24, 0x1d, 0xf4, 0x1f, 0xf5, 0xef, 0x91, 0x36, 0x0e, 0x9c, 0x0c, 0x6f, 0x2f, 0x7c, + 0x3f, 0xf9, 0xe4, 0x1b, 0x1e, 0x44, 0x25, 0xd6, 0x0a, 0x35, 0x33, 0x56, 0x6a, 0x5e, 0xa0, 0xe2, + 0x79, 0x85, 0x9a, 0xd9, 0x5e, 0xa1, 0xa1, 0x4a, 0x4b, 0x2b, 0xc9, 0xc9, 0x4e, 0xb4, 0xb8, 0x2c, + 0xa4, 0x2c, 0x6a, 0x64, 0x63, 0x98, 0x75, 0x9f, 0x8c, 0xb7, 0xbd, 0x23, 0xe3, 0x67, 0x38, 0x78, + 0xb1, 0xd8, 0x90, 0x53, 0xf0, 0xc5, 0x3a, 0xf4, 0x22, 0x2f, 0x39, 0x4a, 0x7d, 0xb1, 0x26, 0x14, + 0x66, 0x0d, 0x1a, 0xc3, 0x0b, 0x0c, 0xfd, 0xc8, 0x4b, 0xe6, 0xcb, 0x0b, 0xea, 0x24, 0x74, 0x2b, + 0xa1, 0x8f, 0x6d, 0x9f, 0x6e, 0xa1, 0xf8, 0xcb, 0x83, 0xe9, 0xaa, 0xcb, 0x2b, 0xb4, 0xe4, 0x0c, + 0x82, 0x0a, 0xfb, 0x1f, 0xd7, 0x30, 0x92, 0x2b, 0x98, 0x08, 0x8b, 0x8d, 0x09, 0xfd, 0x28, 0x48, + 0xe6, 0xcb, 0x73, 0xba, 0x53, 0x8f, 0x0e, 0x05, 0x52, 0x47, 0x90, 0x07, 0x38, 0x1c, 0x86, 0x8f, + 0x86, 0xab, 0x30, 0x18, 0xe9, 0xf8, 0x0f, 0xed, 0x5e, 0x19, 0x2f, 0xbd, 0x72, 0xf5, 0xd4, 0x5a, + 0xdd, 0xa7, 0x33, 0xe1, 0x4e, 0x8b, 0x37, 0x38, 0xfe, 0x1d, 0xec, 0xe9, 0x72, 0x0d, 0x93, 0x0d, + 0xaf, 0xbb, 0xff, 0xbf, 0xe5, 0x90, 0x7b, 0xff, 0xce, 0x5b, 0xdd, 0xbc, 0xb3, 0x42, 0xd8, 0xb2, + 0xcb, 0x68, 0x2e, 0x1b, 0x56, 0x72, 0x53, 0x8a, 0x5c, 0x6a, 0xc5, 0x36, 0xbc, 0xab, 0x2d, 0xdb, + 0xb7, 0x89, 0x6c, 0x3a, 0xba, 0x6e, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x46, 0x9d, 0x8a, 0xcb, + 0xa8, 0x01, 0x00, 
0x00,
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/storagepacker/types.proto b/vendor/github.com/hashicorp/vault/helper/storagepacker/types.proto
new file mode 100644
index 00000000..4edfaf4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/storagepacker/types.proto
@@ -0,0 +1,34 @@
+syntax = "proto3";
+
+option go_package = "github.com/hashicorp/vault/helper/storagepacker";
+
+package storagepacker;
+
+import "google/protobuf/any.proto";
+
+// Item represents an entry that gets inserted into the storage packer
+message Item {
+  // ID must be provided by the caller; the same value, if used with GetItem,
+  // can be used to fetch the item. However, when iterating through a bucket,
+  // this ID will be an internal ID. In other words, outside of the use-case
+  // described above, the caller *must not* rely on this value to be
+  // consistent with what they passed in.
+  string id = 1;
+  // message is the contents of the item
+  google.protobuf.Any message = 2;
+}
+
+// Bucket is a construct to hold multiple items within itself. This
+// abstraction contains multiple buckets of the same kind within itself and
+// shares among them the items that get inserted. When the bucket as a whole
+// gets too big to hold more items, the contained buckets get pushed out only
+// to become independent buckets. Hence, this can grow infinitely in terms of
+// storage space for items that get inserted.
+message Bucket {
+  // Key is the storage path where the bucket gets stored
+  string key = 1;
+  // Items holds the items contained within this bucket. Used by v1.
+  repeated Item items = 2;
+  // ItemMap stores a mapping of item ID to message. Used by v2.
+  map<string, google.protobuf.Any> item_map = 3;
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/xor/xor.go b/vendor/github.com/hashicorp/vault/helper/xor/xor.go
new file mode 100644
index 00000000..0d9567eb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/xor/xor.go
@@ -0,0 +1,48 @@
+package xor
+
+import (
+	"encoding/base64"
+	"fmt"
+
+	"github.com/hashicorp/errwrap"
+)
+
+// XORBytes takes two byte slices and XORs them together, returning the final
+// byte slice. It is an error to pass in two byte slices that do not have the
+// same length.
+func XORBytes(a, b []byte) ([]byte, error) {
+	if len(a) != len(b) {
+		return nil, fmt.Errorf("length of byte slices is not equivalent: %d != %d", len(a), len(b))
+	}
+
+	buf := make([]byte, len(a))
+
+	for i := range a {
+		buf[i] = a[i] ^ b[i]
+	}
+
+	return buf, nil
+}
+
+// XORBase64 takes two base64-encoded strings and XORs the decoded byte slices
+// together, returning the final byte slice. It is an error to pass in two
+// strings whose base64-decoded byte slices do not have the same length.
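A small demonstration of both helpers, assuming the vendored import path; the byte values are arbitrary:

package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/helper/xor"
)

func main() {
	a := []byte{0x0f, 0xf0, 0xaa}
	b := []byte{0xff, 0x0f, 0xaa}

	out, err := xor.XORBytes(a, b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", out) // f0ff00

	// The base64 variant decodes first; decoded lengths must match.
	out, err = xor.XORBase64(
		base64.StdEncoding.EncodeToString(a),
		base64.StdEncoding.EncodeToString(b),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", out) // f0ff00
}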
+func XORBase64(a, b string) ([]byte, error) { + aBytes, err := base64.StdEncoding.DecodeString(a) + if err != nil { + return nil, errwrap.Wrapf("error decoding first base64 value: {{err}}", err) + } + if aBytes == nil || len(aBytes) == 0 { + return nil, fmt.Errorf("decoded first base64 value is nil or empty") + } + + bBytes, err := base64.StdEncoding.DecodeString(b) + if err != nil { + return nil, errwrap.Wrapf("error decoding second base64 value: {{err}}", err) + } + if bBytes == nil || len(bBytes) == 0 { + return nil, fmt.Errorf("decoded second base64 value is nil or empty") + } + + return XORBytes(aBytes, bBytes) +} diff --git a/vendor/github.com/hashicorp/vault/http/cors.go b/vendor/github.com/hashicorp/vault/http/cors.go new file mode 100644 index 00000000..de24c8ca --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/cors.go @@ -0,0 +1,67 @@ +package http + +import ( + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/vault" +) + +var allowedMethods = []string{ + http.MethodDelete, + http.MethodGet, + http.MethodOptions, + http.MethodPost, + http.MethodPut, + "LIST", // LIST is not an official HTTP method, but Vault supports it. +} + +func wrapCORSHandler(h http.Handler, core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + corsConf := core.CORSConfig() + + // If CORS is not enabled or if no Origin header is present (i.e. the request + // is from the Vault CLI. A browser will always send an Origin header), then + // just return a 204. + if !corsConf.IsEnabled() { + h.ServeHTTP(w, req) + return + } + + origin := req.Header.Get("Origin") + requestMethod := req.Header.Get("Access-Control-Request-Method") + + if origin == "" { + h.ServeHTTP(w, req) + return + } + + // Return a 403 if the origin is not allowed to make cross-origin requests. 
+ if !corsConf.IsValidOrigin(origin) { + respondError(w, http.StatusForbidden, fmt.Errorf("origin not allowed")) + return + } + + if req.Method == http.MethodOptions && !strutil.StrListContains(allowedMethods, requestMethod) { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + w.Header().Set("Access-Control-Allow-Origin", origin) + w.Header().Set("Vary", "Origin") + + // apply headers for preflight requests + if req.Method == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ",")) + w.Header().Set("Access-Control-Allow-Headers", strings.Join(corsConf.AllowedHeaders, ",")) + w.Header().Set("Access-Control-Max-Age", "300") + + return + } + + h.ServeHTTP(w, req) + return + }) +} diff --git a/vendor/github.com/hashicorp/vault/http/handler.go b/vendor/github.com/hashicorp/vault/http/handler.go new file mode 100644 index 00000000..7f5fbb6e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/handler.go @@ -0,0 +1,920 @@ +package http + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "strings" + "time" + + "github.com/NYTimes/gziphandler" + assetfs "github.com/elazarl/go-bindata-assetfs" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/hashicorp/vault/sdk/helper/pathmanager" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +const ( + // WrapTTLHeaderName is the name of the header containing a directive to + // wrap the response + WrapTTLHeaderName = "X-Vault-Wrap-TTL" + + // WrapFormatHeaderName is the name of the header containing the format to + // wrap in; has no effect if the wrap TTL is not set + WrapFormatHeaderName = "X-Vault-Wrap-Format" + + // NoRequestForwardingHeaderName is the name of the header telling Vault + // not to use request forwarding + NoRequestForwardingHeaderName = "X-Vault-No-Request-Forwarding" + + // MFAHeaderName represents the HTTP header which carries the credentials + // required to perform MFA on any path. + MFAHeaderName = "X-Vault-MFA" + + // canonicalMFAHeaderName is the MFA header value's format in the request + // headers. Do not alter the casing of this string. + canonicalMFAHeaderName = "X-Vault-Mfa" + + // PolicyOverrideHeaderName is the header set to request overriding + // soft-mandatory Sentinel policies. + PolicyOverrideHeaderName = "X-Vault-Policy-Override" + + // DefaultMaxRequestSize is the default maximum accepted request size. This + // is to prevent a denial of service attack where no Content-Length is + // provided and the server is fed ever more data until it exhausts memory. + // Can be overridden per listener. 
+ DefaultMaxRequestSize = 32 * 1024 * 1024 +) + +var ( + // Set to false by stub_asset if the ui build tag isn't enabled + uiBuiltIn = true + + // perfStandbyAlwaysForwardPaths is used to check a requested path against + // the always forward list + perfStandbyAlwaysForwardPaths = pathmanager.New() + alwaysRedirectPaths = pathmanager.New() + + injectDataIntoTopRoutes = []string{ + "/v1/sys/audit", + "/v1/sys/audit/", + "/v1/sys/audit-hash/", + "/v1/sys/auth", + "/v1/sys/auth/", + "/v1/sys/config/cors", + "/v1/sys/config/auditing/request-headers/", + "/v1/sys/config/auditing/request-headers", + "/v1/sys/capabilities", + "/v1/sys/capabilities-accessor", + "/v1/sys/capabilities-self", + "/v1/sys/key-status", + "/v1/sys/mounts", + "/v1/sys/mounts/", + "/v1/sys/policy", + "/v1/sys/policy/", + "/v1/sys/rekey/backup", + "/v1/sys/rekey/recovery-key-backup", + "/v1/sys/remount", + "/v1/sys/rotate", + "/v1/sys/wrapping/wrap", + } +) + +func init() { + alwaysRedirectPaths.AddPaths([]string{ + "sys/storage/raft/snapshot", + "sys/storage/raft/snapshot-force", + }) +} + +// Handler returns an http.Handler for the API. This can be used on +// its own to mount the Vault API within another web server. +func Handler(props *vault.HandlerProperties) http.Handler { + core := props.Core + + // Create the muxer to handle the actual endpoints + mux := http.NewServeMux() + + switch { + case props.RecoveryMode: + raw := vault.NewRawBackend(core) + strategy := vault.GenerateRecoveryTokenStrategy(props.RecoveryToken) + mux.Handle("/v1/sys/raw/", handleLogicalRecovery(raw, props.RecoveryToken)) + mux.Handle("/v1/sys/generate-recovery-token/attempt", handleSysGenerateRootAttempt(core, strategy)) + mux.Handle("/v1/sys/generate-recovery-token/update", handleSysGenerateRootUpdate(core, strategy)) + default: + // Handle non-forwarded paths + mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core)) + + mux.Handle("/v1/sys/init", handleSysInit(core)) + mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core)) + mux.Handle("/v1/sys/seal", handleSysSeal(core)) + mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core))) + mux.Handle("/v1/sys/unseal", handleSysUnseal(core)) + mux.Handle("/v1/sys/leader", handleSysLeader(core)) + mux.Handle("/v1/sys/health", handleSysHealth(core)) + mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core, handleSysGenerateRootAttempt(core, vault.GenerateStandardRootTokenStrategy))) + mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core, handleSysGenerateRootUpdate(core, vault.GenerateStandardRootTokenStrategy))) + mux.Handle("/v1/sys/rekey/init", handleRequestForwarding(core, handleSysRekeyInit(core, false))) + mux.Handle("/v1/sys/rekey/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, false))) + mux.Handle("/v1/sys/rekey/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, false))) + mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true))) + mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true))) + mux.Handle("/v1/sys/rekey-recovery-key/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, true))) + mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core)) + for _, path := range injectDataIntoTopRoutes { + mux.Handle(path, 
handleRequestForwarding(core, handleLogicalWithInjector(core))) + } + mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core))) + mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core))) + if core.UIEnabled() == true { + if uiBuiltIn { + mux.Handle("/ui/", http.StripPrefix("/ui/", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()})))))) + mux.Handle("/robots.txt", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()}))))) + } else { + mux.Handle("/ui/", handleUIHeaders(core, handleUIStub())) + } + mux.Handle("/ui", handleUIRedirect()) + mux.Handle("/", handleUIRedirect()) + + } + + // Register metrics path without authentication if enabled + if props.UnauthenticatedMetricsAccess { + mux.Handle("/v1/sys/metrics", handleMetricsUnauthenticated(core)) + } + + additionalRoutes(mux, core) + } + + // Wrap the handler in another handler to trigger all help paths. + helpWrappedHandler := wrapHelpHandler(mux, core) + corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core) + + genericWrappedHandler := genericWrapping(core, corsWrappedHandler, props) + + // Wrap the handler with PrintablePathCheckHandler to check for non-printable + // characters in the request path. + printablePathCheckHandler := genericWrappedHandler + if !props.DisablePrintableCheck { + printablePathCheckHandler = cleanhttp.PrintablePathCheckHandler(genericWrappedHandler, nil) + } + + return printablePathCheckHandler +} + +// wrapGenericHandler wraps the handler with an extra layer of handler where +// tasks that should be commonly handled for all the requests and/or responses +// are performed. +func wrapGenericHandler(core *vault.Core, h http.Handler, maxRequestSize int64, maxRequestDuration time.Duration) http.Handler { + if maxRequestDuration == 0 { + maxRequestDuration = vault.DefaultMaxRequestDuration + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Set the Cache-Control header for all the responses returned + // by Vault + w.Header().Set("Cache-Control", "no-store") + + // Start with the request context + ctx := r.Context() + var cancelFunc context.CancelFunc + // Add our timeout + ctx, cancelFunc = context.WithTimeout(ctx, maxRequestDuration) + // Add a size limiter if desired + if maxRequestSize > 0 { + ctx = context.WithValue(ctx, "max_request_size", maxRequestSize) + } + ctx = context.WithValue(ctx, "original_request_path", r.URL.Path) + r = r.WithContext(ctx) + + switch { + case strings.HasPrefix(r.URL.Path, "/v1/"): + newR, status := adjustRequest(core, r) + if status != 0 { + respondError(w, status, nil) + cancelFunc() + return + } + r = newR + + case strings.HasPrefix(r.URL.Path, "/ui"), r.URL.Path == "/robots.txt", r.URL.Path == "/": + default: + respondError(w, http.StatusNotFound, nil) + cancelFunc() + return + } + + h.ServeHTTP(w, r) + cancelFunc() + return + }) +} + +func WrapForwardedForHandler(h http.Handler, authorizedAddrs []*sockaddr.SockAddrMarshaler, rejectNotPresent, rejectNonAuthz bool, hopSkips int) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + headers, headersOK := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")] + if !headersOK || len(headers) == 0 { + if !rejectNotPresent { + h.ServeHTTP(w, r) + return + } + respondError(w, http.StatusBadRequest, fmt.Errorf("missing x-forwarded-for header and configured to reject when not present")) + return + } + + host, 
port, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + // If not rejecting treat it like we just don't have a valid + // header because we can't do a comparison against an address we + // can't understand + if !rejectNotPresent { + h.ServeHTTP(w, r) + return + } + respondError(w, http.StatusBadRequest, errwrap.Wrapf("error parsing client hostport: {{err}}", err)) + return + } + + addr, err := sockaddr.NewIPAddr(host) + if err != nil { + // We treat this the same as the case above + if !rejectNotPresent { + h.ServeHTTP(w, r) + return + } + respondError(w, http.StatusBadRequest, errwrap.Wrapf("error parsing client address: {{err}}", err)) + return + } + + var found bool + for _, authz := range authorizedAddrs { + if authz.Contains(addr) { + found = true + break + } + } + if !found { + // If we didn't find it and aren't configured to reject, simply + // don't trust it + if !rejectNonAuthz { + h.ServeHTTP(w, r) + return + } + respondError(w, http.StatusBadRequest, fmt.Errorf("client address not authorized for x-forwarded-for and configured to reject connection")) + return + } + + // At this point we have at least one value and it's authorized + + // Split comma separated ones, which are common. This brings it in line + // to the multiple-header case. + var acc []string + for _, header := range headers { + vals := strings.Split(header, ",") + for _, v := range vals { + acc = append(acc, strings.TrimSpace(v)) + } + } + + indexToUse := len(acc) - 1 - hopSkips + if indexToUse < 0 { + // This is likely an error in either configuration or other + // infrastructure. We could either deny the request, or we + // could simply not trust the value. Denying the request is + // "safer" since if this logic is configured at all there may + // be an assumption it can always be trusted. Given that we can + // deny accepting the request at all if it's not from an + // authorized address, if we're at this point the address is + // authorized (or we've turned off explicit rejection) and we + // should assume that what comes in should be properly + // formatted. + respondError(w, http.StatusBadRequest, fmt.Errorf("malformed x-forwarded-for configuration or request, hops to skip (%d) would skip before earliest chain link (chain length %d)", hopSkips, len(headers))) + return + } + + r.RemoteAddr = net.JoinHostPort(acc[indexToUse], port) + h.ServeHTTP(w, r) + return + }) +} + +// A lookup on a token that is about to expire returns nil, which means by the +// time we can validate a wrapping token lookup will return nil since it will +// be revoked after the call. So we have to do the validation here. +func wrappingVerificationFunc(ctx context.Context, core *vault.Core, req *logical.Request) error { + if req == nil { + return fmt.Errorf("invalid request") + } + + valid, err := core.ValidateWrappingToken(ctx, req) + if err != nil { + return errwrap.Wrapf("error validating wrapping token: {{err}}", err) + } + if !valid { + return consts.ErrInvalidWrappingToken + } + + return nil +} + +// stripPrefix is a helper to strip a prefix from the path. It will +// return false from the second return value if it the prefix doesn't exist. 
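+// For example (illustrative):
+//
+//    rest, ok := stripPrefix("/v1/", "/v1/secret/foo") // "secret/foo", true
+//    rest, ok = stripPrefix("/v1/", "/ui/")            // "", false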
+func stripPrefix(prefix, path string) (string, bool) { + if !strings.HasPrefix(path, prefix) { + return "", false + } + + path = path[len(prefix):] + if path == "" { + return "", false + } + + return path, true +} + +func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + header := w.Header() + + userHeaders, err := core.UIHeaders() + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + if userHeaders != nil { + for k := range userHeaders { + v := userHeaders.Get(k) + header.Set(k, v) + } + } + h.ServeHTTP(w, req) + }) +} + +func handleUI(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + + // The fileserver handler strips trailing slashes and does a redirect. + // We don't want the redirect to happen so we preemptively trim the slash + // here. + req.URL.Path = strings.TrimSuffix(req.URL.Path, "/") + h.ServeHTTP(w, req) + return + }) +} + +func handleUIStub() http.Handler { + stubHTML := ` + + + +
+	<!DOCTYPE html>
+	<html>
+	<p>Vault UI is not available in this binary.</p>
+	<p>To get Vault UI do one of the following:</p>
+	<ul>
+	<li>Download an official release</li>
+	<li>Run <code>make release</code> to create your own release binaries.</li>
+	<li>Run <code>make dev-ui</code> to create a development binary with the UI.</li>
+	</ul>
+	</html>
+ + ` + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte(stubHTML)) + }) +} + +func handleUIRedirect() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + http.Redirect(w, req, "/ui/", 307) + return + }) +} + +type UIAssetWrapper struct { + FileSystem *assetfs.AssetFS +} + +func (fs *UIAssetWrapper) Open(name string) (http.File, error) { + file, err := fs.FileSystem.Open(name) + if err == nil { + return file, nil + } + // serve index.html instead of 404ing + if err == os.ErrNotExist { + return fs.FileSystem.Open("index.html") + } + return nil, err +} + +func parseQuery(values url.Values) map[string]interface{} { + data := map[string]interface{}{} + for k, v := range values { + // Skip the help key as this is a reserved parameter + if k == "help" { + continue + } + + switch { + case len(v) == 0: + case len(v) == 1: + data[k] = v[0] + default: + data[k] = v + } + } + + if len(data) > 0 { + return data + } + return nil +} + +func parseRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, out interface{}) (io.ReadCloser, error) { + // Limit the maximum number of bytes to MaxRequestSize to protect + // against an indefinite amount of data being read. + reader := r.Body + ctx := r.Context() + maxRequestSize := ctx.Value("max_request_size") + if maxRequestSize != nil { + max, ok := maxRequestSize.(int64) + if !ok { + return nil, errors.New("could not parse max_request_size from request context") + } + if max > 0 { + reader = http.MaxBytesReader(w, r.Body, max) + } + } + var origBody io.ReadWriter + if perfStandby { + // Since we're checking PerfStandby here we key on origBody being nil + // or not later, so we need to always allocate so it's non-nil + origBody = new(bytes.Buffer) + reader = ioutil.NopCloser(io.TeeReader(reader, origBody)) + } + err := jsonutil.DecodeJSONFromReader(reader, out) + if err != nil && err != io.EOF { + return nil, errwrap.Wrapf("failed to parse JSON input: {{err}}", err) + } + if origBody != nil { + return ioutil.NopCloser(origBody), err + } + return nil, err +} + +// handleRequestForwarding determines whether to forward a request or not, +// falling back on the older behavior of redirecting the client +func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // If we are a performance standby we can handle the request. 
+ if core.PerfStandby() { + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + switch { + case !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path): + handler.ServeHTTP(w, r) + return + case strings.HasPrefix(path, "auth/token/create/"): + isBatch, err := core.IsBatchTokenCreationRequest(r.Context(), path) + if err == nil && isBatch { + handler.ServeHTTP(w, r) + return + } + } + } + + // Note: in an HA setup, this call will also ensure that connections to + // the leader are set up, as that happens once the advertised cluster + // values are read during this function + isLeader, leaderAddr, _, err := core.Leader() + if err != nil { + if err == vault.ErrHANotEnabled { + // Standalone node, serve request normally + handler.ServeHTTP(w, r) + return + } + // Some internal error occurred + respondError(w, http.StatusInternalServerError, err) + return + } + if isLeader { + // No forwarding needed, we're leader + handler.ServeHTTP(w, r) + return + } + if leaderAddr == "" { + respondError(w, http.StatusInternalServerError, fmt.Errorf("local node not active but active cluster node not found")) + return + } + + forwardRequest(core, w, r) + return + }) +} + +func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) { + if r.Header.Get(vault.IntNoForwardingHeaderName) != "" { + respondStandby(core, w, r.URL) + return + } + + if r.Header.Get(NoRequestForwardingHeaderName) != "" { + // Forwarding explicitly disabled, fall back to previous behavior + core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request") + respondStandby(core, w, r.URL) + return + } + + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + if alwaysRedirectPaths.HasPath(path) { + respondStandby(core, w, r.URL) + return + } + + // Attempt forwarding the request. If we cannot forward -- perhaps it's + // been disabled on the active node -- this will return with an + // ErrCannotForward and we simply fall back + statusCode, header, retBytes, err := core.ForwardRequest(r) + if err != nil { + if err == vault.ErrCannotForward { + core.Logger().Debug("cannot forward request (possibly disabled on active node), falling back") + } else { + core.Logger().Error("forward request error", "error", err) + } + + // Fall back to redirection + respondStandby(core, w, r.URL) + return + } + + if header != nil { + for k, v := range header { + w.Header()[k] = v + } + } + + w.WriteHeader(statusCode) + w.Write(retBytes) +} + +// request is a helper to perform a request and properly exit in the +// case of an error. 
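+// The second return value reports whether the caller should continue
+// processing the response, and the third reports whether the request
+// still needs to be forwarded to the active node.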
+func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool, bool) { + resp, err := core.HandleRequest(rawReq.Context(), r) + if r.LastRemoteWAL() > 0 && !vault.WaitUntilWALShipped(rawReq.Context(), core, r.LastRemoteWAL()) { + if resp == nil { + resp = &logical.Response{} + } + resp.AddWarning("Timeout hit while waiting for local replicated cluster to apply primary's write; this client may encounter stale reads of values written during this operation.") + } + if errwrap.Contains(err, consts.ErrStandby.Error()) { + respondStandby(core, w, rawReq.URL) + return resp, false, false + } + if err != nil && errwrap.Contains(err, logical.ErrPerfStandbyPleaseForward.Error()) { + return nil, false, true + } + + if resp != nil && len(resp.Headers) > 0 { + // Set this here so it will take effect regardless of any other type of + // response processing + header := w.Header() + for k, v := range resp.Headers { + for _, h := range v { + header.Add(k, h) + } + } + + switch { + case resp.Secret != nil, + resp.Auth != nil, + len(resp.Data) > 0, + resp.Redirect != "", + len(resp.Warnings) > 0, + resp.WrapInfo != nil: + // Nothing, resp has data + + default: + // We have an otherwise totally empty response except for headers, + // so nil out the response now that the headers are written out + resp = nil + } + } + + // If vault's core has already written to the response writer do not add any + // additional output. Headers have already been sent. If the response writer + // is set but has not been written to it likely means there was some kind of + // error + if r.ResponseWriter != nil && r.ResponseWriter.Written() { + return nil, true, false + } + + if respondErrorCommon(w, r, resp, err) { + return resp, false, false + } + + return resp, true, false +} + +// respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby +func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) { + // Request the leader address + _, redirectAddr, _, err := core.Leader() + if err != nil { + if err == vault.ErrHANotEnabled { + // Standalone node, serve 503 + err = errors.New("node is not active") + respondError(w, http.StatusServiceUnavailable, err) + return + } + + respondError(w, http.StatusInternalServerError, err) + return + } + + // If there is no leader, generate a 503 error + if redirectAddr == "" { + err = errors.New("no active Vault instance found") + respondError(w, http.StatusServiceUnavailable, err) + return + } + + // Parse the redirect location + redirectURL, err := url.Parse(redirectAddr) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + // Generate a redirect URL + finalURL := url.URL{ + Scheme: redirectURL.Scheme, + Host: redirectURL.Host, + Path: reqURL.Path, + RawQuery: reqURL.RawQuery, + } + + // Ensure there is a scheme, default to https + if finalURL.Scheme == "" { + finalURL.Scheme = "https" + } + + // If we have an address, redirect! We use a 307 code + // because we don't actually know if its permanent and + // the request method should be preserved. + w.Header().Set("Location", finalURL.String()) + w.WriteHeader(307) +} + +// getTokenFromReq parse headers of the incoming request to extract token if +// present it accepts Authorization Bearer (RFC6750) and X-Vault-Token header. +// Returns true if the token was sourced from a Bearer header. 
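+// For example (illustrative):
+//
+//    X-Vault-Token: s.xxxxxxxx        -> ("s.xxxxxxxx", false)
+//    Authorization: Bearer s.xxxxxxxx -> ("s.xxxxxxxx", true)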
+func getTokenFromReq(r *http.Request) (string, bool) { + if token := r.Header.Get(consts.AuthHeaderName); token != "" { + return token, false + } + if headers, ok := r.Header["Authorization"]; ok { + // Reference for Authorization header format: https://tools.ietf.org/html/rfc7236#section-3 + + // If string does not start by 'Bearer ', it is not one we would use, + // but might be used by plugins + for _, v := range headers { + if !strings.HasPrefix(v, "Bearer ") { + continue + } + return strings.TrimSpace(v[7:]), true + } + } + return "", false +} + +// requestAuth adds the token to the logical.Request if it exists. +func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) (*logical.Request, error) { + // Attach the header value if we have it + token, fromAuthzHeader := getTokenFromReq(r) + if token != "" { + req.ClientToken = token + req.ClientTokenSource = logical.ClientTokenFromVaultHeader + if fromAuthzHeader { + req.ClientTokenSource = logical.ClientTokenFromAuthzHeader + } + + // Also attach the accessor if we have it. This doesn't fail if it + // doesn't exist because the request may be to an unauthenticated + // endpoint/login endpoint where a bad current token doesn't matter, or + // a token from a Vault version pre-accessors. We ignore errors for + // JWTs. + te, err := core.LookupToken(r.Context(), token) + if err != nil { + dotCount := strings.Count(token, ".") + // If we have two dots but the second char is a dot it's a vault + // token of the form s.SOMETHING.nsid, not a JWT + if dotCount != 2 || + dotCount == 2 && token[1] == '.' { + return req, err + } + } + if err == nil && te != nil { + req.ClientTokenAccessor = te.Accessor + req.ClientTokenRemainingUses = te.NumUses + req.SetTokenEntry(te) + } + } + + return req, nil +} + +func requestPolicyOverride(r *http.Request, req *logical.Request) error { + raw := r.Header.Get(PolicyOverrideHeaderName) + if raw == "" { + return nil + } + + override, err := parseutil.ParseBool(raw) + if err != nil { + return err + } + + req.PolicyOverride = override + return nil +} + +// requestWrapInfo adds the WrapInfo value to the logical.Request if wrap info exists +func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, error) { + // First try for the header value + wrapTTL := r.Header.Get(WrapTTLHeaderName) + if wrapTTL == "" { + return req, nil + } + + // If it has an allowed suffix parse as a duration string + dur, err := parseutil.ParseDurationSecond(wrapTTL) + if err != nil { + return req, err + } + if int64(dur) < 0 { + return req, fmt.Errorf("requested wrap ttl cannot be negative") + } + + req.WrapInfo = &logical.RequestWrapInfo{ + TTL: dur, + } + + wrapFormat := r.Header.Get(WrapFormatHeaderName) + switch wrapFormat { + case "jwt": + req.WrapInfo.Format = "jwt" + } + + return req, nil +} + +// parseMFAHeader parses the MFAHeaderName in the request headers and organizes +// them with MFA method name as the index. 
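+// For example (illustrative), the request headers
+//
+//    X-Vault-MFA: ping
+//    X-Vault-MFA: totp:123456
+//
+// produce map[string][]string{"ping": {}, "totp": {"123456"}}.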
+func parseMFAHeader(req *logical.Request) error { + if req == nil { + return fmt.Errorf("request is nil") + } + + if req.Headers == nil { + return nil + } + + // Reset and initialize the credentials in the request + req.MFACreds = make(map[string][]string) + + for _, mfaHeaderValue := range req.Headers[canonicalMFAHeaderName] { + // Skip the header with no value in it + if mfaHeaderValue == "" { + continue + } + + // Handle the case where only method name is mentioned and no value + // is supplied + if !strings.Contains(mfaHeaderValue, ":") { + // Mark the presence of method name, but set an empty set to it + // indicating that there were no values supplied for the method + if req.MFACreds[mfaHeaderValue] == nil { + req.MFACreds[mfaHeaderValue] = []string{} + } + continue + } + + shardSplits := strings.SplitN(mfaHeaderValue, ":", 2) + if shardSplits[0] == "" { + return fmt.Errorf("invalid data in header %q; missing method name", MFAHeaderName) + } + + if shardSplits[1] == "" { + return fmt.Errorf("invalid data in header %q; missing method value", MFAHeaderName) + } + + req.MFACreds[shardSplits[0]] = append(req.MFACreds[shardSplits[0]], shardSplits[1]) + } + + return nil +} + +func respondError(w http.ResponseWriter, status int, err error) { + logical.RespondError(w, status, err) +} + +func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logical.Response, err error) bool { + statusCode, newErr := logical.RespondErrorCommon(req, resp, err) + if newErr == nil && statusCode == 0 { + return false + } + + respondError(w, statusCode, newErr) + return true +} + +func respondOk(w http.ResponseWriter, body interface{}) { + w.Header().Set("Content-Type", "application/json") + + if body == nil { + w.WriteHeader(http.StatusNoContent) + } else { + w.WriteHeader(http.StatusOK) + enc := json.NewEncoder(w) + enc.Encode(body) + } +} diff --git a/vendor/github.com/hashicorp/vault/http/help.go b/vendor/github.com/hashicorp/vault/http/help.go new file mode 100644 index 00000000..61941159 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/help.go @@ -0,0 +1,58 @@ +package http + +import ( + "net/http" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func wrapHelpHandler(h http.Handler, core *vault.Core) http.Handler { + return http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) { + // If the help parameter is not blank, then show the help. We request + // forward because standby nodes do not have mounts and other state. 
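+ // For example (illustrative), either of the following asks for help
+ // output on a mount:
+ //
+ //    GET /v1/secret/?help=1
+ //    HELP /v1/secret/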
+ if v := req.URL.Query().Get("help"); v != "" || req.Method == "HELP" { + handleRequestForwarding(core, + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handleHelp(core, w, r) + })).ServeHTTP(writer, req) + return + } + + h.ServeHTTP(writer, req) + return + }) +} + +func handleHelp(core *vault.Core, w http.ResponseWriter, r *http.Request) { + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusBadRequest, nil) + return + } + path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + + req, err := requestAuth(core, r, &logical.Request{ + Operation: logical.HelpOperation, + Path: path, + Connection: getConnection(r), + }) + if err != nil { + if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) { + respondError(w, http.StatusForbidden, nil) + return + } + respondError(w, http.StatusBadRequest, errwrap.Wrapf("error performing token check: {{err}}", err)) + return + } + + resp, err := core.HandleRequest(r.Context(), req) + if err != nil { + respondErrorCommon(w, req, resp, err) + return + } + + respondOk(w, resp.Data) +} diff --git a/vendor/github.com/hashicorp/vault/http/logical.go b/vendor/github.com/hashicorp/vault/http/logical.go new file mode 100644 index 00000000..445c842b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/logical.go @@ -0,0 +1,535 @@ +package http + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "go.uber.org/atomic" +) + +func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { + ns, err := namespace.FromContext(r.Context()) + if err != nil { + return nil, nil, http.StatusBadRequest, nil + } + path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + + var data map[string]interface{} + var origBody io.ReadCloser + var passHTTPReq bool + var responseWriter http.ResponseWriter + + // Determine the operation + var op logical.Operation + switch r.Method { + case "DELETE": + op = logical.DeleteOperation + data = parseQuery(r.URL.Query()) + case "GET": + op = logical.ReadOperation + queryVals := r.URL.Query() + var list bool + var err error + listStr := queryVals.Get("list") + if listStr != "" { + list, err = strconv.ParseBool(listStr) + if err != nil { + return nil, nil, http.StatusBadRequest, nil + } + if list { + op = logical.ListOperation + if !strings.HasSuffix(path, "/") { + path += "/" + } + } + } + + if !list { + data = parseQuery(queryVals) + } + + switch { + case strings.HasPrefix(path, "sys/pprof/"): + passHTTPReq = true + responseWriter = w + case path == "sys/storage/raft/snapshot": + responseWriter = w + } + + case "POST", "PUT": + op = logical.UpdateOperation + // Parse the request if we can + if op == logical.UpdateOperation { + // If we are uploading a snapshot we don't want to parse it. Instead + // we will simply add the HTTP request to the logical request object + // for later consumption. 
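+ // Illustrative only: a snapshot restore is posted as a raw body, e.g.
+ //
+ //    curl -X POST --data-binary @raft.snap \
+ //      -H "X-Vault-Token: $VAULT_TOKEN" \
+ //      "$VAULT_ADDR/v1/sys/storage/raft/snapshot"
+ //
+ // so JSON parsing must be skipped for these paths.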
+ if path == "sys/storage/raft/snapshot" || path == "sys/storage/raft/snapshot-force" { + passHTTPReq = true + origBody = r.Body + } else { + origBody, err = parseRequest(perfStandby, r, w, &data) + if err == io.EOF { + data = nil + err = nil + } + if err != nil { + return nil, nil, http.StatusBadRequest, err + } + } + } + + case "LIST": + op = logical.ListOperation + if !strings.HasSuffix(path, "/") { + path += "/" + } + + case "OPTIONS": + default: + return nil, nil, http.StatusMethodNotAllowed, nil + } + + request_id, err := uuid.GenerateUUID() + if err != nil { + return nil, nil, http.StatusBadRequest, errwrap.Wrapf("failed to generate identifier for the request: {{err}}", err) + } + + req := &logical.Request{ + ID: request_id, + Operation: op, + Path: path, + Data: data, + Connection: getConnection(r), + Headers: r.Header, + } + + if passHTTPReq { + req.HTTPRequest = r + } + if responseWriter != nil { + req.ResponseWriter = logical.NewHTTPResponseWriter(responseWriter) + } + + return req, origBody, 0, nil +} + +func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { + req, origBody, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r) + if err != nil || status != 0 { + return nil, nil, status, err + } + + req, err = requestAuth(core, r, req) + if err != nil { + if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) { + return nil, nil, http.StatusForbidden, nil + } + return nil, nil, http.StatusBadRequest, errwrap.Wrapf("error performing token check: {{err}}", err) + } + + req, err = requestWrapInfo(r, req) + if err != nil { + return nil, nil, http.StatusBadRequest, errwrap.Wrapf("error parsing X-Vault-Wrap-TTL header: {{err}}", err) + } + + err = parseMFAHeader(req) + if err != nil { + return nil, nil, http.StatusBadRequest, errwrap.Wrapf("failed to parse X-Vault-MFA header: {{err}}", err) + } + + err = requestPolicyOverride(r, req) + if err != nil { + return nil, nil, http.StatusBadRequest, errwrap.Wrapf(fmt.Sprintf(`failed to parse %s header: {{err}}`, PolicyOverrideHeaderName), err) + } + + return req, origBody, 0, nil +} + +// handleLogical returns a handler for processing logical requests. These requests +// may or may not end up getting forwarded under certain scenarios if the node +// is a performance standby. Some of these cases include: +// - Perf standby and token with limited use count. +// - Perf standby and token re-validation needed (e.g. due to invalid token). +// - Perf standby and control group error. +func handleLogical(core *vault.Core) http.Handler { + return handleLogicalInternal(core, false, false) +} + +// handleLogicalWithInjector returns a handler for processing logical requests +// that also have their logical response data injected at the top-level payload. +// All forwarding behavior remains the same as `handleLogical`. +func handleLogicalWithInjector(core *vault.Core) http.Handler { + return handleLogicalInternal(core, true, false) +} + +// handleLogicalNoForward returns a handler for processing logical local-only +// requests. These types of requests never forwarded, and return an +// `vault.ErrCannotForwardLocalOnly` error if attempted to do so. 
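+// In this file the endpoints registered this way are /v1/sys/config/state/,
+// /v1/sys/host-info and /v1/sys/pprof/.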
+func handleLogicalNoForward(core *vault.Core) http.Handler { + return handleLogicalInternal(core, false, true) +} + +func handleLogicalRecovery(raw *vault.RawBackend, token *atomic.String) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + req, _, statusCode, err := buildLogicalRequestNoAuth(false, w, r) + if err != nil || statusCode != 0 { + respondError(w, statusCode, err) + return + } + reqToken := r.Header.Get(consts.AuthHeaderName) + if reqToken == "" || token.Load() == "" || reqToken != token.Load() { + respondError(w, http.StatusForbidden, nil) + } + + resp, err := raw.HandleRequest(r.Context(), req) + if respondErrorCommon(w, req, resp, err) { + return + } + + var httpResp *logical.HTTPResponse + if resp != nil { + httpResp = logical.LogicalResponseToHTTPResponse(resp) + httpResp.RequestID = req.ID + } + respondOk(w, httpResp) + }) +} + +// handleLogicalInternal is a common helper that returns a handler for +// processing logical requests. The behavior depends on the various boolean +// toggles. Refer to usage on functions for possible behaviors. +func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForward bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + req, origBody, statusCode, err := buildLogicalRequest(core, w, r) + if err != nil || statusCode != 0 { + respondError(w, statusCode, err) + return + } + + // Always forward requests that are using a limited use count token. + if core.PerfStandby() && req.ClientTokenRemainingUses > 0 { + // Prevent forwarding on local-only requests. + if noForward { + respondError(w, http.StatusBadRequest, vault.ErrCannotForwardLocalOnly) + return + } + + if origBody != nil { + r.Body = origBody + } + forwardRequest(core, w, r) + return + } + + // req.Path will be relative by this point. The prefix check is first + // to fail faster if we're not in this situation since it's a hot path + switch { + case strings.HasPrefix(req.Path, "sys/wrapping/"), strings.HasPrefix(req.Path, "auth/token/"): + // Get the token ns info; if we match the paths below we want to + // swap in the token context (but keep the relative path) + te := req.TokenEntry() + newCtx := r.Context() + if te != nil { + ns, err := vault.NamespaceByID(newCtx, te.NamespaceID, core) + if err != nil { + core.Logger().Warn("error looking up namespace from the token's namespace ID", "error", err) + respondError(w, http.StatusInternalServerError, err) + return + } + if ns != nil { + newCtx = namespace.ContextWithNamespace(newCtx, ns) + } + } + switch req.Path { + // Route the token wrapping request to its respective sys NS + case "sys/wrapping/lookup", "sys/wrapping/rewrap", "sys/wrapping/unwrap": + r = r.WithContext(newCtx) + if err := wrappingVerificationFunc(r.Context(), core, req); err != nil { + if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) { + respondError(w, http.StatusForbidden, err) + } else { + respondError(w, http.StatusBadRequest, err) + } + return + } + + // The -self paths have no meaning outside of the token NS, so + // requests for these paths always go to the token NS + case "auth/token/lookup-self", "auth/token/renew-self", "auth/token/revoke-self": + r = r.WithContext(newCtx) + + // For the following operations, we can set the proper namespace context + // using the token's embedded nsID if a relative path was provided. Since + // this is done at the HTTP layer, the operation will still be gated by + // ACLs. 
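+ // For example (illustrative): a service token of the form
+ // "s.XXXXXXXX.ns1ab" carries the namespace ID "ns1ab" as its final
+ // segment, so the lookup below switches the request context to that
+ // namespace before routing.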
+ case "auth/token/lookup", "auth/token/renew", "auth/token/revoke", "auth/token/revoke-orphan": + token, ok := req.Data["token"] + // If the token is not present (e.g. a bad request), break out and let the backend + // handle the error + if !ok { + // If this is a token lookup request and if the token is not + // explicitly provided, it will use the client token so we simply set + // the context to the client token's context. + if req.Path == "auth/token/lookup" { + r = r.WithContext(newCtx) + } + break + } + _, nsID := namespace.SplitIDFromString(token.(string)) + if nsID != "" { + ns, err := vault.NamespaceByID(newCtx, nsID, core) + if err != nil { + core.Logger().Warn("error looking up namespace from the token's namespace ID", "error", err) + respondError(w, http.StatusInternalServerError, err) + return + } + if ns != nil { + newCtx = namespace.ContextWithNamespace(newCtx, ns) + r = r.WithContext(newCtx) + } + } + } + + // The following relative sys/leases/ paths handles re-routing requests + // to the proper namespace using the lease ID on applicable paths. + case strings.HasPrefix(req.Path, "sys/leases/"): + switch req.Path { + // For the following operations, we can set the proper namespace context + // using the lease's embedded nsID if a relative path was provided. Since + // this is done at the HTTP layer, the operation will still be gated by + // ACLs. + case "sys/leases/lookup", "sys/leases/renew", "sys/leases/revoke", "sys/leases/revoke-force": + leaseID, ok := req.Data["lease_id"] + // If lease ID is not present, break out and let the backend handle the error + if !ok { + break + } + _, nsID := namespace.SplitIDFromString(leaseID.(string)) + if nsID != "" { + newCtx := r.Context() + ns, err := vault.NamespaceByID(newCtx, nsID, core) + if err != nil { + core.Logger().Warn("error looking up namespace from the lease's namespace ID", "error", err) + respondError(w, http.StatusInternalServerError, err) + return + } + if ns != nil { + newCtx = namespace.ContextWithNamespace(newCtx, ns) + r = r.WithContext(newCtx) + } + } + } + } + + // Make the internal request. We attach the connection info + // as well in case this is an authentication request that requires + // it. Vault core handles stripping this if we need to. This also + // handles all error cases; if we hit respondLogical, the request is a + // success. + resp, ok, needsForward := request(core, w, r, req) + switch { + case needsForward && noForward: + respondError(w, http.StatusBadRequest, vault.ErrCannotForwardLocalOnly) + return + case needsForward && !noForward: + if origBody != nil { + r.Body = origBody + } + forwardRequest(core, w, r) + return + case !ok: + // If not ok, we simply return. The call on request should have + // taken care of setting the appropriate response code and payload + // in this case. + return + default: + // Build and return the proper response if everything is fine. + respondLogical(w, r, req, resp, injectDataIntoTopLevel) + return + } + }) +} + +func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request, resp *logical.Response, injectDataIntoTopLevel bool) { + var httpResp *logical.HTTPResponse + var ret interface{} + + // If vault's core has already written to the response writer do not add any + // additional output. Headers have already been sent. + if req != nil && req.ResponseWriter != nil && req.ResponseWriter.Written() { + return + } + + if resp != nil { + if resp.Redirect != "" { + // If we have a redirect, redirect! 
We use a 307 code + // because we don't actually know if its permanent. + http.Redirect(w, r, resp.Redirect, 307) + return + } + + // Check if this is a raw response + if _, ok := resp.Data[logical.HTTPStatusCode]; ok { + respondRaw(w, r, resp) + return + } + + if resp.WrapInfo != nil && resp.WrapInfo.Token != "" { + httpResp = &logical.HTTPResponse{ + WrapInfo: &logical.HTTPWrapInfo{ + Token: resp.WrapInfo.Token, + Accessor: resp.WrapInfo.Accessor, + TTL: int(resp.WrapInfo.TTL.Seconds()), + CreationTime: resp.WrapInfo.CreationTime.Format(time.RFC3339Nano), + CreationPath: resp.WrapInfo.CreationPath, + WrappedAccessor: resp.WrapInfo.WrappedAccessor, + }, + } + } else { + httpResp = logical.LogicalResponseToHTTPResponse(resp) + httpResp.RequestID = req.ID + } + + ret = httpResp + + if injectDataIntoTopLevel { + injector := logical.HTTPSysInjector{ + Response: httpResp, + } + ret = injector + } + } + + // Respond + respondOk(w, ret) + return +} + +// respondRaw is used when the response is using HTTPContentType and HTTPRawBody +// to change the default response handling. This is only used for specific things like +// returning the CRL information on the PKI backends. +func respondRaw(w http.ResponseWriter, r *http.Request, resp *logical.Response) { + retErr := func(w http.ResponseWriter, err string) { + w.Header().Set("X-Vault-Raw-Error", err) + w.WriteHeader(http.StatusInternalServerError) + w.Write(nil) + } + + // Ensure this is never a secret or auth response + if resp.Secret != nil || resp.Auth != nil { + retErr(w, "raw responses cannot contain secrets or auth") + return + } + + // Get the status code + statusRaw, ok := resp.Data[logical.HTTPStatusCode] + if !ok { + retErr(w, "no status code given") + return + } + + var status int + switch statusRaw.(type) { + case int: + status = statusRaw.(int) + case float64: + status = int(statusRaw.(float64)) + case json.Number: + s64, err := statusRaw.(json.Number).Float64() + if err != nil { + retErr(w, "cannot decode status code") + return + } + status = int(s64) + default: + retErr(w, "cannot decode status code") + return + } + + nonEmpty := status != http.StatusNoContent + + var contentType string + var body []byte + + // Get the content type header; don't require it if the body is empty + contentTypeRaw, ok := resp.Data[logical.HTTPContentType] + if !ok && nonEmpty { + retErr(w, "no content type given") + return + } + if ok { + contentType, ok = contentTypeRaw.(string) + if !ok { + retErr(w, "cannot decode content type") + return + } + } + + if nonEmpty { + // Get the body + bodyRaw, ok := resp.Data[logical.HTTPRawBody] + if !ok { + goto WRITE_RESPONSE + } + + switch bodyRaw.(type) { + case string: + // This is best effort. 
The value may already be base64-decoded so + // if it doesn't work we just use as-is + bodyDec, err := base64.StdEncoding.DecodeString(bodyRaw.(string)) + if err == nil { + body = bodyDec + } else { + body = []byte(bodyRaw.(string)) + } + case []byte: + body = bodyRaw.([]byte) + default: + retErr(w, "cannot decode body") + return + } + } + +WRITE_RESPONSE: + // Write the response + if contentType != "" { + w.Header().Set("Content-Type", contentType) + } + + if cacheControl, ok := resp.Data[logical.HTTPRawCacheControl].(string); ok { + w.Header().Set("Cache-Control", cacheControl) + } + + w.WriteHeader(status) + w.Write(body) +} + +// getConnection is used to format the connection information for +// attaching to a logical request +func getConnection(r *http.Request) (connection *logical.Connection) { + var remoteAddr string + + remoteAddr, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + remoteAddr = "" + } + + connection = &logical.Connection{ + RemoteAddr: remoteAddr, + ConnState: r.TLS, + } + return +} diff --git a/vendor/github.com/hashicorp/vault/http/stub_assets.go b/vendor/github.com/hashicorp/vault/http/stub_assets.go new file mode 100644 index 00000000..c64ac582 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/stub_assets.go @@ -0,0 +1,16 @@ +// +build !ui + +package http + +import ( + assetfs "github.com/elazarl/go-bindata-assetfs" +) + +func init() { + uiBuiltIn = false +} + +// assetFS is a stub for building Vault without a UI. +func assetFS() *assetfs.AssetFS { + return nil +} diff --git a/vendor/github.com/hashicorp/vault/http/sys_generate_root.go b/vendor/github.com/hashicorp/vault/http/sys_generate_root.go new file mode 100644 index 00000000..dae751a4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/sys_generate_root.go @@ -0,0 +1,211 @@ +package http + +import ( + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "net/http" + + "github.com/hashicorp/vault/sdk/helper/base62" + "github.com/hashicorp/vault/vault" +) + +func handleSysGenerateRootAttempt(core *vault.Core, generateStrategy vault.GenerateRootStrategy) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + handleSysGenerateRootAttemptGet(core, w, r, "") + case "POST", "PUT": + handleSysGenerateRootAttemptPut(core, w, r, generateStrategy) + case "DELETE": + handleSysGenerateRootAttemptDelete(core, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysGenerateRootAttemptGet(core *vault.Core, w http.ResponseWriter, r *http.Request, otp string) { + ctx, cancel := core.GetContext() + defer cancel() + + // Get the current seal configuration + barrierConfig, err := core.SealAccess().BarrierConfig(ctx) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + if barrierConfig == nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("server is not yet initialized")) + return + } + + sealConfig := barrierConfig + if core.SealAccess().RecoveryKeySupported() { + sealConfig, err = core.SealAccess().RecoveryConfig(ctx) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + } + + // Get the generation configuration + generationConfig, err := core.GenerateRootConfiguration() + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + // Get the progress + progress, err := core.GenerateRootProgress() + if err != nil { + respondError(w, http.StatusInternalServerError, err) + 
return + } + + // Format the status + status := &GenerateRootStatusResponse{ + Started: false, + Progress: progress, + Required: sealConfig.SecretThreshold, + Complete: false, + OTPLength: vault.TokenLength + 2, + OTP: otp, + } + if generationConfig != nil { + status.Nonce = generationConfig.Nonce + status.Started = true + status.PGPFingerprint = generationConfig.PGPFingerprint + } + + respondOk(w, status) +} + +func handleSysGenerateRootAttemptPut(core *vault.Core, w http.ResponseWriter, r *http.Request, generateStrategy vault.GenerateRootStrategy) { + // Parse the request + var req GenerateRootInitRequest + if _, err := parseRequest(core.PerfStandby(), r, w, &req); err != nil && err != io.EOF { + respondError(w, http.StatusBadRequest, err) + return + } + + var err error + var genned bool + + switch { + case len(req.PGPKey) > 0, len(req.OTP) > 0: + default: + genned = true + req.OTP, err = base62.Random(vault.TokenLength + 2) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + } + + // Attemptialize the generation + if err := core.GenerateRootInit(req.OTP, req.PGPKey, generateStrategy); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + if genned { + handleSysGenerateRootAttemptGet(core, w, r, req.OTP) + return + } + + handleSysGenerateRootAttemptGet(core, w, r, "") +} + +func handleSysGenerateRootAttemptDelete(core *vault.Core, w http.ResponseWriter, r *http.Request) { + err := core.GenerateRootCancel() + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + respondOk(w, nil) +} + +func handleSysGenerateRootUpdate(core *vault.Core, generateStrategy vault.GenerateRootStrategy) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Parse the request + var req GenerateRootUpdateRequest + if _, err := parseRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + if req.Key == "" { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be specified in request body as JSON")) + return + } + + // Decode the key, which is base64 or hex encoded + min, max := core.BarrierKeyLength() + key, err := hex.DecodeString(req.Key) + // We check min and max here to ensure that a string that is base64 + // encoded but also valid hex will not be valid and we instead base64 + // decode it + if err != nil || len(key) < min || len(key) > max { + key, err = base64.StdEncoding.DecodeString(req.Key) + if err != nil { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be a valid hex or base64 string")) + return + } + } + + ctx, cancel := core.GetContext() + defer cancel() + + // Use the key to make progress on root generation + result, err := core.GenerateRootUpdate(ctx, key, req.Nonce, generateStrategy) + if err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + resp := &GenerateRootStatusResponse{ + Complete: result.Progress == result.Required, + Nonce: req.Nonce, + Progress: result.Progress, + Required: result.Required, + Started: true, + EncodedToken: result.EncodedToken, + PGPFingerprint: result.PGPFingerprint, + } + + if generateStrategy == vault.GenerateStandardRootTokenStrategy { + resp.EncodedRootToken = result.EncodedToken + } + + respondOk(w, resp) + }) +} + +type GenerateRootInitRequest struct { + OTP string `json:"otp"` + PGPKey string `json:"pgp_key"` +} + +type GenerateRootStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + 
Progress int `json:"progress"` + Required int `json:"required"` + Complete bool `json:"complete"` + EncodedToken string `json:"encoded_token"` + EncodedRootToken string `json:"encoded_root_token"` + PGPFingerprint string `json:"pgp_fingerprint"` + OTP string `json:"otp"` + OTPLength int `json:"otp_length"` +} + +type GenerateRootUpdateRequest struct { + Nonce string + Key string +} diff --git a/vendor/github.com/hashicorp/vault/http/sys_health.go b/vendor/github.com/hashicorp/vault/http/sys_health.go new file mode 100644 index 00000000..973b79b2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/sys_health.go @@ -0,0 +1,219 @@ +package http + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/hashicorp/vault/sdk/version" + "github.com/hashicorp/vault/vault" +) + +func handleSysHealth(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + handleSysHealthGet(core, w, r) + case "HEAD": + handleSysHealthHead(core, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func fetchStatusCode(r *http.Request, field string) (int, bool, bool) { + var err error + statusCode := http.StatusOK + if statusCodeStr, statusCodeOk := r.URL.Query()[field]; statusCodeOk { + statusCode, err = strconv.Atoi(statusCodeStr[0]) + if err != nil || len(statusCodeStr) < 1 { + return http.StatusBadRequest, false, false + } + return statusCode, true, true + } + return statusCode, false, true +} + +func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { + code, body, err := getSysHealth(core, r) + if err != nil { + core.Logger().Error("error checking health", "error", err) + respondError(w, code, nil) + return + } + + if body == nil { + respondError(w, code, nil) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + + // Generate the response + enc := json.NewEncoder(w) + enc.Encode(body) +} + +func handleSysHealthHead(core *vault.Core, w http.ResponseWriter, r *http.Request) { + code, body, _ := getSysHealth(core, r) + + if body != nil { + w.Header().Set("Content-Type", "application/json") + } + w.WriteHeader(code) +} + +func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, error) { + var err error + + // Check if being a standby is allowed for the purpose of a 200 OK + standbyOKStr, standbyOK := r.URL.Query()["standbyok"] + if standbyOK { + standbyOK, err = parseutil.ParseBool(standbyOKStr[0]) + if err != nil { + return http.StatusBadRequest, nil, errwrap.Wrapf("bad value for standbyok parameter: {{err}}", err) + } + } + perfStandbyOKStr, perfStandbyOK := r.URL.Query()["perfstandbyok"] + if perfStandbyOK { + perfStandbyOK, err = parseutil.ParseBool(perfStandbyOKStr[0]) + if err != nil { + return http.StatusBadRequest, nil, errwrap.Wrapf("bad value for perfstandbyok parameter: {{err}}", err) + } + } + + uninitCode := http.StatusNotImplemented + if code, found, ok := fetchStatusCode(r, "uninitcode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + uninitCode = code + } + + sealedCode := http.StatusServiceUnavailable + if code, found, ok := fetchStatusCode(r, "sealedcode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + sealedCode = code + } + + standbyCode := http.StatusTooManyRequests // 
Consul warning code + if code, found, ok := fetchStatusCode(r, "standbycode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + standbyCode = code + } + + activeCode := http.StatusOK + if code, found, ok := fetchStatusCode(r, "activecode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + activeCode = code + } + + drSecondaryCode := 472 // unofficial 4xx status code + if code, found, ok := fetchStatusCode(r, "drsecondarycode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + drSecondaryCode = code + } + + perfStandbyCode := 473 // unofficial 4xx status code + if code, found, ok := fetchStatusCode(r, "performancestandbycode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + perfStandbyCode = code + } + + ctx := context.Background() + + // Check system status + sealed := core.Sealed() + standby, _ := core.Standby() + perfStandby := core.PerfStandby() + var replicationState consts.ReplicationState + if standby { + replicationState = core.ActiveNodeReplicationState() + } else { + replicationState = core.ReplicationState() + } + + init, err := core.Initialized(ctx) + if err != nil { + return http.StatusInternalServerError, nil, err + } + + // Determine the status code + code := activeCode + switch { + case !init: + code = uninitCode + case sealed: + code = sealedCode + case replicationState.HasState(consts.ReplicationDRSecondary): + code = drSecondaryCode + case perfStandby: + if !perfStandbyOK { + code = perfStandbyCode + } + case standby: + if !standbyOK { + code = standbyCode + } + } + + // Fetch the local cluster name and identifier + var clusterName, clusterID string + if !sealed { + cluster, err := core.Cluster(ctx) + if err != nil { + return http.StatusInternalServerError, nil, err + } + if cluster == nil { + return http.StatusInternalServerError, nil, fmt.Errorf("failed to fetch cluster details") + } + clusterName = cluster.Name + clusterID = cluster.ID + } + + // Format the body + body := &HealthResponse{ + Initialized: init, + Sealed: sealed, + Standby: standby, + PerformanceStandby: perfStandby, + ReplicationPerformanceMode: replicationState.GetPerformanceString(), + ReplicationDRMode: replicationState.GetDRString(), + ServerTimeUTC: time.Now().UTC().Unix(), + Version: version.GetVersion().VersionNumber(), + ClusterName: clusterName, + ClusterID: clusterID, + } + + if init && !sealed && !standby { + body.LastWAL = vault.LastWAL(core) + } + + return code, body, nil +} + +type HealthResponse struct { + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + Standby bool `json:"standby"` + PerformanceStandby bool `json:"performance_standby"` + ReplicationPerformanceMode string `json:"replication_performance_mode"` + ReplicationDRMode string `json:"replication_dr_mode"` + ServerTimeUTC int64 `json:"server_time_utc"` + Version string `json:"version"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/http/sys_init.go b/vendor/github.com/hashicorp/vault/http/sys_init.go new file mode 100644 index 00000000..ca77ada9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/sys_init.go @@ -0,0 +1,130 @@ +package http + +import ( + "context" + "encoding/base64" + "encoding/hex" + "net/http" + + "github.com/hashicorp/vault/vault" +) + +func handleSysInit(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + switch r.Method { + case "GET": + handleSysInitGet(core, w, r) + case "PUT", "POST": + handleSysInitPut(core, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysInitGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { + init, err := core.Initialized(context.Background()) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + respondOk(w, &InitStatusResponse{ + Initialized: init, + }) +} + +func handleSysInitPut(core *vault.Core, w http.ResponseWriter, r *http.Request) { + ctx := context.Background() + + // Parse the request + var req InitRequest + if _, err := parseRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + // Initialize + barrierConfig := &vault.SealConfig{ + SecretShares: req.SecretShares, + SecretThreshold: req.SecretThreshold, + StoredShares: req.StoredShares, + PGPKeys: req.PGPKeys, + } + + recoveryConfig := &vault.SealConfig{ + SecretShares: req.RecoveryShares, + SecretThreshold: req.RecoveryThreshold, + PGPKeys: req.RecoveryPGPKeys, + } + + initParams := &vault.InitParams{ + BarrierConfig: barrierConfig, + RecoveryConfig: recoveryConfig, + RootTokenPGPKey: req.RootTokenPGPKey, + } + + result, initErr := core.Initialize(ctx, initParams) + if initErr != nil { + if vault.IsFatalError(initErr) { + respondError(w, http.StatusBadRequest, initErr) + return + } else { + // Add a warnings field? The error will be logged in the vault log + // already. + } + } + + // Encode the keys + keys := make([]string, 0, len(result.SecretShares)) + keysB64 := make([]string, 0, len(result.SecretShares)) + for _, k := range result.SecretShares { + keys = append(keys, hex.EncodeToString(k)) + keysB64 = append(keysB64, base64.StdEncoding.EncodeToString(k)) + } + + resp := &InitResponse{ + Keys: keys, + KeysB64: keysB64, + RootToken: result.RootToken, + } + + if len(result.RecoveryShares) > 0 { + resp.RecoveryKeys = make([]string, 0, len(result.RecoveryShares)) + resp.RecoveryKeysB64 = make([]string, 0, len(result.RecoveryShares)) + for _, k := range result.RecoveryShares { + resp.RecoveryKeys = append(resp.RecoveryKeys, hex.EncodeToString(k)) + resp.RecoveryKeysB64 = append(resp.RecoveryKeysB64, base64.StdEncoding.EncodeToString(k)) + } + } + + if err := core.UnsealWithStoredKeys(ctx); err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + respondOk(w, resp) +} + +type InitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + RecoveryShares int `json:"recovery_shares"` + RecoveryThreshold int `json:"recovery_threshold"` + RecoveryPGPKeys []string `json:"recovery_pgp_keys"` + RootTokenPGPKey string `json:"root_token_pgp_key"` +} + +type InitResponse struct { + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + RecoveryKeys []string `json:"recovery_keys,omitempty"` + RecoveryKeysB64 []string `json:"recovery_keys_base64,omitempty"` + RootToken string `json:"root_token"` +} + +type InitStatusResponse struct { + Initialized bool `json:"initialized"` +} diff --git a/vendor/github.com/hashicorp/vault/http/sys_leader.go b/vendor/github.com/hashicorp/vault/http/sys_leader.go new file mode 100644 index 00000000..76ba92b2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/sys_leader.go @@ -0,0 +1,56 @@ +package http + +import ( + "net/http" + + 
"github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/vault" +) + +func handleSysLeader(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + handleSysLeaderGet(core, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { + haEnabled := true + isLeader, address, clusterAddr, err := core.Leader() + if errwrap.Contains(err, vault.ErrHANotEnabled.Error()) { + haEnabled = false + err = nil + } + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + resp := &LeaderResponse{ + HAEnabled: haEnabled, + IsSelf: isLeader, + LeaderAddress: address, + LeaderClusterAddress: clusterAddr, + PerfStandby: core.PerfStandby(), + } + if resp.PerfStandby { + resp.PerfStandbyLastRemoteWAL = vault.LastRemoteWAL(core) + } else if isLeader || !haEnabled { + resp.LastWAL = vault.LastWAL(core) + } + + respondOk(w, resp) +} + +type LeaderResponse struct { + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self"` + LeaderAddress string `json:"leader_address"` + LeaderClusterAddress string `json:"leader_cluster_address"` + PerfStandby bool `json:"performance_standby"` + PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal"` + LastWAL uint64 `json:"last_wal,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/http/sys_metrics.go b/vendor/github.com/hashicorp/vault/http/sys_metrics.go new file mode 100644 index 00000000..ee55383f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/sys_metrics.go @@ -0,0 +1,42 @@ +package http + +import ( + "fmt" + "net/http" + + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func handleMetricsUnauthenticated(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + req := &logical.Request{Headers: r.Header} + + // Parse form + if err := r.ParseForm(); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + format := r.Form.Get("format") + if format == "" { + format = metricsutil.FormatFromRequest(req) + } + + // Define response + resp := core.MetricsHelper().ResponseForFormat(format) + + // Manually extract the logical response and send back the information + w.WriteHeader(resp.Data[logical.HTTPStatusCode].(int)) + w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) + switch v := resp.Data[logical.HTTPRawBody].(type) { + case string: + w.Write([]byte(v)) + case []byte: + w.Write(v) + default: + respondError(w, http.StatusInternalServerError, fmt.Errorf("wrong response returned")) + } + }) +} diff --git a/vendor/github.com/hashicorp/vault/http/sys_raft.go b/vendor/github.com/hashicorp/vault/http/sys_raft.go new file mode 100644 index 00000000..75d6ccf9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/sys_raft.go @@ -0,0 +1,70 @@ +package http + +import ( + "context" + "crypto/tls" + "errors" + "io" + "net/http" + + "github.com/hashicorp/vault/sdk/helper/tlsutil" + "github.com/hashicorp/vault/vault" +) + +func handleSysRaftJoin(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "POST", "PUT": + handleSysRaftJoinPost(core, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysRaftJoinPost(core 
+func handleSysRaftJoinPost(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+	// Parse the request
+	var req JoinRequest
+	if _, err := parseRequest(core.PerfStandby(), r, w, &req); err != nil && err != io.EOF {
+		respondError(w, http.StatusBadRequest, err)
+		return
+	}
+
+	if req.NonVoter && !nonVotersAllowed {
+		respondError(w, http.StatusBadRequest, errors.New("non-voting nodes not allowed"))
+		return
+	}
+
+	var tlsConfig *tls.Config
+	var err error
+	if len(req.LeaderCACert) != 0 || len(req.LeaderClientCert) != 0 || len(req.LeaderClientKey) != 0 {
+		tlsConfig, err = tlsutil.ClientTLSConfig([]byte(req.LeaderCACert), []byte(req.LeaderClientCert), []byte(req.LeaderClientKey))
+		if err != nil {
+			respondError(w, http.StatusBadRequest, err)
+			return
+		}
+	}
+
+	joined, err := core.JoinRaftCluster(context.Background(), req.LeaderAPIAddr, tlsConfig, req.Retry, req.NonVoter)
+	if err != nil {
+		respondError(w, http.StatusInternalServerError, err)
+		return
+	}
+
+	resp := JoinResponse{
+		Joined: joined,
+	}
+	respondOk(w, resp)
+}
+
+type JoinResponse struct {
+	Joined bool `json:"joined"`
+}
+
+type JoinRequest struct {
+	LeaderAPIAddr    string `json:"leader_api_addr"`
+	LeaderCACert     string `json:"leader_ca_cert"`
+	LeaderClientCert string `json:"leader_client_cert"`
+	LeaderClientKey  string `json:"leader_client_key"`
+	Retry            bool   `json:"retry"`
+	NonVoter         bool   `json:"non_voter"`
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_rekey.go b/vendor/github.com/hashicorp/vault/http/sys_rekey.go
new file mode 100644
index 00000000..eb8760f9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_rekey.go
@@ -0,0 +1,411 @@
+package http
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/vault/helper/pgpkeys"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/vault"
+)
+
+func handleSysRekeyInit(core *vault.Core, recovery bool) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		standby, _ := core.Standby()
+		if standby {
+			respondStandby(core, w, r.URL)
+			return
+		}
+
+		repState := core.ReplicationState()
+		if repState.HasState(consts.ReplicationPerformanceSecondary) {
+			respondError(w, http.StatusBadRequest,
+				fmt.Errorf("rekeying can only be performed on the primary cluster when replication is activated"))
+			return
+		}
+
+		ctx, cancel := core.GetContext()
+		defer cancel()
+
+		switch {
+		case recovery && !core.SealAccess().RecoveryKeySupported():
+			respondError(w, http.StatusBadRequest, fmt.Errorf("recovery rekeying not supported"))
+		case r.Method == "GET":
+			handleSysRekeyInitGet(ctx, core, recovery, w, r)
+		case r.Method == "POST" || r.Method == "PUT":
+			handleSysRekeyInitPut(ctx, core, recovery, w, r)
+		case r.Method == "DELETE":
+			handleSysRekeyInitDelete(ctx, core, recovery, w, r)
+		default:
+			respondError(w, http.StatusMethodNotAllowed, nil)
+		}
+	})
+}
+
+func handleSysRekeyInitGet(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) {
+	barrierConfig, barrierConfErr := core.SealAccess().BarrierConfig(ctx)
+	if barrierConfErr != nil {
+		respondError(w, http.StatusInternalServerError, barrierConfErr)
+		return
+	}
+	if barrierConfig == nil {
+		respondError(w, http.StatusBadRequest, fmt.Errorf("server is not yet initialized"))
+		return
+	}
+
+	// Get the rekey configuration
+	rekeyConf, err := core.RekeyConfig(recovery)
+	if err != nil {
+		respondError(w, err.Code(), err)
+		return
+	}
+
+	sealThreshold, err := core.RekeyThreshold(ctx,
recovery) + if err != nil { + respondError(w, err.Code(), err) + return + } + + // Format the status + status := &RekeyStatusResponse{ + Started: false, + T: 0, + N: 0, + Required: sealThreshold, + } + if rekeyConf != nil { + // Get the progress + started, progress, err := core.RekeyProgress(recovery, false) + if err != nil { + respondError(w, err.Code(), err) + return + } + + status.Nonce = rekeyConf.Nonce + status.Started = started + status.T = rekeyConf.SecretThreshold + status.N = rekeyConf.SecretShares + status.Progress = progress + status.VerificationRequired = rekeyConf.VerificationRequired + status.VerificationNonce = rekeyConf.VerificationNonce + if rekeyConf.PGPKeys != nil && len(rekeyConf.PGPKeys) != 0 { + pgpFingerprints, err := pgpkeys.GetFingerprints(rekeyConf.PGPKeys, nil) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + status.PGPFingerprints = pgpFingerprints + status.Backup = rekeyConf.Backup + } + } + respondOk(w, status) +} + +func handleSysRekeyInitPut(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + // Parse the request + var req RekeyRequest + if _, err := parseRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + if req.Backup && len(req.PGPKeys) == 0 { + respondError(w, http.StatusBadRequest, fmt.Errorf("cannot request a backup of the new keys without providing PGP keys for encryption")) + return + } + + if len(req.PGPKeys) > 0 && len(req.PGPKeys) != req.SecretShares { + respondError(w, http.StatusBadRequest, fmt.Errorf("incorrect number of PGP keys for rekey")) + return + } + + // Initialize the rekey + err := core.RekeyInit(&vault.SealConfig{ + SecretShares: req.SecretShares, + SecretThreshold: req.SecretThreshold, + StoredShares: req.StoredShares, + PGPKeys: req.PGPKeys, + Backup: req.Backup, + VerificationRequired: req.RequireVerification, + }, recovery) + if err != nil { + respondError(w, err.Code(), err) + return + } + + handleSysRekeyInitGet(ctx, core, recovery, w, r) +} + +func handleSysRekeyInitDelete(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + if err := core.RekeyCancel(recovery); err != nil { + respondError(w, err.Code(), err) + return + } + respondOk(w, nil) +} + +func handleSysRekeyUpdate(core *vault.Core, recovery bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + standby, _ := core.Standby() + if standby { + respondStandby(core, w, r.URL) + return + } + + // Parse the request + var req RekeyUpdateRequest + if _, err := parseRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + if req.Key == "" { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be specified in request body as JSON")) + return + } + + // Decode the key, which is base64 or hex encoded + min, max := core.BarrierKeyLength() + key, err := hex.DecodeString(req.Key) + // We check min and max here to ensure that a string that is base64 + // encoded but also valid hex will not be valid and we instead base64 + // decode it + if err != nil || len(key) < min || len(key) > max { + key, err = base64.StdEncoding.DecodeString(req.Key) + if err != nil { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be a valid hex or base64 string")) + return + } + } + + ctx, cancel := core.GetContext() + defer cancel() + + // Use the key to make progress on rekey + 
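+		// To illustrate the hex-vs-base64 disambiguation above (example
+		// values only, not taken from this file): a 32-byte share renders as
+		// 64 hex characters, which hex.DecodeString accepts and which fits
+		// the length bounds; the same share in base64 is 44 characters
+		// ending in "=", which fails the hex path and so falls through to
+		// base64.StdEncoding.DecodeString.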
result, rekeyErr := core.RekeyUpdate(ctx, key, req.Nonce, recovery) + if rekeyErr != nil { + respondError(w, rekeyErr.Code(), rekeyErr) + return + } + + // Format the response + resp := &RekeyUpdateResponse{} + if result != nil { + resp.Complete = true + resp.Nonce = req.Nonce + resp.Backup = result.Backup + resp.PGPFingerprints = result.PGPFingerprints + resp.VerificationRequired = result.VerificationRequired + resp.VerificationNonce = result.VerificationNonce + + // Encode the keys + keys := make([]string, 0, len(result.SecretShares)) + keysB64 := make([]string, 0, len(result.SecretShares)) + for _, k := range result.SecretShares { + keys = append(keys, hex.EncodeToString(k)) + keysB64 = append(keysB64, base64.StdEncoding.EncodeToString(k)) + } + resp.Keys = keys + resp.KeysB64 = keysB64 + respondOk(w, resp) + } else { + handleSysRekeyInitGet(ctx, core, recovery, w, r) + } + }) +} + +func handleSysRekeyVerify(core *vault.Core, recovery bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + standby, _ := core.Standby() + if standby { + respondStandby(core, w, r.URL) + return + } + + repState := core.ReplicationState() + if repState.HasState(consts.ReplicationPerformanceSecondary) { + respondError(w, http.StatusBadRequest, + fmt.Errorf("rekeying can only be performed on the primary cluster when replication is activated")) + return + } + + ctx, cancel := core.GetContext() + defer cancel() + + switch { + case recovery && !core.SealAccess().RecoveryKeySupported(): + respondError(w, http.StatusBadRequest, fmt.Errorf("recovery rekeying not supported")) + case r.Method == "GET": + handleSysRekeyVerifyGet(ctx, core, recovery, w, r) + case r.Method == "POST" || r.Method == "PUT": + handleSysRekeyVerifyPut(ctx, core, recovery, w, r) + case r.Method == "DELETE": + handleSysRekeyVerifyDelete(ctx, core, recovery, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysRekeyVerifyGet(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + barrierConfig, barrierConfErr := core.SealAccess().BarrierConfig(ctx) + if barrierConfErr != nil { + respondError(w, http.StatusInternalServerError, barrierConfErr) + return + } + if barrierConfig == nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("server is not yet initialized")) + return + } + + // Get the rekey configuration + rekeyConf, err := core.RekeyConfig(recovery) + if err != nil { + respondError(w, err.Code(), err) + return + } + if rekeyConf == nil { + respondError(w, http.StatusBadRequest, errors.New("no rekey configuration found")) + return + } + + // Get the progress + started, progress, err := core.RekeyProgress(recovery, true) + if err != nil { + respondError(w, err.Code(), err) + return + } + + // Format the status + status := &RekeyVerificationStatusResponse{ + Started: started, + Nonce: rekeyConf.VerificationNonce, + T: rekeyConf.SecretThreshold, + N: rekeyConf.SecretShares, + Progress: progress, + } + respondOk(w, status) +} + +func handleSysRekeyVerifyDelete(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + if err := core.RekeyVerifyRestart(recovery); err != nil { + respondError(w, err.Code(), err) + return + } + + handleSysRekeyVerifyGet(ctx, core, recovery, w, r) +} + +func handleSysRekeyVerifyPut(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + // Parse the request + var req RekeyVerificationUpdateRequest + if _, 
err := parseRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + if req.Key == "" { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be specified in request body as JSON")) + return + } + + // Decode the key, which is base64 or hex encoded + min, max := core.BarrierKeyLength() + key, err := hex.DecodeString(req.Key) + // We check min and max here to ensure that a string that is base64 + // encoded but also valid hex will not be valid and we instead base64 + // decode it + if err != nil || len(key) < min || len(key) > max { + key, err = base64.StdEncoding.DecodeString(req.Key) + if err != nil { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be a valid hex or base64 string")) + return + } + } + + ctx, cancel := core.GetContext() + defer cancel() + + // Use the key to make progress on rekey + result, rekeyErr := core.RekeyVerify(ctx, key, req.Nonce, recovery) + if rekeyErr != nil { + respondError(w, rekeyErr.Code(), rekeyErr) + return + } + + // Format the response + resp := &RekeyVerificationUpdateResponse{} + if result != nil { + resp.Complete = true + resp.Nonce = result.Nonce + respondOk(w, resp) + } else { + handleSysRekeyVerifyGet(ctx, core, recovery, w, r) + } +} + +type RekeyRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + Backup bool `json:"backup"` + RequireVerification bool `json:"require_verification"` +} + +type RekeyStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Required int `json:"required"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce,omitempty"` +} + +type RekeyUpdateRequest struct { + Nonce string + Key string +} + +type RekeyUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce,omitempty"` +} + +type RekeyVerificationUpdateRequest struct { + Nonce string `json:"nonce"` + Key string `json:"key"` +} + +type RekeyVerificationStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` +} + +type RekeyVerificationUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` +} diff --git a/vendor/github.com/hashicorp/vault/http/sys_seal.go b/vendor/github.com/hashicorp/vault/http/sys_seal.go new file mode 100644 index 00000000..1cf520c0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/sys_seal.go @@ -0,0 +1,264 @@ +package http + +import ( + "context" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "net/http" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/version" + "github.com/hashicorp/vault/vault" +) + +func handleSysSeal(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + req, _, 
statusCode, err := buildLogicalRequest(core, w, r) + if err != nil || statusCode != 0 { + respondError(w, statusCode, err) + return + } + + switch req.Operation { + case logical.UpdateOperation: + default: + respondError(w, http.StatusMethodNotAllowed, nil) + return + } + + // Seal with the token above + // We use context.Background since there won't be a request context if the node isn't active + if err := core.SealWithRequest(r.Context(), req); err != nil { + if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) { + respondError(w, http.StatusForbidden, err) + return + } + respondError(w, http.StatusInternalServerError, err) + return + } + + respondOk(w, nil) + }) +} + +func handleSysStepDown(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + req, _, statusCode, err := buildLogicalRequest(core, w, r) + if err != nil || statusCode != 0 { + respondError(w, statusCode, err) + return + } + + switch req.Operation { + case logical.UpdateOperation: + default: + respondError(w, http.StatusMethodNotAllowed, nil) + return + } + + // Seal with the token above + if err := core.StepDown(r.Context(), req); err != nil { + if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) { + respondError(w, http.StatusForbidden, err) + return + } + respondError(w, http.StatusInternalServerError, err) + return + } + + respondOk(w, nil) + }) +} + +func handleSysUnseal(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "PUT": + case "POST": + default: + respondError(w, http.StatusMethodNotAllowed, nil) + return + } + + // Parse the request + var req UnsealRequest + if _, err := parseRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + if req.Reset { + if !core.Sealed() { + respondError(w, http.StatusBadRequest, errors.New("vault is unsealed")) + return + } + core.ResetUnsealProcess() + handleSysSealStatusRaw(core, w, r) + return + } + + isInSealMigration := core.IsInSealMigration() + if !req.Migrate && isInSealMigration { + respondError( + w, http.StatusBadRequest, + errors.New("'migrate' parameter must be set true in JSON body when in seal migration mode")) + return + } + if req.Migrate && !isInSealMigration { + respondError( + w, http.StatusBadRequest, + errors.New("'migrate' parameter set true in JSON body when not in seal migration mode")) + return + } + + if req.Key == "" { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be specified in request body as JSON, or 'reset' set to true")) + return + } + + // Decode the key, which is base64 or hex encoded + min, max := core.BarrierKeyLength() + key, err := hex.DecodeString(req.Key) + // We check min and max here to ensure that a string that is base64 + // encoded but also valid hex will not be valid and we instead base64 + // decode it + if err != nil || len(key) < min || len(key) > max { + key, err = base64.StdEncoding.DecodeString(req.Key) + if err != nil { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be a valid hex or base64 string")) + return + } + } + + // Attempt the unseal + if core.SealAccess().RecoveryKeySupported() { + _, err = core.UnsealWithRecoveryKeys(key) + } else { + _, err = core.Unseal(key) + } + if err != nil { + switch { + case errwrap.ContainsType(err, new(vault.ErrInvalidKey)): + case errwrap.Contains(err, vault.ErrBarrierInvalidKey.Error()): + case errwrap.Contains(err, 
vault.ErrBarrierNotInit.Error()): + case errwrap.Contains(err, vault.ErrBarrierSealed.Error()): + case errwrap.Contains(err, consts.ErrStandby.Error()): + default: + respondError(w, http.StatusInternalServerError, err) + return + } + respondError(w, http.StatusBadRequest, err) + return + } + + // Return the seal status + handleSysSealStatusRaw(core, w, r) + }) +} + +func handleSysSealStatus(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + respondError(w, http.StatusMethodNotAllowed, nil) + return + } + + handleSysSealStatusRaw(core, w, r) + }) +} + +func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) { + ctx := context.Background() + + sealed := core.Sealed() + + var sealConfig *vault.SealConfig + var err error + if core.SealAccess().RecoveryKeySupported() { + sealConfig, err = core.SealAccess().RecoveryConfig(ctx) + } else { + sealConfig, err = core.SealAccess().BarrierConfig(ctx) + } + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + if sealConfig == nil { + respondOk(w, &SealStatusResponse{ + Type: core.SealAccess().BarrierType(), + Initialized: false, + Sealed: true, + RecoverySeal: core.SealAccess().RecoveryKeySupported(), + StorageType: core.StorageType(), + }) + return + } + + // Fetch the local cluster name and identifier + var clusterName, clusterID string + if !sealed { + cluster, err := core.Cluster(ctx) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + if cluster == nil { + respondError(w, http.StatusInternalServerError, fmt.Errorf("failed to fetch cluster details")) + return + } + clusterName = cluster.Name + clusterID = cluster.ID + } + + progress, nonce := core.SecretProgress() + + respondOk(w, &SealStatusResponse{ + Type: sealConfig.Type, + Initialized: true, + Sealed: sealed, + T: sealConfig.SecretThreshold, + N: sealConfig.SecretShares, + Progress: progress, + Nonce: nonce, + Version: version.GetVersion().VersionNumber(), + Migration: core.IsInSealMigration(), + ClusterName: clusterName, + ClusterID: clusterID, + RecoverySeal: core.SealAccess().RecoveryKeySupported(), + StorageType: core.StorageType(), + }) +} + +type SealStatusResponse struct { + Type string `json:"type"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Nonce string `json:"nonce"` + Version string `json:"version"` + Migration bool `json:"migration"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + RecoverySeal bool `json:"recovery_seal"` + StorageType string `json:"storage_type,omitempty"` +} + +// Note: because we didn't provide explicit tagging in the past we can't do it +// now because if it then no longer accepts capitalized versions it could break +// clients +type UnsealRequest struct { + Key string + Reset bool + Migrate bool +} diff --git a/vendor/github.com/hashicorp/vault/http/testing.go b/vendor/github.com/hashicorp/vault/http/testing.go new file mode 100644 index 00000000..ebc2edd1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/testing.go @@ -0,0 +1,67 @@ +package http + +import ( + "fmt" + "net" + "net/http" + "testing" + + "github.com/hashicorp/vault/vault" +) + +func TestListener(tb testing.TB) (net.Listener, string) { + fail := func(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) + } + if tb != nil { + fail = 
tb.Fatalf + } + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + fail("err: %s", err) + } + addr := "http://" + ln.Addr().String() + return ln, addr +} + +func TestServerWithListenerAndProperties(tb testing.TB, ln net.Listener, addr string, core *vault.Core, props *vault.HandlerProperties) { + // Create a muxer to handle our requests so that we can authenticate + // for tests. + mux := http.NewServeMux() + mux.Handle("/_test/auth", http.HandlerFunc(testHandleAuth)) + mux.Handle("/", Handler(props)) + + server := &http.Server{ + Addr: ln.Addr().String(), + Handler: mux, + ErrorLog: core.Logger().StandardLogger(nil), + } + go server.Serve(ln) +} + +func TestServerWithListener(tb testing.TB, ln net.Listener, addr string, core *vault.Core) { + // Create a muxer to handle our requests so that we can authenticate + // for tests. + props := &vault.HandlerProperties{ + Core: core, + MaxRequestSize: DefaultMaxRequestSize, + } + TestServerWithListenerAndProperties(tb, ln, addr, core, props) +} + +func TestServer(tb testing.TB, core *vault.Core) (net.Listener, string) { + ln, addr := TestListener(tb) + TestServerWithListener(tb, ln, addr, core) + return ln, addr +} + +func TestServerAuth(tb testing.TB, addr string, token string) { + if _, err := http.Get(addr + "/_test/auth?token=" + token); err != nil { + tb.Fatalf("error authenticating: %s", err) + } +} + +func testHandleAuth(w http.ResponseWriter, req *http.Request) { + respondOk(w, nil) +} diff --git a/vendor/github.com/hashicorp/vault/http/util.go b/vendor/github.com/hashicorp/vault/http/util.go new file mode 100644 index 00000000..ff50a483 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/util.go @@ -0,0 +1,24 @@ +package http + +import ( + "net/http" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/vault" +) + +var ( + adjustRequest = func(c *vault.Core, r *http.Request) (*http.Request, int) { + return r.WithContext(namespace.ContextWithNamespace(r.Context(), namespace.RootNamespace)), 0 + } + + genericWrapping = func(core *vault.Core, in http.Handler, props *vault.HandlerProperties) http.Handler { + // Wrap the help wrapped handler with another layer with a generic + // handler + return wrapGenericHandler(core, in, props.MaxRequestSize, props.MaxRequestDuration) + } + + additionalRoutes = func(mux *http.ServeMux, core *vault.Core) {} + + nonVotersAllowed = false +) diff --git a/vendor/github.com/hashicorp/vault/physical/raft/fsm.go b/vendor/github.com/hashicorp/vault/physical/raft/fsm.go new file mode 100644 index 00000000..22f52562 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/raft/fsm.go @@ -0,0 +1,852 @@ +package raft + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + metrics "github.com/armon/go-metrics" + protoio "github.com/gogo/protobuf/io" + proto "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-raftchunking" + "github.com/hashicorp/raft" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/plugin/pb" + bolt "go.etcd.io/bbolt" +) + +const ( + deleteOp uint32 = 1 << iota + putOp + restoreCallbackOp + + chunkingPrefix = "raftchunking/" +) + +var ( + // dataBucketName is the value we use for the bucket + dataBucketName = []byte("data") + configBucketName = 
[]byte("config") + latestIndexKey = []byte("latest_indexes") + latestConfigKey = []byte("latest_config") +) + +// Verify FSM satisfies the correct interfaces +var _ physical.Backend = (*FSM)(nil) +var _ physical.Transactional = (*FSM)(nil) +var _ raft.FSM = (*FSM)(nil) +var _ raft.BatchingFSM = (*FSM)(nil) + +type restoreCallback func(context.Context) error + +// FSMApplyResponse is returned from an FSM apply. It indicates if the apply was +// successful or not. +type FSMApplyResponse struct { + Success bool +} + +// FSM is Vault's primary state storage. It writes updates to an bolt db file +// that lives on local disk. FSM implements raft.FSM and physical.Backend +// interfaces. +type FSM struct { + // latestIndex and latestTerm must stay at the top of this struct to be + // properly 64-bit aligned. + + // latestIndex and latestTerm are the term and index of the last log we + // received + latestIndex *uint64 + latestTerm *uint64 + // latestConfig is the latest server configuration we've seen + latestConfig atomic.Value + + l sync.RWMutex + path string + logger log.Logger + noopRestore bool + + db *bolt.DB + + // retoreCb is called after we've restored a snapshot + restoreCb restoreCallback + + // This is just used in tests to disable to storing the latest indexes and + // configs so we can conform to the standard backend tests, which expect to + // additional state in the backend. + storeLatestState bool + + chunker *raftchunking.ChunkingBatchingFSM +} + +// NewFSM constructs a FSM using the given directory +func NewFSM(conf map[string]string, logger log.Logger) (*FSM, error) { + path, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("'path' must be set") + } + + dbPath := filepath.Join(path, "vault.db") + + boltDB, err := bolt.Open(dbPath, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, err + } + + // Initialize the latest term, index, and config values + latestTerm := new(uint64) + latestIndex := new(uint64) + latestConfig := atomic.Value{} + atomic.StoreUint64(latestTerm, 0) + atomic.StoreUint64(latestIndex, 0) + latestConfig.Store((*ConfigurationValue)(nil)) + + err = boltDB.Update(func(tx *bolt.Tx) error { + // make sure we have the necessary buckets created + _, err := tx.CreateBucketIfNotExists(dataBucketName) + if err != nil { + return fmt.Errorf("failed to create bucket: %v", err) + } + b, err := tx.CreateBucketIfNotExists(configBucketName) + if err != nil { + return fmt.Errorf("failed to create bucket: %v", err) + } + // Read in our latest index and term and populate it inmemory + val := b.Get(latestIndexKey) + if val != nil { + var latest IndexValue + err := proto.Unmarshal(val, &latest) + if err != nil { + return err + } + + atomic.StoreUint64(latestTerm, latest.Term) + atomic.StoreUint64(latestIndex, latest.Index) + } + + // Read in our latest config and populate it inmemory + val = b.Get(latestConfigKey) + if val != nil { + var latest ConfigurationValue + err := proto.Unmarshal(val, &latest) + if err != nil { + return err + } + + latestConfig.Store(&latest) + } + return nil + }) + if err != nil { + return nil, err + } + + storeLatestState := true + if _, ok := conf["doNotStoreLatestState"]; ok { + storeLatestState = false + } + + f := &FSM{ + path: conf["path"], + logger: logger, + + db: boltDB, + latestTerm: latestTerm, + latestIndex: latestIndex, + latestConfig: latestConfig, + storeLatestState: storeLatestState, + } + + f.chunker = raftchunking.NewChunkingBatchingFSM(f, &FSMChunkStorage{ + f: f, + ctx: context.Background(), + }) + + 
+	return f, nil
+}
+
+// LatestState returns the latest index and configuration values we have seen on
+// this FSM.
+func (f *FSM) LatestState() (*IndexValue, *ConfigurationValue) {
+	return &IndexValue{
+		Term:  atomic.LoadUint64(f.latestTerm),
+		Index: atomic.LoadUint64(f.latestIndex),
+	}, f.latestConfig.Load().(*ConfigurationValue)
+}
+
+func (f *FSM) witnessIndex(i *IndexValue) {
+	seen, _ := f.LatestState()
+	if seen.Index < i.Index {
+		atomic.StoreUint64(f.latestIndex, i.Index)
+		atomic.StoreUint64(f.latestTerm, i.Term)
+	}
+}
+
+func (f *FSM) witnessSnapshot(index, term, configurationIndex uint64, configuration raft.Configuration) error {
+	var indexBytes []byte
+	latestIndex, _ := f.LatestState()
+
+	latestIndex.Index = index
+	latestIndex.Term = term
+
+	var err error
+	indexBytes, err = proto.Marshal(latestIndex)
+	if err != nil {
+		return err
+	}
+
+	protoConfig := raftConfigurationToProtoConfiguration(configurationIndex, configuration)
+	configBytes, err := proto.Marshal(protoConfig)
+	if err != nil {
+		return err
+	}
+
+	if f.storeLatestState {
+		err = f.db.Update(func(tx *bolt.Tx) error {
+			b := tx.Bucket(configBucketName)
+			err := b.Put(latestConfigKey, configBytes)
+			if err != nil {
+				return err
+			}
+
+			err = b.Put(latestIndexKey, indexBytes)
+			if err != nil {
+				return err
+			}
+
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	atomic.StoreUint64(f.latestIndex, index)
+	atomic.StoreUint64(f.latestTerm, term)
+	f.latestConfig.Store(protoConfig)
+
+	return nil
+}
+
+// Delete deletes the given key from the bolt file.
+func (f *FSM) Delete(ctx context.Context, path string) error {
+	defer metrics.MeasureSince([]string{"raft", "delete"}, time.Now())
+
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	return f.db.Update(func(tx *bolt.Tx) error {
+		return tx.Bucket(dataBucketName).Delete([]byte(path))
+	})
+}
+
+// DeletePrefix deletes all keys with the given prefix from the bolt file.
+func (f *FSM) DeletePrefix(ctx context.Context, prefix string) error {
+	defer metrics.MeasureSince([]string{"raft", "delete_prefix"}, time.Now())
+
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	err := f.db.Update(func(tx *bolt.Tx) error {
+		// Assume bucket exists and has keys
+		c := tx.Bucket(dataBucketName).Cursor()
+
+		prefixBytes := []byte(prefix)
+		for k, _ := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, _ = c.Next() {
+			if err := c.Delete(); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	})
+
+	return err
+}
+
+// Get retrieves the value at the given path from the bolt file.
+func (f *FSM) Get(ctx context.Context, path string) (*physical.Entry, error) {
+	defer metrics.MeasureSince([]string{"raft", "get"}, time.Now())
+
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	var valCopy []byte
+	var found bool
+
+	err := f.db.View(func(tx *bolt.Tx) error {
+
+		value := tx.Bucket(dataBucketName).Get([]byte(path))
+		if value != nil {
+			found = true
+			valCopy = make([]byte, len(value))
+			copy(valCopy, value)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	if !found {
+		return nil, nil
+	}
+
+	return &physical.Entry{
+		Key:   path,
+		Value: valCopy,
+	}, nil
+}
+
+// Put writes the given entry to the bolt file.
+func (f *FSM) Put(ctx context.Context, entry *physical.Entry) error {
+	defer metrics.MeasureSince([]string{"raft", "put"}, time.Now())
+
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	// Start a write transaction.
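+	// bolt's Update runs the closure in a single read-write transaction: it
+	// commits when the closure returns nil and rolls back on error, so the
+	// write below is atomic with respect to concurrent readers.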
+ return f.db.Update(func(tx *bolt.Tx) error { + return tx.Bucket(dataBucketName).Put([]byte(entry.Key), entry.Value) + }) +} + +// List retrieves the set of keys with the given prefix from the bolt file. +func (f *FSM) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"raft", "list"}, time.Now()) + + f.l.RLock() + defer f.l.RUnlock() + + var keys []string + + err := f.db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket(dataBucketName).Cursor() + + prefixBytes := []byte(prefix) + for k, _ := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, _ = c.Next() { + key := string(k) + key = strings.TrimPrefix(key, prefix) + if i := strings.Index(key, "/"); i == -1 { + // Add objects only from the current 'folder' + keys = append(keys, key) + } else { + // Add truncated 'folder' paths + keys = strutil.AppendIfMissing(keys, string(key[:i+1])) + } + } + + return nil + }) + + return keys, err +} + +// Transaction writes all the operations in the provided transaction to the bolt +// file. +func (f *FSM) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + f.l.RLock() + defer f.l.RUnlock() + + // TODO: should this be a Batch? + // Start a write transaction. + err := f.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(dataBucketName) + for _, txn := range txns { + var err error + switch txn.Operation { + case physical.PutOperation: + err = b.Put([]byte(txn.Entry.Key), txn.Entry.Value) + case physical.DeleteOperation: + err = b.Delete([]byte(txn.Entry.Key)) + default: + return fmt.Errorf("%q is not a supported transaction operation", txn.Operation) + } + if err != nil { + return err + } + } + + return nil + }) + return err +} + +// ApplyBatch will apply a set of logs to the FSM. This is called from the raft +// library. +func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { + if len(logs) == 0 { + return []interface{}{} + } + + // Do the unmarshalling first so we don't hold locks + var latestConfiguration *ConfigurationValue + commands := make([]interface{}, 0, len(logs)) + for _, log := range logs { + switch log.Type { + case raft.LogCommand: + command := &LogData{} + err := proto.Unmarshal(log.Data, command) + if err != nil { + f.logger.Error("error proto unmarshaling log data", "error", err) + panic("error proto unmarshaling log data") + } + commands = append(commands, command) + case raft.LogConfiguration: + configuration := raft.DecodeConfiguration(log.Data) + config := raftConfigurationToProtoConfiguration(log.Index, configuration) + + commands = append(commands, config) + + // Update the latest configuration the fsm has received; we will + // store this after it has been committed to storage. + latestConfiguration = config + + default: + panic(fmt.Sprintf("got unexpected log type: %d", log.Type)) + } + } + + // Only advance latest pointer if this log has a higher index value than + // what we have seen in the past. 
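+	// The stored index only moves forward: if raft re-delivers a batch whose
+	// last index is not newer than what we have seen, logIndex stays empty
+	// and the puts/deletes below are simply re-applied (they are idempotent)
+	// without rewinding the latest-index pointer.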
+ var logIndex []byte + var err error + latestIndex, _ := f.LatestState() + lastLog := logs[len(logs)-1] + if latestIndex.Index < lastLog.Index { + logIndex, err = proto.Marshal(&IndexValue{ + Term: lastLog.Term, + Index: lastLog.Index, + }) + if err != nil { + f.logger.Error("unable to marshal latest index", "error", err) + panic("unable to marshal latest index") + } + } + + f.l.RLock() + defer f.l.RUnlock() + + err = f.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(dataBucketName) + for _, commandRaw := range commands { + switch command := commandRaw.(type) { + case *LogData: + for _, op := range command.Operations { + var err error + switch op.OpType { + case putOp: + err = b.Put([]byte(op.Key), op.Value) + case deleteOp: + err = b.Delete([]byte(op.Key)) + case restoreCallbackOp: + if f.restoreCb != nil { + // Kick off the restore callback function in a go routine + go f.restoreCb(context.Background()) + } + default: + return fmt.Errorf("%q is not a supported transaction operation", op.OpType) + } + if err != nil { + return err + } + } + + case *ConfigurationValue: + b := tx.Bucket(configBucketName) + configBytes, err := proto.Marshal(command) + if err != nil { + return err + } + if err := b.Put(latestConfigKey, configBytes); err != nil { + return err + } + } + } + + if f.storeLatestState && len(logIndex) > 0 { + b := tx.Bucket(configBucketName) + err = b.Put(latestIndexKey, logIndex) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + f.logger.Error("failed to store data", "error", err) + panic("failed to store data") + } + + // If we advanced the latest value, update the in-memory representation too. + if len(logIndex) > 0 { + atomic.StoreUint64(f.latestTerm, lastLog.Term) + atomic.StoreUint64(f.latestIndex, lastLog.Index) + } + + // If one or more configuration changes were processed, store the latest one. + if latestConfiguration != nil { + f.latestConfig.Store(latestConfiguration) + } + + // Build the responses. The logs array is used here to ensure we reply to + // all command values; even if they are not of the types we expect. This + // should future proof this function from more log types being provided. + resp := make([]interface{}, len(logs)) + for i := range logs { + resp[i] = &FSMApplyResponse{ + Success: true, + } + } + + return resp +} + +// Apply will apply a log value to the FSM. This is called from the raft +// library. +func (f *FSM) Apply(log *raft.Log) interface{} { + return f.ApplyBatch([]*raft.Log{log})[0] +} + +type writeErrorCloser interface { + io.WriteCloser + CloseWithError(error) error +} + +// writeTo will copy the FSM's content to a remote sink. The data is written +// twice, once for use in determining various metadata attributes of the dataset +// (size, checksum, etc) and a second for the sink of the data. We also use a +// proto delimited writer so we can stream proto messages to the sink. +func (f *FSM) writeTo(ctx context.Context, metaSink writeErrorCloser, sink writeErrorCloser) { + protoWriter := protoio.NewDelimitedWriter(sink) + metadataProtoWriter := protoio.NewDelimitedWriter(metaSink) + + f.l.RLock() + defer f.l.RUnlock() + + err := f.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(dataBucketName) + + c := b.Cursor() + + // Do the first scan of the data for metadata purposes. 
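+		// Both passes below walk the same bucket in key order using
+		// length-delimited protobuf framing: the first feeds the metadata
+		// sink (size, checksum, etc.), the second streams the actual
+		// snapshot data.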
+ for k, v := c.First(); k != nil; k, v = c.Next() { + err := metadataProtoWriter.WriteMsg(&pb.StorageEntry{ + Key: string(k), + Value: v, + }) + if err != nil { + metaSink.CloseWithError(err) + return err + } + } + metaSink.Close() + + // Do the second scan for copy purposes. + for k, v := c.First(); k != nil; k, v = c.Next() { + err := protoWriter.WriteMsg(&pb.StorageEntry{ + Key: string(k), + Value: v, + }) + if err != nil { + return err + } + } + + return nil + }) + sink.CloseWithError(err) +} + +// Snapshot implements the FSM interface. It returns a noop snapshot object. +func (f *FSM) Snapshot() (raft.FSMSnapshot, error) { + return &noopSnapshotter{}, nil +} + +// SetNoopRestore is used to disable restore operations on raft startup. Because +// we are using persistent storage in our FSM we do not need to issue a restore +// on startup. +func (f *FSM) SetNoopRestore(enabled bool) { + f.l.Lock() + f.noopRestore = enabled + f.l.Unlock() +} + +// Restore reads data from the provided reader and writes it into the FSM. It +// first deletes the existing bucket to clear all existing data, then recreates +// it so we can copy in the snapshot. +func (f *FSM) Restore(r io.ReadCloser) error { + if f.noopRestore == true { + return nil + } + + protoReader := protoio.NewDelimitedReader(r, math.MaxInt32) + defer protoReader.Close() + + f.l.Lock() + defer f.l.Unlock() + + // Start a write transaction. + err := f.db.Update(func(tx *bolt.Tx) error { + err := tx.DeleteBucket(dataBucketName) + if err != nil { + return err + } + + b, err := tx.CreateBucket(dataBucketName) + if err != nil { + return err + } + + for { + s := new(pb.StorageEntry) + err := protoReader.ReadMsg(s) + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + err = b.Put([]byte(s.Key), s.Value) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + f.logger.Error("could not restore snapshot", "error", err) + return err + } + + return nil +} + +// noopSnapshotter implements the fsm.Snapshot interface. It doesn't do anything +// since our SnapshotStore reads data out of the FSM on Open(). +type noopSnapshotter struct{} + +// Persist doesn't do anything. +func (s *noopSnapshotter) Persist(sink raft.SnapshotSink) error { + return nil +} + +// Release doesn't do anything. +func (s *noopSnapshotter) Release() {} + +// raftConfigurationToProtoConfiguration converts a raft configuration object to +// a proto value. +func raftConfigurationToProtoConfiguration(index uint64, configuration raft.Configuration) *ConfigurationValue { + servers := make([]*Server, len(configuration.Servers)) + for i, s := range configuration.Servers { + servers[i] = &Server{ + Suffrage: int32(s.Suffrage), + Id: string(s.ID), + Address: string(s.Address), + } + } + return &ConfigurationValue{ + Index: index, + Servers: servers, + } +} + +// protoConfigurationToRaftConfiguration converts a proto configuration object +// to a raft object. 
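+// The two conversions are inverses for the fields carried here (suffrage,
+// ID, address); an illustrative round trip, given some raft.Configuration
+// value orig (hypothetical variable):
+//
+//	idx, cfg := protoConfigurationToRaftConfiguration(
+//		raftConfigurationToProtoConfiguration(7, orig))
+//	// idx == 7, and cfg.Servers mirrors orig.Servers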
+func protoConfigurationToRaftConfiguration(configuration *ConfigurationValue) (uint64, raft.Configuration) { + servers := make([]raft.Server, len(configuration.Servers)) + for i, s := range configuration.Servers { + servers[i] = raft.Server{ + Suffrage: raft.ServerSuffrage(s.Suffrage), + ID: raft.ServerID(s.Id), + Address: raft.ServerAddress(s.Address), + } + } + return configuration.Index, raft.Configuration{ + Servers: servers, + } +} + +type FSMChunkStorage struct { + f *FSM + ctx context.Context +} + +// chunkPaths returns a disk prefix and key given chunkinfo +func (f *FSMChunkStorage) chunkPaths(chunk *raftchunking.ChunkInfo) (string, string) { + prefix := fmt.Sprintf("%s%d/", chunkingPrefix, chunk.OpNum) + key := fmt.Sprintf("%s%d", prefix, chunk.SequenceNum) + return prefix, key +} + +func (f *FSMChunkStorage) StoreChunk(chunk *raftchunking.ChunkInfo) (bool, error) { + b, err := jsonutil.EncodeJSON(chunk) + if err != nil { + return false, errwrap.Wrapf("error encoding chunk info: {{err}}", err) + } + + prefix, key := f.chunkPaths(chunk) + + entry := &physical.Entry{ + Key: key, + Value: b, + } + + f.f.l.RLock() + defer f.f.l.RUnlock() + + // Start a write transaction. + done := new(bool) + if err := f.f.db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket(dataBucketName).Put([]byte(entry.Key), entry.Value); err != nil { + return errwrap.Wrapf("error storing chunk info: {{err}}", err) + } + + // Assume bucket exists and has keys + c := tx.Bucket(dataBucketName).Cursor() + + var keys []string + prefixBytes := []byte(prefix) + for k, _ := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, _ = c.Next() { + key := string(k) + key = strings.TrimPrefix(key, prefix) + if i := strings.Index(key, "/"); i == -1 { + // Add objects only from the current 'folder' + keys = append(keys, key) + } else { + // Add truncated 'folder' paths + keys = strutil.AppendIfMissing(keys, string(key[:i+1])) + } + } + + *done = uint32(len(keys)) == chunk.NumChunks + + return nil + }); err != nil { + return false, err + } + + return *done, nil +} + +func (f *FSMChunkStorage) FinalizeOp(opNum uint64) ([]*raftchunking.ChunkInfo, error) { + ret, err := f.chunksForOpNum(opNum) + if err != nil { + return nil, errwrap.Wrapf("error getting chunks for op keys: {{err}}", err) + } + + prefix, _ := f.chunkPaths(&raftchunking.ChunkInfo{OpNum: opNum}) + if err := f.f.DeletePrefix(f.ctx, prefix); err != nil { + return nil, errwrap.Wrapf("error deleting prefix after op finalization: {{err}}", err) + } + + return ret, nil +} + +func (f *FSMChunkStorage) chunksForOpNum(opNum uint64) ([]*raftchunking.ChunkInfo, error) { + prefix, _ := f.chunkPaths(&raftchunking.ChunkInfo{OpNum: opNum}) + + opChunkKeys, err := f.f.List(f.ctx, prefix) + if err != nil { + return nil, errwrap.Wrapf("error fetching op chunk keys: {{err}}", err) + } + + if len(opChunkKeys) == 0 { + return nil, nil + } + + var ret []*raftchunking.ChunkInfo + + for _, v := range opChunkKeys { + seqNum, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, errwrap.Wrapf("error converting seqnum to integer: {{err}}", err) + } + + entry, err := f.f.Get(f.ctx, prefix+v) + if err != nil { + return nil, errwrap.Wrapf("error fetching chunkinfo: {{err}}", err) + } + + var ci raftchunking.ChunkInfo + if err := jsonutil.DecodeJSON(entry.Value, &ci); err != nil { + return nil, errwrap.Wrapf("error decoding chunkinfo json: {{err}}", err) + } + + if ret == nil { + ret = make([]*raftchunking.ChunkInfo, ci.NumChunks) + } + + ret[seqNum] = &ci + } + + 
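+	// ret is sized to NumChunks and indexed by sequence number, so any chunk
+	// not yet stored is left as a nil entry; callers can use that to detect
+	// an incomplete operation.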
return ret, nil +} + +func (f *FSMChunkStorage) GetChunks() (raftchunking.ChunkMap, error) { + opNums, err := f.f.List(f.ctx, chunkingPrefix) + if err != nil { + return nil, errwrap.Wrapf("error doing recursive list for chunk saving: {{err}}", err) + } + + if len(opNums) == 0 { + return nil, nil + } + + ret := make(raftchunking.ChunkMap, len(opNums)) + for _, opNumStr := range opNums { + opNum, err := strconv.ParseInt(opNumStr, 10, 64) + if err != nil { + return nil, errwrap.Wrapf("error parsing op num during chunk saving: {{err}}", err) + } + + opChunks, err := f.chunksForOpNum(uint64(opNum)) + if err != nil { + return nil, errwrap.Wrapf("error getting chunks for op keys during chunk saving: {{err}}", err) + } + + ret[uint64(opNum)] = opChunks + } + + return ret, nil +} + +func (f *FSMChunkStorage) RestoreChunks(chunks raftchunking.ChunkMap) error { + if err := f.f.DeletePrefix(f.ctx, chunkingPrefix); err != nil { + return errwrap.Wrapf("error deleting prefix for chunk restoration: {{err}}", err) + } + if len(chunks) == 0 { + return nil + } + + for opNum, opChunks := range chunks { + for _, chunk := range opChunks { + if chunk == nil { + continue + } + if chunk.OpNum != opNum { + return errors.New("unexpected op number in chunk") + } + if _, err := f.StoreChunk(chunk); err != nil { + return errwrap.Wrapf("error storing chunk during restoration: {{err}}", err) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/physical/raft/logstore/bolt_store.go b/vendor/github.com/hashicorp/vault/physical/raft/logstore/bolt_store.go new file mode 100644 index 00000000..c6690533 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/raft/logstore/bolt_store.go @@ -0,0 +1,271 @@ +package logstore + +import ( + "errors" + + "github.com/hashicorp/raft" + bolt "go.etcd.io/bbolt" +) + +const ( + // Permissions to use on the db file. This is only used if the + // database file does not exist and needs to be created. + dbFileMode = 0600 +) + +var ( + // Bucket names we perform transactions in + dbLogs = []byte("logs") + dbConf = []byte("conf") + + // An error indicating a given key does not exist + ErrKeyNotFound = errors.New("not found") +) + +// BoltStore provides access to BoltDB for Raft to store and retrieve +// log entries. It also provides key/value storage, and can be used as +// a LogStore and StableStore. +type BoltStore struct { + // conn is the underlying handle to the db. + conn *bolt.DB + + // The path to the Bolt database file + path string +} + +// Options contains all the configuration used to open the BoltDB +type Options struct { + // Path is the file path to the BoltDB to use + Path string + + // BoltOptions contains any specific BoltDB options you might + // want to specify [e.g. open timeout] + BoltOptions *bolt.Options + + // NoSync causes the database to skip fsync calls after each + // write to the log. This is unsafe, so it should be used + // with caution. + NoSync bool +} + +// readOnly returns true if the contained bolt options say to open +// the DB in readOnly mode [this can be useful to tools that want +// to examine the log] +func (o *Options) readOnly() bool { + return o != nil && o.BoltOptions != nil && o.BoltOptions.ReadOnly +} + +// NewBoltStore takes a file path and returns a connected Raft backend. +func NewBoltStore(path string) (*BoltStore, error) { + return New(Options{Path: path}) +} + +// New uses the supplied options to open the BoltDB and prepare it for use as a raft backend. 
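+// A usage sketch (hypothetical path; NoSync left at its safe default):
+//
+//	store, err := New(Options{Path: "/var/lib/vault/raft.db"})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer store.Close()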
+func New(options Options) (*BoltStore, error) { + // Try to connect + handle, err := bolt.Open(options.Path, dbFileMode, options.BoltOptions) + if err != nil { + return nil, err + } + handle.NoSync = options.NoSync + + // Create the new store + store := &BoltStore{ + conn: handle, + path: options.Path, + } + + // If the store was opened read-only, don't try and create buckets + if !options.readOnly() { + // Set up our buckets + if err := store.initialize(); err != nil { + store.Close() + return nil, err + } + } + return store, nil +} + +// initialize is used to set up all of the buckets. +func (b *BoltStore) initialize() error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Create all the buckets + if _, err := tx.CreateBucketIfNotExists(dbLogs); err != nil { + return err + } + if _, err := tx.CreateBucketIfNotExists(dbConf); err != nil { + return err + } + + return tx.Commit() +} + +// Close is used to gracefully close the DB connection. +func (b *BoltStore) Close() error { + return b.conn.Close() +} + +// FirstIndex returns the first known index from the Raft log. +func (b *BoltStore) FirstIndex() (uint64, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return 0, err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + if first, _ := curs.First(); first == nil { + return 0, nil + } else { + return bytesToUint64(first), nil + } +} + +// LastIndex returns the last known index from the Raft log. +func (b *BoltStore) LastIndex() (uint64, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return 0, err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + if last, _ := curs.Last(); last == nil { + return 0, nil + } else { + return bytesToUint64(last), nil + } +} + +// GetLog is used to retrieve a log from BoltDB at a given index. +func (b *BoltStore) GetLog(idx uint64, log *raft.Log) error { + tx, err := b.conn.Begin(false) + if err != nil { + return err + } + defer tx.Rollback() + + bucket := tx.Bucket(dbLogs) + val := bucket.Get(uint64ToBytes(idx)) + + if val == nil { + return raft.ErrLogNotFound + } + + return decodeMsgPack(val, log) +} + +// StoreLog is used to store a single raft log +func (b *BoltStore) StoreLog(log *raft.Log) error { + return b.StoreLogs([]*raft.Log{log}) +} + +// StoreLogs is used to store a set of raft logs +func (b *BoltStore) StoreLogs(logs []*raft.Log) error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + for _, log := range logs { + key := uint64ToBytes(log.Index) + val, err := encodeMsgPack(log) + if err != nil { + return err + } + + bucket := tx.Bucket(dbLogs) + if err := bucket.Put(key, val.Bytes()); err != nil { + return err + } + } + + return tx.Commit() +} + +// DeleteRange is used to delete logs within a given range inclusively. 
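+// For example, compacting the log after a snapshot that covers everything up
+// to snapIdx (illustrative; snapIdx is a hypothetical variable):
+//
+//	first, _ := store.FirstIndex()
+//	_ = store.DeleteRange(first, snapIdx)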
+func (b *BoltStore) DeleteRange(min, max uint64) error { + minKey := uint64ToBytes(min) + + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + for k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() { + // Handle out-of-range log index + if bytesToUint64(k) > max { + break + } + + // Delete in-range log index + if err := curs.Delete(); err != nil { + return err + } + } + + return tx.Commit() +} + +// Set is used to set a key/value set outside of the raft log +func (b *BoltStore) Set(k, v []byte) error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + bucket := tx.Bucket(dbConf) + if err := bucket.Put(k, v); err != nil { + return err + } + + return tx.Commit() +} + +// Get is used to retrieve a value from the k/v store by key +func (b *BoltStore) Get(k []byte) ([]byte, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return nil, err + } + defer tx.Rollback() + + bucket := tx.Bucket(dbConf) + val := bucket.Get(k) + + if val == nil { + return nil, ErrKeyNotFound + } + + return append([]byte(nil), val...), nil +} + +// SetUint64 is like Set, but handles uint64 values +func (b *BoltStore) SetUint64(key []byte, val uint64) error { + return b.Set(key, uint64ToBytes(val)) +} + +// GetUint64 is like Get, but handles uint64 values +func (b *BoltStore) GetUint64(key []byte) (uint64, error) { + val, err := b.Get(key) + if err != nil { + return 0, err + } + return bytesToUint64(val), nil +} + +// Sync performs an fsync on the database file handle. This is not necessary +// under normal operation unless NoSync is enabled, in which this forces the +// database file to sync against the disk. +func (b *BoltStore) Sync() error { + return b.conn.Sync() +} diff --git a/vendor/github.com/hashicorp/vault/physical/raft/logstore/util.go b/vendor/github.com/hashicorp/vault/physical/raft/logstore/util.go new file mode 100644 index 00000000..d35dde8c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/raft/logstore/util.go @@ -0,0 +1,37 @@ +package logstore + +import ( + "bytes" + "encoding/binary" + + "github.com/hashicorp/go-msgpack/codec" +) + +// Decode reverses the encode operation on a byte slice input +func decodeMsgPack(buf []byte, out interface{}) error { + r := bytes.NewBuffer(buf) + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(r, &hd) + return dec.Decode(out) +} + +// Encode writes an encoded object to a new bytes buffer +func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(buf, &hd) + err := enc.Encode(in) + return buf, err +} + +// Converts bytes to an integer +func bytesToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) +} + +// Converts a uint to a byte slice +func uint64ToBytes(u uint64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, u) + return buf +} diff --git a/vendor/github.com/hashicorp/vault/physical/raft/raft.go b/vendor/github.com/hashicorp/vault/physical/raft/raft.go new file mode 100644 index 00000000..5e8ada7d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/raft/raft.go @@ -0,0 +1,1075 @@ +package raft + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + "github.com/armon/go-metrics" + "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + 
"github.com/hashicorp/go-raftchunking" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/raft" + snapshot "github.com/hashicorp/raft-snapshot" + raftboltdb "github.com/hashicorp/vault/physical/raft/logstore" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault/cluster" + "github.com/hashicorp/vault/vault/seal" + + "github.com/hashicorp/vault/sdk/physical" +) + +// EnvVaultRaftNodeID is used to fetch the Raft node ID from the environment. +const EnvVaultRaftNodeID = "VAULT_RAFT_NODE_ID" + +// EnvVaultRaftPath is used to fetch the path where Raft data is stored from the environment. +const EnvVaultRaftPath = "VAULT_RAFT_PATH" + +// Verify RaftBackend satisfies the correct interfaces +var _ physical.Backend = (*RaftBackend)(nil) +var _ physical.Transactional = (*RaftBackend)(nil) + +var ( + // raftLogCacheSize is the maximum number of logs to cache in-memory. + // This is used to reduce disk I/O for the recently committed entries. + raftLogCacheSize = 512 + + raftState = "raft/" + peersFileName = "peers.json" + snapshotsRetained = 2 + + restoreOpDelayDuration = 5 * time.Second +) + +// RaftBackend implements the backend interfaces and uses the raft protocol to +// persist writes to the FSM. +type RaftBackend struct { + logger log.Logger + conf map[string]string + l sync.RWMutex + + // fsm is the state store for vault's data + fsm *FSM + + // raft is the instance of raft we will operate on. + raft *raft.Raft + + // raftNotifyCh is used to receive updates about leadership changes + // regarding this node. + raftNotifyCh chan bool + + // streamLayer is the network layer used to connect the nodes in the raft + // cluster. + streamLayer *raftLayer + + // raftTransport is the transport layer that the raft library uses for RPC + // communication. + raftTransport raft.Transport + + // snapStore is our snapshot mechanism. + snapStore raft.SnapshotStore + + // logStore is used by the raft library to store the raft logs in durable + // storage. + logStore raft.LogStore + + // stableStore is used by the raft library to store additional metadata in + // durable storage. + stableStore raft.StableStore + + // bootstrapConfig is only set when this node needs to be bootstrapped upon + // startup. + bootstrapConfig *raft.Configuration + + // dataDir is the location on the local filesystem that raft and FSM data + // will be stored. + dataDir string + + // localID is the ID for this node. This can either be configured in the + // config file, via a file on disk, or is otherwise randomly generated. + localID string + + // serverAddressProvider is used to map server IDs to addresses. + serverAddressProvider raft.ServerAddressProvider + + // permitPool is used to limit the number of concurrent storage calls. + permitPool *physical.PermitPool +} + +// EnsurePath is used to make sure a path exists +func EnsurePath(path string, dir bool) error { + if !dir { + path = filepath.Dir(path) + } + return os.MkdirAll(path, 0755) +} + +// NewRaftBackend constructs a RaftBackend using the given directory +func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + // Create the FSM. 
+ fsm, err := NewFSM(conf, logger.Named("fsm")) + if err != nil { + return nil, fmt.Errorf("failed to create fsm: %v", err) + } + + path := os.Getenv(EnvVaultRaftPath) + if path == "" { + pathFromConfig, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("'path' must be set") + } + path = pathFromConfig + } + + // Build an all in-memory setup for dev mode, otherwise prepare a full + // disk-based setup. + var log raft.LogStore + var stable raft.StableStore + var snap raft.SnapshotStore + var devMode bool + if devMode { + store := raft.NewInmemStore() + stable = store + log = store + snap = raft.NewInmemSnapshotStore() + } else { + // Create the base raft path. + path := filepath.Join(path, raftState) + if err := EnsurePath(path, true); err != nil { + return nil, err + } + + // Create the backend raft store for logs and stable storage. + store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db")) + if err != nil { + return nil, err + } + stable = store + + // Wrap the store in a LogCache to improve performance. + cacheStore, err := raft.NewLogCache(raftLogCacheSize, store) + if err != nil { + return nil, err + } + log = cacheStore + + // Create the snapshot store. + snapshots, err := NewBoltSnapshotStore(path, snapshotsRetained, logger.Named("snapshot"), fsm) + if err != nil { + return nil, err + } + snap = snapshots + } + + var localID string + { + // Determine the local node ID from the environment. + if raftNodeID := os.Getenv(EnvVaultRaftNodeID); raftNodeID != "" { + localID = raftNodeID + } + + // If not set in the environment check the configuration file. + if len(localID) == 0 { + localID = conf["node_id"] + } + + // If not set in the config check the "node-id" file. + if len(localID) == 0 { + localIDRaw, err := ioutil.ReadFile(filepath.Join(path, "node-id")) + switch { + case err == nil: + if len(localIDRaw) > 0 { + localID = string(localIDRaw) + } + case os.IsNotExist(err): + default: + return nil, err + } + } + + // If all of the above fails generate a UUID and persist it to the + // "node-id" file. + if len(localID) == 0 { + id, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + if err := ioutil.WriteFile(filepath.Join(path, "node-id"), []byte(id), 0600); err != nil { + return nil, err + } + + localID = id + } + } + + return &RaftBackend{ + logger: logger, + fsm: fsm, + conf: conf, + logStore: log, + stableStore: stable, + snapStore: snap, + dataDir: path, + localID: localID, + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), + }, nil +} + +// RaftServer has information about a server in the Raft configuration +type RaftServer struct { + // NodeID is the name of the server + NodeID string `json:"node_id"` + + // Address is the IP:port of the server, used for Raft communications + Address string `json:"address"` + + // Leader is true if this server is the current cluster leader + Leader bool `json:"leader"` + + // Protocol version is the raft protocol version used by the server + ProtocolVersion string `json:"protocol_version"` + + // Voter is true if this server has a vote in the cluster. This might + // be false if the server is staging and still coming online. + Voter bool `json:"voter"` +} + +// RaftConfigurationResponse is returned when querying for the current Raft +// configuration. +type RaftConfigurationResponse struct { + // Servers has the list of servers in the Raft configuration. + Servers []*RaftServer `json:"servers"` + + // Index has the Raft index of this configuration. 
+ Index uint64 `json:"index"` +} + +// Peer defines the ID and Address for a given member of the raft cluster. +type Peer struct { + ID string `json:"id"` + Address string `json:"address"` +} + +// NodeID returns the identifier of the node +func (b *RaftBackend) NodeID() string { + return b.localID +} + +// Initialized tells if raft is running or not +func (b *RaftBackend) Initialized() bool { + b.l.RLock() + init := b.raft != nil + b.l.RUnlock() + return init +} + +// SetTLSKeyring is used to install a new keyring. If the active key has changed +// it will also close any network connections or streams forcing a reconnect +// with the new key. +func (b *RaftBackend) SetTLSKeyring(keyring *TLSKeyring) error { + b.l.RLock() + err := b.streamLayer.setTLSKeyring(keyring) + b.l.RUnlock() + + return err +} + +// SetServerAddressProvider sets a the address provider for determining the raft +// node addresses. This is currently only used in tests. +func (b *RaftBackend) SetServerAddressProvider(provider raft.ServerAddressProvider) { + b.l.Lock() + b.serverAddressProvider = provider + b.l.Unlock() +} + +// Bootstrap prepares the given peers to be part of the raft cluster +func (b *RaftBackend) Bootstrap(ctx context.Context, peers []Peer) error { + b.l.Lock() + defer b.l.Unlock() + + hasState, err := raft.HasExistingState(b.logStore, b.stableStore, b.snapStore) + if err != nil { + return err + } + + if hasState { + return errors.New("error bootstrapping cluster: cluster already has state") + } + + raftConfig := &raft.Configuration{ + Servers: make([]raft.Server, len(peers)), + } + + for i, p := range peers { + raftConfig.Servers[i] = raft.Server{ + ID: raft.ServerID(p.ID), + Address: raft.ServerAddress(p.Address), + } + } + + // Store the config for later use + b.bootstrapConfig = raftConfig + return nil +} + +// SetRestoreCallback sets the callback to be used when a restoreCallbackOp is +// processed through the FSM. +func (b *RaftBackend) SetRestoreCallback(restoreCb restoreCallback) { + b.fsm.l.Lock() + b.fsm.restoreCb = restoreCb + b.fsm.l.Unlock() +} + +func (b *RaftBackend) applyConfigSettings(config *raft.Config) error { + config.Logger = b.logger + multiplierRaw, ok := b.conf["performance_multiplier"] + multiplier := 5 + if ok { + var err error + multiplier, err = strconv.Atoi(multiplierRaw) + if err != nil { + return err + } + } + config.ElectionTimeout = config.ElectionTimeout * time.Duration(multiplier) + config.HeartbeatTimeout = config.HeartbeatTimeout * time.Duration(multiplier) + config.LeaderLeaseTimeout = config.LeaderLeaseTimeout * time.Duration(multiplier) + + snapThresholdRaw, ok := b.conf["snapshot_threshold"] + if ok { + var err error + snapThreshold, err := strconv.Atoi(snapThresholdRaw) + if err != nil { + return err + } + config.SnapshotThreshold = uint64(snapThreshold) + } + + trailingLogsRaw, ok := b.conf["trailing_logs"] + if ok { + var err error + trailingLogs, err := strconv.Atoi(trailingLogsRaw) + if err != nil { + return err + } + config.TrailingLogs = uint64(trailingLogs) + } + + config.NoSnapshotRestoreOnStart = true + config.MaxAppendEntries = 64 + + return nil +} + +// SetupOpts are used to pass options to the raft setup function. +type SetupOpts struct { + // TLSKeyring is the keyring to use for the cluster traffic. + TLSKeyring *TLSKeyring + + // ClusterListener is the cluster hook used to register the raft handler and + // client with core's cluster listeners. 
+ ClusterListener cluster.ClusterHook + + // StartAsLeader is used to specify this node should start as leader and + // bypass the leader election. This should be used with caution. + StartAsLeader bool + + // RecoveryModeConfig is the configuration for the raft cluster in recovery + // mode. + RecoveryModeConfig *raft.Configuration +} + +func (b *RaftBackend) StartRecoveryCluster(ctx context.Context, peer Peer) error { + recoveryModeConfig := &raft.Configuration{ + Servers: []raft.Server{ + { + ID: raft.ServerID(peer.ID), + Address: raft.ServerAddress(peer.Address), + }, + }, + } + + return b.SetupCluster(context.Background(), SetupOpts{ + StartAsLeader: true, + RecoveryModeConfig: recoveryModeConfig, + }) +} + +// SetupCluster starts the raft cluster and enables the networking needed for +// the raft nodes to communicate. +func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error { + b.logger.Trace("setting up raft cluster") + + b.l.Lock() + defer b.l.Unlock() + + // We are already unsealed + if b.raft != nil { + b.logger.Debug("raft already started, not setting up cluster") + return nil + } + + if len(b.localID) == 0 { + return errors.New("no local node id configured") + } + + // Setup the raft config + raftConfig := raft.DefaultConfig() + if err := b.applyConfigSettings(raftConfig); err != nil { + return err + } + + switch { + case opts.TLSKeyring == nil && opts.ClusterListener == nil: + // If we don't have a provided network we use an in-memory one. + // This allows us to bootstrap a node without bringing up a cluster + // network. This will be true during bootstrap, tests and dev modes. + _, b.raftTransport = raft.NewInmemTransportWithTimeout(raft.ServerAddress(b.localID), time.Second) + case opts.TLSKeyring == nil: + return errors.New("no keyring provided") + case opts.ClusterListener == nil: + return errors.New("no cluster listener provided") + default: + // Load the base TLS config from the cluster listener. + baseTLSConfig, err := opts.ClusterListener.TLSConfig(ctx) + if err != nil { + return err + } + + // Set the local address and localID in the streaming layer and the raft config. + streamLayer, err := NewRaftLayer(b.logger.Named("stream"), opts.TLSKeyring, opts.ClusterListener.Addr(), baseTLSConfig) + if err != nil { + return err + } + transConfig := &raft.NetworkTransportConfig{ + Stream: streamLayer, + MaxPool: 3, + Timeout: 10 * time.Second, + ServerAddressProvider: b.serverAddressProvider, + } + transport := raft.NewNetworkTransportWithConfig(transConfig) + + b.streamLayer = streamLayer + b.raftTransport = transport + } + + raftConfig.LocalID = raft.ServerID(b.localID) + + // Set up a channel for reliable leader notifications. + raftNotifyCh := make(chan bool, 1) + raftConfig.NotifyCh = raftNotifyCh + + // If we have a bootstrapConfig set we should bootstrap now. + if b.bootstrapConfig != nil { + bootstrapConfig := b.bootstrapConfig + // Unset the bootstrap config + b.bootstrapConfig = nil + + // Bootstrap raft with our known cluster members. + if err := raft.BootstrapCluster(raftConfig, b.logStore, b.stableStore, b.snapStore, b.raftTransport, *bootstrapConfig); err != nil { + return err + } + // If we are the only node we should start as the leader. + if len(bootstrapConfig.Servers) == 1 { + opts.StartAsLeader = true + } + } + + raftConfig.StartAsLeader = opts.StartAsLeader + // Setup the Raft store. 
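+	// Enabling noop restore keeps raft's startup snapshot handling from
+	// re-applying data to the live FSM; it is switched back off right after
+	// raft.NewRaft returns below.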
+ b.fsm.SetNoopRestore(true) + + raftPath := filepath.Join(b.dataDir, raftState) + peersFile := filepath.Join(raftPath, peersFileName) + _, err := os.Stat(peersFile) + if err == nil { + b.logger.Info("raft recovery initiated", "recovery_file", peersFileName) + + recoveryConfig, err := raft.ReadConfigJSON(peersFile) + if err != nil { + return errwrap.Wrapf("raft recovery failed to parse peers.json: {{err}}", err) + } + + b.logger.Info("raft recovery: found new config", "config", recoveryConfig) + err = raft.RecoverCluster(raftConfig, b.fsm, b.logStore, b.stableStore, b.snapStore, b.raftTransport, recoveryConfig) + if err != nil { + return errwrap.Wrapf("raft recovery failed: {{err}}", err) + } + + err = os.Remove(peersFile) + if err != nil { + return errwrap.Wrapf("raft recovery failed to delete peers.json; please delete manually: {{err}}", err) + } + b.logger.Info("raft recovery deleted peers.json") + } + + if opts.RecoveryModeConfig != nil { + err = raft.RecoverCluster(raftConfig, b.fsm, b.logStore, b.stableStore, b.snapStore, b.raftTransport, *opts.RecoveryModeConfig) + if err != nil { + return errwrap.Wrapf("recovering raft cluster failed: {{err}}", err) + } + } + + raftObj, err := raft.NewRaft(raftConfig, b.fsm.chunker, b.logStore, b.stableStore, b.snapStore, b.raftTransport) + b.fsm.SetNoopRestore(false) + if err != nil { + return err + } + b.raft = raftObj + b.raftNotifyCh = raftNotifyCh + + if b.streamLayer != nil { + // Add Handler to the cluster. + opts.ClusterListener.AddHandler(consts.RaftStorageALPN, b.streamLayer) + + // Add Client to the cluster. + opts.ClusterListener.AddClient(consts.RaftStorageALPN, b.streamLayer) + } + + return nil +} + +// TeardownCluster shuts down the raft cluster +func (b *RaftBackend) TeardownCluster(clusterListener cluster.ClusterHook) error { + if clusterListener != nil { + clusterListener.StopHandler(consts.RaftStorageALPN) + clusterListener.RemoveClient(consts.RaftStorageALPN) + } + + b.l.Lock() + future := b.raft.Shutdown() + b.raft = nil + b.l.Unlock() + + return future.Error() +} + +// AppliedIndex returns the latest index applied to the FSM +func (b *RaftBackend) AppliedIndex() uint64 { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return 0 + } + + return b.raft.AppliedIndex() +} + +// RemovePeer removes the given peer ID from the raft cluster. If the node is +// ourselves we will give up leadership. +func (b *RaftBackend) RemovePeer(ctx context.Context, peerID string) error { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return errors.New("raft storage is not initialized") + } + + future := b.raft.RemoveServer(raft.ServerID(peerID), 0, 0) + + return future.Error() +} + +func (b *RaftBackend) GetConfiguration(ctx context.Context) (*RaftConfigurationResponse, error) { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return nil, errors.New("raft storage is not initialized") + } + + future := b.raft.GetConfiguration() + if err := future.Error(); err != nil { + return nil, err + } + + config := &RaftConfigurationResponse{ + Index: future.Index(), + } + + for _, server := range future.Configuration().Servers { + entry := &RaftServer{ + NodeID: string(server.ID), + Address: string(server.Address), + // Since we only service this request on the active node our node ID + // denotes the raft leader. 
+ Leader: string(server.ID) == b.NodeID(), + Voter: server.Suffrage == raft.Voter, + ProtocolVersion: strconv.Itoa(raft.ProtocolVersionMax), + } + config.Servers = append(config.Servers, entry) + } + + return config, nil +} + +// AddPeer adds a new server to the raft cluster +func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) error { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return errors.New("raft storage is not initialized") + } + + b.logger.Debug("adding raft peer", "node_id", peerID, "cluster_addr", clusterAddr) + + future := b.raft.AddVoter(raft.ServerID(peerID), raft.ServerAddress(clusterAddr), 0, 0) + return future.Error() +} + +// Peers returns all the servers present in the raft cluster +func (b *RaftBackend) Peers(ctx context.Context) ([]Peer, error) { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return nil, errors.New("raft storage backend is not initialized") + } + + future := b.raft.GetConfiguration() + if err := future.Error(); err != nil { + return nil, err + } + + ret := make([]Peer, len(future.Configuration().Servers)) + for i, s := range future.Configuration().Servers { + ret[i] = Peer{ + ID: string(s.ID), + Address: string(s.Address), + } + } + + return ret, nil +} + +// Snapshot takes a raft snapshot, packages it into a archive file and writes it +// to the provided writer. Seal access is used to encrypt the SHASUM file so we +// can validate the snapshot was taken using the same master keys or not. +func (b *RaftBackend) Snapshot(out *logical.HTTPResponseWriter, access seal.Access) error { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return errors.New("raft storage backend is sealed") + } + + // If we have access to the seal create a sealer object + var s snapshot.Sealer + if access != nil { + s = &sealer{ + access: access, + } + } + + snap, err := snapshot.NewWithSealer(b.logger.Named("snapshot"), b.raft, s) + if err != nil { + return err + } + defer snap.Close() + + size, err := snap.Size() + if err != nil { + return err + } + + out.Header().Add("Content-Disposition", "attachment") + out.Header().Add("Content-Length", fmt.Sprintf("%d", size)) + out.Header().Add("Content-Type", "application/gzip") + _, err = io.Copy(out, snap) + if err != nil { + return err + } + + return nil +} + +// WriteSnapshotToTemp reads a snapshot archive off the provided reader, +// extracts the data and writes the snapshot to a temporary file. The seal +// access is used to decrypt the SHASUM file in the archive to ensure this +// snapshot has the same master key as the running instance. If the provided +// access is nil then it will skip that validation. +func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access seal.Access) (*os.File, func(), raft.SnapshotMeta, error) { + b.l.RLock() + defer b.l.RUnlock() + + var metadata raft.SnapshotMeta + if b.raft == nil { + return nil, nil, metadata, errors.New("raft storage backend is sealed") + } + + // If we have access to the seal create a sealer object + var s snapshot.Sealer + if access != nil { + s = &sealer{ + access: access, + } + } + + snap, cleanup, err := snapshot.WriteToTempFileWithSealer(b.logger.Named("snapshot"), in, &metadata, s) + return snap, cleanup, metadata, err +} + +// RestoreSnapshot applies the provided snapshot metadata and snapshot data to +// raft. 
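+// The restore is followed by a restoreCallbackOp log entry, so standby nodes
+// run their restore callbacks once the snapshot has been applied to a quorum
+// of servers.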
+func (b *RaftBackend) RestoreSnapshot(ctx context.Context, metadata raft.SnapshotMeta, snap io.Reader) error { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return errors.New("raft storage is not initialized") + } + + if err := b.raft.Restore(&metadata, snap, 0); err != nil { + b.logger.Named("snapshot").Error("failed to restore snapshot", "error", err) + return err + } + + // Apply a log that tells the follower nodes to run the restore callback + // function. This is done after the restore call so we can be sure the + // snapshot applied to a quorum of nodes. + command := &LogData{ + Operations: []*LogOperation{ + &LogOperation{ + OpType: restoreCallbackOp, + }, + }, + } + + b.l.RLock() + err := b.applyLog(ctx, command) + b.l.RUnlock() + + // Do a best-effort attempt to let the standbys apply the restoreCallbackOp + // before we continue. + time.Sleep(restoreOpDelayDuration) + return err +} + +// Delete inserts an entry in the log to delete the given path +func (b *RaftBackend) Delete(ctx context.Context, path string) error { + defer metrics.MeasureSince([]string{"raft-storage", "delete"}, time.Now()) + command := &LogData{ + Operations: []*LogOperation{ + &LogOperation{ + OpType: deleteOp, + Key: path, + }, + }, + } + b.permitPool.Acquire() + defer b.permitPool.Release() + + b.l.RLock() + err := b.applyLog(ctx, command) + b.l.RUnlock() + return err +} + +// Get returns the value corresponding to the given path from the fsm +func (b *RaftBackend) Get(ctx context.Context, path string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"raft-storage", "get"}, time.Now()) + if b.fsm == nil { + return nil, errors.New("raft: fsm not configured") + } + + b.permitPool.Acquire() + defer b.permitPool.Release() + + return b.fsm.Get(ctx, path) +} + +// Put inserts an entry in the log for the put operation +func (b *RaftBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"raft-storage", "put"}, time.Now()) + command := &LogData{ + Operations: []*LogOperation{ + &LogOperation{ + OpType: putOp, + Key: entry.Key, + Value: entry.Value, + }, + }, + } + + b.permitPool.Acquire() + defer b.permitPool.Release() + + b.l.RLock() + err := b.applyLog(ctx, command) + b.l.RUnlock() + return err +} + +// List enumerates all the items under the prefix from the fsm +func (b *RaftBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"raft-storage", "list"}, time.Now()) + if b.fsm == nil { + return nil, errors.New("raft: fsm not configured") + } + + b.permitPool.Acquire() + defer b.permitPool.Release() + + return b.fsm.List(ctx, prefix) +} + +// Transaction applies all the given operations into a single log and +// applies it. 
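+//
+// For illustration, a single atomic batch might look like this (the keys and
+// values are hypothetical):
+//
+//	err := b.Transaction(ctx, []*physical.TxnEntry{
+//		{Operation: physical.PutOperation, Entry: &physical.Entry{Key: "foo", Value: []byte("bar")}},
+//		{Operation: physical.DeleteOperation, Entry: &physical.Entry{Key: "baz"}},
+//	})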
+func (b *RaftBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
+	defer metrics.MeasureSince([]string{"raft-storage", "transaction"}, time.Now())
+	command := &LogData{
+		Operations: make([]*LogOperation, len(txns)),
+	}
+	for i, txn := range txns {
+		op := &LogOperation{}
+		switch txn.Operation {
+		case physical.PutOperation:
+			op.OpType = putOp
+			op.Key = txn.Entry.Key
+			op.Value = txn.Entry.Value
+		case physical.DeleteOperation:
+			op.OpType = deleteOp
+			op.Key = txn.Entry.Key
+		default:
+			return fmt.Errorf("%q is not a supported transaction operation", txn.Operation)
+		}
+
+		command.Operations[i] = op
+	}
+
+	b.permitPool.Acquire()
+	defer b.permitPool.Release()
+
+	b.l.RLock()
+	err := b.applyLog(ctx, command)
+	b.l.RUnlock()
+	return err
+}
+
+// applyLog will take a given log command and apply it to the raft log. applyLog
+// doesn't return until the log has been applied to a quorum of servers and is
+// persisted to the local FSM. Caller should hold the backend's read lock.
+func (b *RaftBackend) applyLog(ctx context.Context, command *LogData) error {
+	if b.raft == nil {
+		return errors.New("raft storage backend is not initialized")
+	}
+
+	commandBytes, err := proto.Marshal(command)
+	if err != nil {
+		return err
+	}
+
+	var chunked bool
+	var applyFuture raft.ApplyFuture
+	switch {
+	case len(commandBytes) <= raftchunking.ChunkSize:
+		applyFuture = b.raft.Apply(commandBytes, 0)
+	default:
+		chunked = true
+		applyFuture = raftchunking.ChunkingApply(commandBytes, nil, 0, b.raft.ApplyLog)
+	}
+
+	if err := applyFuture.Error(); err != nil {
+		return err
+	}
+
+	resp := applyFuture.Response()
+
+	if chunked {
+		// In this case we didn't apply all chunks successfully, possibly due
+		// to a term change.
+		if resp == nil {
+			// This returns the error in the interface because the raft library
+			// returns errors from the FSM via the future, not via err from the
+			// apply function. Downstream client code expects to see any error
+			// from the FSM (as opposed to the apply itself) and decide whether
+			// it can retry in the future's response.
+			return errors.New("applying chunking failed, please retry")
+		}
+
+		// We expect that this conversion should always work.
+		chunkedSuccess, ok := resp.(raftchunking.ChunkingSuccess)
+		if !ok {
+			return errors.New("unknown type of response back from chunking FSM")
+		}
+
+		// Replace the reply with the inner wrapped version.
+		resp = chunkedSuccess.Response
+	}
+
+	if resp, ok := resp.(*FSMApplyResponse); !ok || !resp.Success {
+		return errors.New("could not apply data")
+	}
+
+	return nil
+}
+
+// HAEnabled is the implementation of the HABackend interface
+func (b *RaftBackend) HAEnabled() bool { return true }
+
+// LockWith is the implementation of the HABackend interface
+func (b *RaftBackend) LockWith(key, value string) (physical.Lock, error) {
+	return &RaftLock{
+		key:   key,
+		value: []byte(value),
+		b:     b,
+	}, nil
+}
+
+// RaftLock implements the physical Lock interface and enables HA for this
+// backend. The Lock uses the raftNotifyCh for receiving leadership edge
+// triggers. Vault's active duty matches raft's leadership.
+type RaftLock struct {
+	key   string
+	value []byte
+
+	b *RaftBackend
+}
+
+// monitorLeadership waits until we receive an update on the raftNotifyCh and
+// closes the leaderLost channel.
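+// The returned channel is what Lock hands back to its caller, so Vault's HA
+// machinery can step down when raft leadership is lost.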
+func (l *RaftLock) monitorLeadership(stopCh <-chan struct{}, leaderNotifyCh <-chan bool) <-chan struct{} { + leaderLost := make(chan struct{}) + go func() { + select { + case isLeader := <-leaderNotifyCh: + if !isLeader { + close(leaderLost) + } + case <-stopCh: + } + }() + return leaderLost +} + +// Lock blocks until we become leader or are shutdown. It returns a channel that +// is closed when we detect a loss of leadership. +func (l *RaftLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + l.b.l.RLock() + + // Cache the notifyCh locally + leaderNotifyCh := l.b.raftNotifyCh + + // TODO: Remove when Raft can server as the ha_storage backend. The internal + // raft pointer should not be nil here, but the nil check is a guard against + // https://github.com/hashicorp/vault/issues/8206 + if l.b.raft == nil { + return nil, errors.New("attempted to grab a lock on a nil raft backend") + } + + // Check to see if we are already leader. + if l.b.raft.State() == raft.Leader { + err := l.b.applyLog(context.Background(), &LogData{ + Operations: []*LogOperation{ + &LogOperation{ + OpType: putOp, + Key: l.key, + Value: l.value, + }, + }, + }) + l.b.l.RUnlock() + if err != nil { + return nil, err + } + + return l.monitorLeadership(stopCh, leaderNotifyCh), nil + } + l.b.l.RUnlock() + + for { + select { + case isLeader := <-leaderNotifyCh: + if isLeader { + // We are leader, set the key + l.b.l.RLock() + err := l.b.applyLog(context.Background(), &LogData{ + Operations: []*LogOperation{ + &LogOperation{ + OpType: putOp, + Key: l.key, + Value: l.value, + }, + }, + }) + l.b.l.RUnlock() + if err != nil { + return nil, err + } + + return l.monitorLeadership(stopCh, leaderNotifyCh), nil + } + case <-stopCh: + return nil, nil + } + } + + return nil, nil +} + +// Unlock gives up leadership. +func (l *RaftLock) Unlock() error { + return l.b.raft.LeadershipTransfer().Error() +} + +// Value reads the value of the lock. This informs us who is currently leader. +func (l *RaftLock) Value() (bool, string, error) { + e, err := l.b.Get(context.Background(), l.key) + if err != nil { + return false, "", err + } + if e == nil { + return false, "", nil + } + + value := string(e.Value) + // TODO: how to tell if held? + return true, value, nil +} + +// sealer implements the snapshot.Sealer interface and is used in the snapshot +// process for encrypting/decrypting the SHASUM file in snapshot archives. +type sealer struct { + access seal.Access +} + +// Seal encrypts the data with using the seal access object. +func (s sealer) Seal(ctx context.Context, pt []byte) ([]byte, error) { + if s.access == nil { + return nil, errors.New("no seal access available") + } + eblob, err := s.access.Encrypt(ctx, pt) + if err != nil { + return nil, err + } + + return proto.Marshal(eblob) +} + +// Open decrypts the data using the seal access object. 
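+// The ciphertext is expected to be a marshaled physical.EncryptedBlobInfo,
+// the same shape that Seal produces above.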
+func (s sealer) Open(ctx context.Context, ct []byte) ([]byte, error) {
+	if s.access == nil {
+		return nil, errors.New("no seal access available")
+	}
+
+	var eblob physical.EncryptedBlobInfo
+	err := proto.Unmarshal(ct, &eblob)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.access.Decrypt(ctx, &eblob)
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/raft/raft_util.go b/vendor/github.com/hashicorp/vault/physical/raft/raft_util.go
new file mode 100644
index 00000000..0223f834
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/raft/raft_util.go
@@ -0,0 +1,13 @@
+// +build !enterprise
+
+package raft
+
+// AddNonVotingPeer adds a non-voting server to the raft cluster; this stub always errors in non-enterprise builds
+import (
+	"context"
+	"errors"
+)
+
+func (b *RaftBackend) AddNonVotingPeer(ctx context.Context, peerID, clusterAddr string) error {
+	return errors.New("not implemented")
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/raft/snapshot.go b/vendor/github.com/hashicorp/vault/physical/raft/snapshot.go
new file mode 100644
index 00000000..8538778b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/raft/snapshot.go
@@ -0,0 +1,288 @@
+package raft
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+
+	log "github.com/hashicorp/go-hclog"
+
+	"github.com/hashicorp/raft"
+)
+
+const (
+	// boltSnapshotID is the stable ID for any boltDB snapshot. Keeping the ID
+	// stable means there is only ever one bolt snapshot in the system.
+	boltSnapshotID = "bolt-snapshot"
+)
+
+// BoltSnapshotStore implements the SnapshotStore interface and allows
+// snapshots to be made on the local disk. The main difference between this
+// store and the file store is that it distinguishes between snapshots that
+// have been written by the FSM and by internal Raft operations. The former are
+// treated as noop snapshots on Persist and are read in full from the FSM on
+// Open. The latter are treated like normal file snapshots and are able to be
+// opened and applied as usual.
+type BoltSnapshotStore struct {
+	// path is the directory in which to store file based snapshots
+	path string
+	// retain is the number of file based snapshots to keep
+	retain int
+
+	// We hold a copy of the FSM so we can stream snapshots straight out of the
+	// database.
+	fsm *FSM
+
+	// fileSnapStore is used to fall back to file snapshots when the data is
+	// being written from the raft library. This currently only happens on a
+	// follower during a snapshot install RPC.
+	fileSnapStore *raft.FileSnapshotStore
+	logger        log.Logger
+}
+
+// BoltSnapshotSink implements SnapshotSink, optionally choosing to write to a
+// file.
+type BoltSnapshotSink struct {
+	store  *BoltSnapshotStore
+	logger log.Logger
+	meta   raft.SnapshotMeta
+	trans  raft.Transport
+
+	fileSink raft.SnapshotSink
+	l        sync.Mutex
+	closed   bool
+}
+
+// NewBoltSnapshotStore creates a new BoltSnapshotStore based
+// on a base directory. The `retain` parameter controls how many
+// snapshots are retained. Must be at least 1.
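+//
+// A minimal construction sketch, mirroring the call made in NewRaftBackend:
+//
+//	snaps, err := NewBoltSnapshotStore(path, snapshotsRetained, logger.Named("snapshot"), fsm)
+//	if err != nil {
+//		return nil, err
+//	}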
+func NewBoltSnapshotStore(base string, retain int, logger log.Logger, fsm *FSM) (*BoltSnapshotStore, error) { + if retain < 1 { + return nil, fmt.Errorf("must retain at least one snapshot") + } + if logger == nil { + return nil, fmt.Errorf("no logger provided") + } + + fileStore, err := raft.NewFileSnapshotStore(base, retain, nil) + if err != nil { + return nil, err + } + + // Setup the store + store := &BoltSnapshotStore{ + logger: logger, + fsm: fsm, + fileSnapStore: fileStore, + } + + { + // TODO: I think this needs to be done before every NewRaft and + // RecoverCluster call. Not just on Factory method. + + // Here we delete all the existing file based snapshots. This is necessary + // because we do not issue a restore on NewRaft. If a previous file snapshot + // had failed to apply we will be incorrectly setting the indexes. It's + // safer to simply delete all file snapshots on startup and rely on Raft to + // reconcile the FSM state. + if err := store.ReapSnapshots(); err != nil { + return nil, err + } + } + + return store, nil +} + +// Create is used to start a new snapshot +func (f *BoltSnapshotStore) Create(version raft.SnapshotVersion, index, term uint64, + configuration raft.Configuration, configurationIndex uint64, trans raft.Transport) (raft.SnapshotSink, error) { + // We only support version 1 snapshots at this time. + if version != 1 { + return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + // We are processing a snapshot, fastforward the index, term, and + // configuration to the latest seen by the raft system. This could include + // log indexes for operation types that are never sent to the FSM. + if err := f.fsm.witnessSnapshot(index, term, configurationIndex, configuration); err != nil { + return nil, err + } + + // Create the sink + sink := &BoltSnapshotSink{ + store: f, + logger: f.logger, + meta: raft.SnapshotMeta{ + Version: version, + ID: boltSnapshotID, + Index: index, + Term: term, + Configuration: configuration, + ConfigurationIndex: configurationIndex, + }, + trans: trans, + } + + // Done + return sink, nil +} + +// List returns available snapshots in the store. It only returns bolt +// snapshots. No snapshot will be returned if there are no indexes in the +// FSM. +func (f *BoltSnapshotStore) List() ([]*raft.SnapshotMeta, error) { + meta, err := f.getBoltSnapshotMeta() + if err != nil { + return nil, err + } + + // If we haven't seen any data yet do not return a snapshot + if meta.Index == 0 { + return nil, nil + } + + return []*raft.SnapshotMeta{meta}, nil +} + +// getBoltSnapshotMeta returns the fsm's latest state and configuration. +func (f *BoltSnapshotStore) getBoltSnapshotMeta() (*raft.SnapshotMeta, error) { + latestIndex, latestConfig := f.fsm.LatestState() + meta := &raft.SnapshotMeta{ + Version: 1, + ID: boltSnapshotID, + Index: latestIndex.Index, + Term: latestIndex.Term, + } + + if latestConfig != nil { + index, configuration := protoConfigurationToRaftConfiguration(latestConfig) + meta.Configuration = configuration + meta.ConfigurationIndex = index + } + + return meta, nil +} + +// Open takes a snapshot ID and returns a ReadCloser for that snapshot. 
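+// Bolt snapshots are streamed straight out of the FSM, so the returned
+// metadata's Size is computed by first draining a parallel metadata stream.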
+func (f *BoltSnapshotStore) Open(id string) (*raft.SnapshotMeta, io.ReadCloser, error) { + var readCloser io.ReadCloser + var meta *raft.SnapshotMeta + switch id { + case boltSnapshotID: + + var err error + meta, err = f.getBoltSnapshotMeta() + if err != nil { + return nil, nil, err + } + // If we don't have any data return an error + if meta.Index == 0 { + return nil, nil, errors.New("no snapshot data") + } + + // Stream data out of the FSM to calculate the size + var writeCloser *io.PipeWriter + readCloser, writeCloser = io.Pipe() + metaReadCloser, metaWriteCloser := io.Pipe() + go func() { + f.fsm.writeTo(context.Background(), metaWriteCloser, writeCloser) + }() + + // Compute the size + n, err := io.Copy(ioutil.Discard, metaReadCloser) + if err != nil { + f.logger.Error("failed to read state file", "error", err) + metaReadCloser.Close() + readCloser.Close() + return nil, nil, err + } + + meta.Size = n + + default: + var err error + meta, readCloser, err = f.fileSnapStore.Open(id) + if err != nil { + return nil, nil, err + } + } + + return meta, readCloser, nil +} + +// ReapSnapshots reaps any snapshots beyond the retain count. +func (f *BoltSnapshotStore) ReapSnapshots() error { + return f.fileSnapStore.ReapSnapshots() +} + +// ID returns the ID of the snapshot, can be used with Open() +// after the snapshot is finalized. +func (s *BoltSnapshotSink) ID() string { + s.l.Lock() + defer s.l.Unlock() + + if s.fileSink != nil { + return s.fileSink.ID() + } + + return s.meta.ID +} + +// Write is used to append to the state file. We write to the +// buffered IO object to reduce the amount of context switches. +func (s *BoltSnapshotSink) Write(b []byte) (int, error) { + s.l.Lock() + defer s.l.Unlock() + + // If someone is writing to this sink then we need to create a file sink to + // capture the data. This currently only happens when a follower is being + // sent a snapshot. + if s.fileSink == nil { + fileSink, err := s.store.fileSnapStore.Create(s.meta.Version, s.meta.Index, s.meta.Term, s.meta.Configuration, s.meta.ConfigurationIndex, s.trans) + if err != nil { + return 0, err + } + s.fileSink = fileSink + } + + return s.fileSink.Write(b) +} + +// Close is used to indicate a successful end. +func (s *BoltSnapshotSink) Close() error { + s.l.Lock() + defer s.l.Unlock() + + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + if s.fileSink != nil { + return s.fileSink.Close() + } + + return nil +} + +// Cancel is used to indicate an unsuccessful end. 
+func (s *BoltSnapshotSink) Cancel() error { + s.l.Lock() + defer s.l.Unlock() + + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + if s.fileSink != nil { + return s.fileSink.Cancel() + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/physical/raft/streamlayer.go b/vendor/github.com/hashicorp/vault/physical/raft/streamlayer.go new file mode 100644 index 00000000..e1fcedbd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/raft/streamlayer.go @@ -0,0 +1,371 @@ +package raft + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "errors" + fmt "fmt" + "io" + "math/big" + mathrand "math/rand" + "net" + "sync" + "time" + + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/raft" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault/cluster" +) + +// TLSKey is a single TLS keypair in the Keyring +type TLSKey struct { + // ID is a unique identifier for this Key + ID string `json:"id"` + + // KeyType defines the algorighm used to generate the private keys + KeyType string `json:"key_type"` + + // AppliedIndex is the earliest known raft index that safely contains this + // key. + AppliedIndex uint64 `json:"applied_index"` + + // CertBytes is the marshaled certificate. + CertBytes []byte `json:"cluster_cert"` + + // KeyParams is the marshaled private key. + KeyParams *certutil.ClusterKeyParams `json:"cluster_key_params"` + + // CreatedTime is the time this key was generated. This value is useful in + // determining when the next rotation should be. + CreatedTime time.Time `json:"created_time"` + + parsedCert *x509.Certificate + parsedKey *ecdsa.PrivateKey +} + +// TLSKeyring is the set of keys that raft uses for network communication. +// Only one key is used to dial at a time but both keys will be used to accept +// connections. +type TLSKeyring struct { + // Keys is the set of available key pairs + Keys []*TLSKey `json:"keys"` + + // AppliedIndex is the earliest known raft index that safely contains the + // latest key in the keyring. + AppliedIndex uint64 `json:"applied_index"` + + // Term is an incrementing identifier value used to quickly determine if two + // states of the keyring are different. + Term uint64 `json:"term"` + + // ActiveKeyID is the key ID to track the active key in the keyring. Only + // the active key is used for dialing. + ActiveKeyID string `json:"active_key_id"` +} + +// GetActive returns the active key. 
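+// Only the key returned here is used for dialing; every key in the keyring
+// remains valid for accepting incoming connections.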
+func (k *TLSKeyring) GetActive() *TLSKey { + if k.ActiveKeyID == "" { + return nil + } + + for _, key := range k.Keys { + if key.ID == k.ActiveKeyID { + return key + } + } + return nil +} + +func GenerateTLSKey(reader io.Reader) (*TLSKey, error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), reader) + if err != nil { + return nil, err + } + + host, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + host = fmt.Sprintf("raft-%s", host) + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + // 30 years of single-active uptime ought to be enough for anybody + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + + certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, errwrap.Wrapf("unable to generate local cluster certificate: {{err}}", err) + } + + return &TLSKey{ + ID: host, + KeyType: certutil.PrivateKeyTypeP521, + CertBytes: certBytes, + KeyParams: &certutil.ClusterKeyParams{ + Type: certutil.PrivateKeyTypeP521, + X: key.PublicKey.X, + Y: key.PublicKey.Y, + D: key.D, + }, + CreatedTime: time.Now(), + }, nil +} + +// Make sure raftLayer satisfies the raft.StreamLayer interface +var _ raft.StreamLayer = (*raftLayer)(nil) + +// Make sure raftLayer satisfies the cluster.Handler and cluster.Client +// interfaces +var _ cluster.Handler = (*raftLayer)(nil) +var _ cluster.Client = (*raftLayer)(nil) + +// RaftLayer implements the raft.StreamLayer interface, +// so that we can use a single RPC layer for Raft and Vault +type raftLayer struct { + // Addr is the listener address to return + addr net.Addr + + // connCh is used to accept connections + connCh chan net.Conn + + // Tracks if we are closed + closed bool + closeCh chan struct{} + closeLock sync.Mutex + + logger log.Logger + + dialerFunc func(string, time.Duration) (net.Conn, error) + + // TLS config + keyring *TLSKeyring + baseTLSConfig *tls.Config +} + +// NewRaftLayer creates a new raftLayer object. It parses the TLS information +// from the network config. 
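+//
+// A construction sketch matching the call in SetupCluster (the keyring,
+// cluster listener, and base TLS config are assumed to already exist):
+//
+//	layer, err := NewRaftLayer(logger.Named("stream"), keyring, clusterListener.Addr(), baseTLSConfig)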
+func NewRaftLayer(logger log.Logger, raftTLSKeyring *TLSKeyring, clusterAddr net.Addr, baseTLSConfig *tls.Config) (*raftLayer, error) { + switch { + case clusterAddr == nil: + // Clustering disabled on the server, don't try to look for params + return nil, errors.New("no raft addr found") + } + + layer := &raftLayer{ + addr: clusterAddr, + connCh: make(chan net.Conn), + closeCh: make(chan struct{}), + logger: logger, + baseTLSConfig: baseTLSConfig, + } + + if err := layer.setTLSKeyring(raftTLSKeyring); err != nil { + return nil, err + } + + return layer, nil +} + +func (l *raftLayer) setTLSKeyring(keyring *TLSKeyring) error { + // Fast path a noop update + if l.keyring != nil && l.keyring.Term == keyring.Term { + return nil + } + + for _, key := range keyring.Keys { + switch { + case key.KeyParams == nil: + return errors.New("no raft cluster key params found") + + case key.KeyParams.X == nil, key.KeyParams.Y == nil, key.KeyParams.D == nil: + return errors.New("failed to parse raft cluster key") + + case key.KeyParams.Type != certutil.PrivateKeyTypeP521: + return errors.New("failed to find valid raft cluster key type") + + case len(key.CertBytes) == 0: + return errors.New("no cluster cert found") + } + + parsedCert, err := x509.ParseCertificate(key.CertBytes) + if err != nil { + return errwrap.Wrapf("error parsing raft cluster certificate: {{err}}", err) + } + + key.parsedCert = parsedCert + key.parsedKey = &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: elliptic.P521(), + X: key.KeyParams.X, + Y: key.KeyParams.Y, + }, + D: key.KeyParams.D, + } + } + + if keyring.GetActive() == nil { + return errors.New("expected one active key to be present in the keyring") + } + + l.keyring = keyring + + return nil +} + +func (l *raftLayer) ClientLookup(ctx context.Context, requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) { + for _, subj := range requestInfo.AcceptableCAs { + for _, key := range l.keyring.Keys { + if bytes.Equal(subj, key.parsedCert.RawIssuer) { + localCert := make([]byte, len(key.CertBytes)) + copy(localCert, key.CertBytes) + + return &tls.Certificate{ + Certificate: [][]byte{localCert}, + PrivateKey: key.parsedKey, + Leaf: key.parsedCert, + }, nil + } + } + } + + return nil, nil +} + +func (l *raftLayer) ServerLookup(ctx context.Context, clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + if l.keyring == nil { + return nil, errors.New("got raft connection but no local cert") + } + + for _, key := range l.keyring.Keys { + if clientHello.ServerName == key.ID { + localCert := make([]byte, len(key.CertBytes)) + copy(localCert, key.CertBytes) + + return &tls.Certificate{ + Certificate: [][]byte{localCert}, + PrivateKey: key.parsedKey, + Leaf: key.parsedCert, + }, nil + } + } + + return nil, nil +} + +// CALookup returns the CA to use when validating this connection. +func (l *raftLayer) CALookup(context.Context) ([]*x509.Certificate, error) { + ret := make([]*x509.Certificate, len(l.keyring.Keys)) + for i, key := range l.keyring.Keys { + ret[i] = key.parsedCert + } + return ret, nil +} + +// Stop shutsdown the raft layer. +func (l *raftLayer) Stop() error { + l.Close() + return nil +} + +// Handoff is used to hand off a connection to the +// RaftLayer. 
This allows it to be Accept()'ed +func (l *raftLayer) Handoff(ctx context.Context, wg *sync.WaitGroup, quit chan struct{}, conn *tls.Conn) error { + l.closeLock.Lock() + closed := l.closed + l.closeLock.Unlock() + + if closed { + return errors.New("raft is shutdown") + } + + wg.Add(1) + go func() { + defer wg.Done() + select { + case l.connCh <- conn: + case <-l.closeCh: + case <-ctx.Done(): + case <-quit: + } + }() + + return nil +} + +// Accept is used to return connection which are +// dialed to be used with the Raft layer +func (l *raftLayer) Accept() (net.Conn, error) { + select { + case conn := <-l.connCh: + return conn, nil + case <-l.closeCh: + return nil, fmt.Errorf("Raft RPC layer closed") + } +} + +// Close is used to stop listening for Raft connections +func (l *raftLayer) Close() error { + l.closeLock.Lock() + defer l.closeLock.Unlock() + + if !l.closed { + l.closed = true + close(l.closeCh) + } + return nil +} + +// Addr is used to return the address of the listener +func (l *raftLayer) Addr() net.Addr { + return l.addr +} + +// Dial is used to create a new outgoing connection +func (l *raftLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error) { + + tlsConfig := l.baseTLSConfig.Clone() + + key := l.keyring.GetActive() + if key == nil { + return nil, errors.New("no active key") + } + + tlsConfig.NextProtos = []string{consts.RaftStorageALPN} + tlsConfig.ServerName = key.parsedCert.Subject.CommonName + + l.logger.Debug("creating rpc dialer", "host", tlsConfig.ServerName) + + pool := x509.NewCertPool() + pool.AddCert(key.parsedCert) + tlsConfig.RootCAs = pool + tlsConfig.ClientCAs = pool + + dialer := &net.Dialer{ + Timeout: timeout, + } + return tls.DialWithDialer(dialer, "tcp", string(address), tlsConfig) +} diff --git a/vendor/github.com/hashicorp/vault/physical/raft/types.pb.go b/vendor/github.com/hashicorp/vault/physical/raft/types.pb.go new file mode 100644 index 00000000..b16b9f7b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/raft/types.pb.go @@ -0,0 +1,311 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: physical/raft/types.proto + +package raft + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type LogOperation struct { + // OpType is the Operation type + OpType uint32 `protobuf:"varint,1,opt,name=op_type,json=opType,proto3" json:"op_type,omitempty"` + // Flags is an opaque value, currently unused. Reserved. 
+ Flags uint64 `protobuf:"varint,2,opt,name=flags,proto3" json:"flags,omitempty"` + // Key that is being affected + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Value is optional, corresponds to the key + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogOperation) Reset() { *m = LogOperation{} } +func (m *LogOperation) String() string { return proto.CompactTextString(m) } +func (*LogOperation) ProtoMessage() {} +func (*LogOperation) Descriptor() ([]byte, []int) { + return fileDescriptor_a8b3efb4def82ab3, []int{0} +} + +func (m *LogOperation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogOperation.Unmarshal(m, b) +} +func (m *LogOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogOperation.Marshal(b, m, deterministic) +} +func (m *LogOperation) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogOperation.Merge(m, src) +} +func (m *LogOperation) XXX_Size() int { + return xxx_messageInfo_LogOperation.Size(m) +} +func (m *LogOperation) XXX_DiscardUnknown() { + xxx_messageInfo_LogOperation.DiscardUnknown(m) +} + +var xxx_messageInfo_LogOperation proto.InternalMessageInfo + +func (m *LogOperation) GetOpType() uint32 { + if m != nil { + return m.OpType + } + return 0 +} + +func (m *LogOperation) GetFlags() uint64 { + if m != nil { + return m.Flags + } + return 0 +} + +func (m *LogOperation) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LogOperation) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type LogData struct { + Operations []*LogOperation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogData) Reset() { *m = LogData{} } +func (m *LogData) String() string { return proto.CompactTextString(m) } +func (*LogData) ProtoMessage() {} +func (*LogData) Descriptor() ([]byte, []int) { + return fileDescriptor_a8b3efb4def82ab3, []int{1} +} + +func (m *LogData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogData.Unmarshal(m, b) +} +func (m *LogData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogData.Marshal(b, m, deterministic) +} +func (m *LogData) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogData.Merge(m, src) +} +func (m *LogData) XXX_Size() int { + return xxx_messageInfo_LogData.Size(m) +} +func (m *LogData) XXX_DiscardUnknown() { + xxx_messageInfo_LogData.DiscardUnknown(m) +} + +var xxx_messageInfo_LogData proto.InternalMessageInfo + +func (m *LogData) GetOperations() []*LogOperation { + if m != nil { + return m.Operations + } + return nil +} + +type IndexValue struct { + Term uint64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Index uint64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IndexValue) Reset() { *m = IndexValue{} } +func (m *IndexValue) String() string { return proto.CompactTextString(m) } +func (*IndexValue) ProtoMessage() {} +func (*IndexValue) Descriptor() ([]byte, []int) { + return fileDescriptor_a8b3efb4def82ab3, []int{2} +} + +func (m *IndexValue) XXX_Unmarshal(b []byte) error { + 
return xxx_messageInfo_IndexValue.Unmarshal(m, b) +} +func (m *IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexValue.Marshal(b, m, deterministic) +} +func (m *IndexValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexValue.Merge(m, src) +} +func (m *IndexValue) XXX_Size() int { + return xxx_messageInfo_IndexValue.Size(m) +} +func (m *IndexValue) XXX_DiscardUnknown() { + xxx_messageInfo_IndexValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexValue proto.InternalMessageInfo + +func (m *IndexValue) GetTerm() uint64 { + if m != nil { + return m.Term + } + return 0 +} + +func (m *IndexValue) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +type Server struct { + Suffrage int32 `protobuf:"varint,1,opt,name=suffrage,proto3" json:"suffrage,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Server) Reset() { *m = Server{} } +func (m *Server) String() string { return proto.CompactTextString(m) } +func (*Server) ProtoMessage() {} +func (*Server) Descriptor() ([]byte, []int) { + return fileDescriptor_a8b3efb4def82ab3, []int{3} +} + +func (m *Server) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Server.Unmarshal(m, b) +} +func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Server.Marshal(b, m, deterministic) +} +func (m *Server) XXX_Merge(src proto.Message) { + xxx_messageInfo_Server.Merge(m, src) +} +func (m *Server) XXX_Size() int { + return xxx_messageInfo_Server.Size(m) +} +func (m *Server) XXX_DiscardUnknown() { + xxx_messageInfo_Server.DiscardUnknown(m) +} + +var xxx_messageInfo_Server proto.InternalMessageInfo + +func (m *Server) GetSuffrage() int32 { + if m != nil { + return m.Suffrage + } + return 0 +} + +func (m *Server) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Server) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +type ConfigurationValue struct { + Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Servers []*Server `protobuf:"bytes,2,rep,name=servers,proto3" json:"servers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigurationValue) Reset() { *m = ConfigurationValue{} } +func (m *ConfigurationValue) String() string { return proto.CompactTextString(m) } +func (*ConfigurationValue) ProtoMessage() {} +func (*ConfigurationValue) Descriptor() ([]byte, []int) { + return fileDescriptor_a8b3efb4def82ab3, []int{4} +} + +func (m *ConfigurationValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigurationValue.Unmarshal(m, b) +} +func (m *ConfigurationValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigurationValue.Marshal(b, m, deterministic) +} +func (m *ConfigurationValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigurationValue.Merge(m, src) +} +func (m *ConfigurationValue) XXX_Size() int { + return xxx_messageInfo_ConfigurationValue.Size(m) +} +func (m *ConfigurationValue) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigurationValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigurationValue proto.InternalMessageInfo + +func (m 
*ConfigurationValue) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *ConfigurationValue) GetServers() []*Server { + if m != nil { + return m.Servers + } + return nil +} + +func init() { + proto.RegisterType((*LogOperation)(nil), "raft.LogOperation") + proto.RegisterType((*LogData)(nil), "raft.LogData") + proto.RegisterType((*IndexValue)(nil), "raft.IndexValue") + proto.RegisterType((*Server)(nil), "raft.Server") + proto.RegisterType((*ConfigurationValue)(nil), "raft.ConfigurationValue") +} + +func init() { proto.RegisterFile("physical/raft/types.proto", fileDescriptor_a8b3efb4def82ab3) } + +var fileDescriptor_a8b3efb4def82ab3 = []byte{ + // 322 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0xc1, 0x4b, 0xc3, 0x30, + 0x14, 0xc6, 0xc9, 0xd6, 0xad, 0xee, 0x39, 0x45, 0x1e, 0x82, 0xd5, 0x53, 0xe9, 0x41, 0x8a, 0x87, + 0x16, 0x26, 0x78, 0xf3, 0xa2, 0x5e, 0x84, 0xa1, 0x10, 0xc5, 0x83, 0x17, 0xc9, 0xd6, 0xb4, 0x0d, + 0x76, 0x4b, 0x48, 0xd2, 0x61, 0xff, 0x7b, 0x49, 0x63, 0xc7, 0xbc, 0xbd, 0xaf, 0xfd, 0x92, 0xef, + 0xf7, 0xe5, 0xc1, 0xa5, 0xaa, 0x3b, 0x23, 0xd6, 0xac, 0xc9, 0x35, 0x2b, 0x6d, 0x6e, 0x3b, 0xc5, + 0x4d, 0xa6, 0xb4, 0xb4, 0x12, 0x03, 0xf7, 0x25, 0xe1, 0x30, 0x5f, 0xca, 0xea, 0x55, 0x71, 0xcd, + 0xac, 0x90, 0x5b, 0xbc, 0x80, 0x50, 0xaa, 0x2f, 0xe7, 0x8b, 0x48, 0x4c, 0xd2, 0x13, 0x3a, 0x95, + 0xea, 0xbd, 0x53, 0x1c, 0xcf, 0x61, 0x52, 0x36, 0xac, 0x32, 0xd1, 0x28, 0x26, 0x69, 0x40, 0xbd, + 0xc0, 0x33, 0x18, 0x7f, 0xf3, 0x2e, 0x1a, 0xc7, 0x24, 0x9d, 0x51, 0x37, 0x3a, 0xdf, 0x8e, 0x35, + 0x2d, 0x8f, 0x82, 0x98, 0xa4, 0x73, 0xea, 0x45, 0x72, 0x0f, 0xe1, 0x52, 0x56, 0x4f, 0xcc, 0x32, + 0x5c, 0x00, 0xc8, 0x21, 0xce, 0x44, 0x24, 0x1e, 0xa7, 0xc7, 0x0b, 0xcc, 0x1c, 0x4c, 0x76, 0x48, + 0x42, 0x0f, 0x5c, 0xc9, 0x1d, 0xc0, 0xf3, 0xb6, 0xe0, 0x3f, 0x1f, 0xee, 0x32, 0x44, 0x08, 0x2c, + 0xd7, 0x9b, 0x1e, 0x30, 0xa0, 0xfd, 0xec, 0x62, 0x85, 0x73, 0x0c, 0x78, 0xbd, 0x48, 0x5e, 0x60, + 0xfa, 0xc6, 0xf5, 0x8e, 0x6b, 0xbc, 0x82, 0x23, 0xd3, 0x96, 0xa5, 0x66, 0x95, 0x2f, 0x36, 0xa1, + 0x7b, 0x8d, 0xa7, 0x30, 0x12, 0x45, 0x7f, 0x70, 0x46, 0x47, 0xa2, 0xc0, 0x08, 0x42, 0x56, 0x14, + 0x9a, 0x1b, 0xf3, 0x57, 0x6c, 0x90, 0x09, 0x05, 0x7c, 0x94, 0xdb, 0x52, 0x54, 0xad, 0x27, 0xf3, + 0x3c, 0xfb, 0x6c, 0x72, 0x90, 0x8d, 0xd7, 0x10, 0x9a, 0x3e, 0xdb, 0x3d, 0x99, 0x2b, 0x39, 0xf7, + 0x25, 0x3d, 0x10, 0x1d, 0x7e, 0x3e, 0xdc, 0x7c, 0xa6, 0x95, 0xb0, 0x75, 0xbb, 0xca, 0xd6, 0x72, + 0x93, 0xd7, 0xcc, 0xd4, 0x62, 0x2d, 0xb5, 0xca, 0x77, 0xac, 0x6d, 0x6c, 0xfe, 0x6f, 0x7f, 0xab, + 0x69, 0xbf, 0xba, 0xdb, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x85, 0xad, 0xad, 0xd7, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/vault/physical/raft/types.proto b/vendor/github.com/hashicorp/vault/physical/raft/types.proto new file mode 100644 index 00000000..1bb923fa --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/raft/types.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/physical/raft"; + +package raft; + +message LogOperation { + // OpType is the Operation type + uint32 op_type = 1; + + // Flags is an opaque value, currently unused. Reserved. 
+ uint64 flags = 2; + + // Key that is being affected + string key = 3; + + // Value is optional, corresponds to the key + bytes value = 4; +} + +message LogData { + repeated LogOperation operations = 1; +} + +message IndexValue { + uint64 term = 1; + uint64 index = 2; +} + +message Server { + int32 suffrage = 1; + string id = 2; + string address = 3; +} + +message ConfigurationValue { + uint64 index = 1; + repeated Server servers = 2; +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go new file mode 100644 index 00000000..f8bf307a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go @@ -0,0 +1,359 @@ +package mysql + +import ( + "context" + "database/sql" + "errors" + "strings" + "time" + + stdmysql "github.com/go-sql-driver/mysql" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/database/helper/credsutil" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/strutil" +) + +const ( + defaultMysqlRevocationStmts = ` + REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; + DROP USER '{{name}}'@'%' + ` + + defaultMySQLRotateCredentialsSQL = ` + ALTER USER '{{username}}'@'%' IDENTIFIED BY '{{password}}'; + ` + + mySQLTypeName = "mysql" +) + +var ( + MetadataLen int = 10 + LegacyMetadataLen int = 4 + UsernameLen int = 32 + LegacyUsernameLen int = 16 +) + +var _ dbplugin.Database = (*MySQL)(nil) + +type MySQL struct { + *connutil.SQLConnectionProducer + credsutil.CredentialsProducer +} + +// New implements builtinplugins.BuiltinFactory +func New(displayNameLen, roleNameLen, usernameLen int) func() (interface{}, error) { + return func() (interface{}, error) { + db := new(displayNameLen, roleNameLen, usernameLen) + // Wrap the plugin with middleware to sanitize errors + dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.SecretValues) + + return dbType, nil + } +} + +func new(displayNameLen, roleNameLen, usernameLen int) *MySQL { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = mySQLTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: displayNameLen, + RoleNameLen: roleNameLen, + UsernameLen: usernameLen, + Separator: "-", + } + + return &MySQL{ + SQLConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } +} + +// Run instantiates a MySQL object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + return runCommon(false, apiTLSConfig) +} + +// Run instantiates a MySQL object, and runs the RPC server for the plugin +func RunLegacy(apiTLSConfig *api.TLSConfig) error { + return runCommon(true, apiTLSConfig) +} + +func runCommon(legacy bool, apiTLSConfig *api.TLSConfig) error { + var f func() (interface{}, error) + if legacy { + f = New(credsutil.NoneLength, LegacyMetadataLen, LegacyUsernameLen) + } else { + f = New(MetadataLen, MetadataLen, UsernameLen) + } + dbType, err := f() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig)) + + return nil +} + +func (m *MySQL) Type() (string, error) { + return mySQLTypeName, nil +} + +func (m *MySQL) getConnection(ctx context.Context) (*sql.DB, error) { + db, err := m.Connection(ctx) + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +func (m 
*MySQL) CreateUser(ctx context.Context, statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { + statements = dbutil.StatementCompatibilityHelper(statements) + + if len(statements.Creation) == 0 { + return "", "", dbutil.ErrEmptyCreationStatement + } + + username, err = m.GenerateUsername(usernameConfig) + if err != nil { + return "", "", err + } + + password, err = m.GeneratePassword() + if err != nil { + return "", "", err + } + + expirationStr, err := m.GenerateExpiration(expiration) + if err != nil { + return "", "", err + } + + queryMap := map[string]string{ + "name": username, + "password": password, + "expiration": expirationStr, + } + + if err := m.executePreparedStatementsWithMap(ctx, statements.Creation, queryMap); err != nil { + return "", "", err + } + return username, password, nil +} + +// RenewUser is a NOOP +func (m *MySQL) RenewUser(ctx context.Context, statements dbplugin.Statements, username string, expiration time.Time) error { + return nil +} + +func (m *MySQL) RevokeUser(ctx context.Context, statements dbplugin.Statements, username string) error { + // Grab the lock + m.Lock() + defer m.Unlock() + + statements = dbutil.StatementCompatibilityHelper(statements) + + // Get the connection + db, err := m.getConnection(ctx) + if err != nil { + return err + } + + revocationStmts := statements.Revocation + // Use a default SQL statement for revocation if one cannot be fetched from the role + if len(revocationStmts) == 0 { + revocationStmts = []string{defaultMysqlRevocationStmts} + } + + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + for _, stmt := range revocationStmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + // This is not a prepared statement because not all commands are supported + // 1295: This command is not supported in the prepared statement protocol yet + // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/ + query = strings.Replace(query, "{{name}}", username, -1) + _, err = tx.ExecContext(ctx, query) + if err != nil { + return err + } + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +func (m *MySQL) RotateRootCredentials(ctx context.Context, statements []string) (map[string]interface{}, error) { + m.Lock() + defer m.Unlock() + + if len(m.Username) == 0 || len(m.Password) == 0 { + return nil, errors.New("username and password are required to rotate") + } + + rotateStatements := statements + if len(rotateStatements) == 0 { + rotateStatements = []string{defaultMySQLRotateCredentialsSQL} + } + + db, err := m.getConnection(ctx) + if err != nil { + return nil, err + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + defer func() { + tx.Rollback() + }() + + password, err := m.GeneratePassword() + if err != nil { + return nil, err + } + + for _, stmt := range rotateStatements { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + // This is not a prepared statement because not all commands are supported + // 1295: This command is not supported in the prepared statement protocol yet + // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/ + query = strings.Replace(query, "{{username}}", m.Username, -1) + query = 
strings.Replace(query, "{{password}}", password, -1) + + if _, err := tx.ExecContext(ctx, query); err != nil { + return nil, err + } + } + } + + if err := tx.Commit(); err != nil { + return nil, err + } + + // Close the database connection to ensure no new connections come in + if err := db.Close(); err != nil { + return nil, err + } + + m.RawConfig["password"] = password + return m.RawConfig, nil +} + +// SetCredentials uses provided information to set the password for a user in the +// database. Unlike CreateUser, this method requires a username be provided and +// uses the name given, instead of generating a name. This is used for setting +// the password of static accounts, as well as rolling back passwords in the +// database in the event an updated database fails to save in Vault's storage. +func (m *MySQL) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) { + rotateStatements := statements.Rotation + if len(rotateStatements) == 0 { + rotateStatements = []string{defaultMySQLRotateCredentialsSQL} + } + + username = staticUser.Username + password = staticUser.Password + if username == "" || password == "" { + return "", "", errors.New("must provide both username and password") + } + + queryMap := map[string]string{ + "name": username, + "password": password, + } + + if err := m.executePreparedStatementsWithMap(ctx, rotateStatements, queryMap); err != nil { + return "", "", err + } + return username, password, nil +} + +// executePreparedStatementsWithMap loops through the given templated SQL statements, +// interpolating values from the map into each template, and executes the +// resulting statements inside a single transaction +func (m *MySQL) executePreparedStatementsWithMap(ctx context.Context, statements []string, queryMap map[string]string) error { + // Grab the lock + m.Lock() + defer m.Unlock() + + // Get the connection + db, err := m.getConnection(ctx) + if err != nil { + return err + } + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer func() { + _ = tx.Rollback() + }() + + // Execute each query + for _, stmt := range statements { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + query = dbutil.QueryHelper(query, queryMap) + + stmt, err := tx.PrepareContext(ctx, query) + if err != nil { + // If the error code we get back is Error 1295: This command is not + // supported in the prepared statement protocol yet, we will execute + // the statement without preparing it. This allows the caller to + // manually prepare statements, as well as run other not yet + // prepare supported commands. If there is no error when running we + // will continue to the next statement. 
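+ // As a hypothetical example, a role whose statements include a + // GRANT the server cannot prepare would run through this fallback + // path instead of failing outright; it is the same reason + // RevokeUser and RotateRootCredentials above call ExecContext + // directly rather than preparing their statements.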
+ if e, ok := err.(*stdmysql.MySQLError); ok && e.Number == 1295 { + // stmt is nil when PrepareContext returns an error, so there + // is nothing to close here + _, err = tx.ExecContext(ctx, query) + if err != nil { + return err + } + continue + } + + return err + } + if _, err := stmt.ExecContext(ctx); err != nil { + stmt.Close() + return err + } + stmt.Close() + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go new file mode 100644 index 00000000..34493f80 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go @@ -0,0 +1,502 @@ +package postgresql + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/database/helper/credsutil" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/lib/pq" +) + +const ( + postgreSQLTypeName = "postgres" + defaultPostgresRenewSQL = ` +ALTER ROLE "{{name}}" VALID UNTIL '{{expiration}}'; +` + defaultPostgresRotateRootCredentialsSQL = ` +ALTER ROLE "{{username}}" WITH PASSWORD '{{password}}'; +` + + defaultPostgresRotateCredentialsSQL = ` +ALTER ROLE "{{name}}" WITH PASSWORD '{{password}}'; +` +) + +var _ dbplugin.Database = &PostgreSQL{} + +// New implements builtinplugins.BuiltinFactory +func New() (interface{}, error) { + db := new() + // Wrap the plugin with middleware to sanitize errors + dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.SecretValues) + return dbType, nil +} + +func new() *PostgreSQL { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = postgreSQLTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: 8, + RoleNameLen: 8, + UsernameLen: 63, + Separator: "-", + } + + db := &PostgreSQL{ + SQLConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } + + return db +} + +// Run instantiates a PostgreSQL object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + dbType, err := New() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig)) + + return nil +} + +type PostgreSQL struct { + *connutil.SQLConnectionProducer + credsutil.CredentialsProducer +} + +func (p *PostgreSQL) Type() (string, error) { + return postgreSQLTypeName, nil +} + +func (p *PostgreSQL) getConnection(ctx context.Context) (*sql.DB, error) { + db, err := p.Connection(ctx) + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +// SetCredentials uses provided information to set/create a user in the +// database. Unlike CreateUser, this method requires a username be provided and +// uses the name given, instead of generating a name. This is used for creating +// and setting the password of static accounts, as well as rolling back +// passwords in the database in the event an updated database fails to save in +// Vault's storage. 
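+// Unlike the MySQL implementation, no default is applied here: the caller +// must supply rotation statements, typically shaped like +// defaultPostgresRotateCredentialsSQL above, e.g. +// ALTER ROLE "{{name}}" WITH PASSWORD '{{password}}';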
+func (p *PostgreSQL) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) { + if len(statements.Rotation) == 0 { + return "", "", errors.New("empty rotation statements") + } + + username = staticUser.Username + password = staticUser.Password + if username == "" || password == "" { + return "", "", errors.New("must provide both username and password") + } + + // Grab the lock + p.Lock() + defer p.Unlock() + + // Get the connection + db, err := p.getConnection(ctx) + if err != nil { + return "", "", err + } + + // Check if the role exists + var exists bool + err = db.QueryRowContext(ctx, "SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists) + if err != nil && err != sql.ErrNoRows { + return "", "", err + } + + // Vault requires that the database user already exists, and that the + // credentials used to execute the rotation statements have sufficient + // privileges. + stmts := statements.Rotation + + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return "", "", err + } + defer func() { + _ = tx.Rollback() + }() + + // Execute each query + for _, stmt := range stmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": staticUser.Username, + "password": password, + } + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + return "", "", err + } + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return "", "", err + } + + return username, password, nil +} + +func (p *PostgreSQL) CreateUser(ctx context.Context, statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { + statements = dbutil.StatementCompatibilityHelper(statements) + + if len(statements.Creation) == 0 { + return "", "", dbutil.ErrEmptyCreationStatement + } + + // Grab the lock + p.Lock() + defer p.Unlock() + + username, err = p.GenerateUsername(usernameConfig) + if err != nil { + return "", "", err + } + + password, err = p.GeneratePassword() + if err != nil { + return "", "", err + } + + expirationStr, err := p.GenerateExpiration(expiration) + if err != nil { + return "", "", err + } + + // Get the connection + db, err := p.getConnection(ctx) + if err != nil { + return "", "", err + } + + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return "", "", err + } + defer func() { + tx.Rollback() + }() + + // Execute each query + for _, stmt := range statements.Creation { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "password": password, + "expiration": expirationStr, + } + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + return "", "", err + } + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return "", "", err + } + + return username, password, nil +} + +func (p *PostgreSQL) RenewUser(ctx context.Context, statements dbplugin.Statements, username string, expiration time.Time) error { + p.Lock() + defer p.Unlock() + + statements = dbutil.StatementCompatibilityHelper(statements) + + renewStmts := statements.Renewal + if len(renewStmts) == 0 { + renewStmts = []string{defaultPostgresRenewSQL} + } + + db, err := 
p.getConnection(ctx) + if err != nil { + return err + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer func() { + tx.Rollback() + }() + + expirationStr, err := p.GenerateExpiration(expiration) + if err != nil { + return err + } + + for _, stmt := range renewStmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "expiration": expirationStr, + } + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + return err + } + } + } + + return tx.Commit() +} + +func (p *PostgreSQL) RevokeUser(ctx context.Context, statements dbplugin.Statements, username string) error { + // Grab the lock + p.Lock() + defer p.Unlock() + + statements = dbutil.StatementCompatibilityHelper(statements) + + if len(statements.Revocation) == 0 { + return p.defaultRevokeUser(ctx, username) + } + + return p.customRevokeUser(ctx, username, statements.Revocation) +} + +func (p *PostgreSQL) customRevokeUser(ctx context.Context, username string, revocationStmts []string) error { + db, err := p.getConnection(ctx) + if err != nil { + return err + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer func() { + tx.Rollback() + }() + + for _, stmt := range revocationStmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + } + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + return err + } + } + } + + return tx.Commit() +} + +func (p *PostgreSQL) defaultRevokeUser(ctx context.Context, username string) error { + db, err := p.getConnection(ctx) + if err != nil { + return err + } + + // Check if the role exists + var exists bool + err = db.QueryRowContext(ctx, "SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists) + if err != nil && err != sql.ErrNoRows { + return err + } + + if !exists { + return nil + } + + // Query for permissions; we need to revoke permissions before we can drop + // the role + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + stmt, err := db.PrepareContext(ctx, "SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;") + if err != nil { + return err + } + defer stmt.Close() + + rows, err := stmt.QueryContext(ctx, username) + if err != nil { + return err + } + defer rows.Close() + + const initialNumRevocations = 16 + revocationStmts := make([]string, 0, initialNumRevocations) + for rows.Next() { + var schema string + err = rows.Scan(&schema) + if err != nil { + // keep going; remove as many permissions as possible right now + continue + } + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`, + pq.QuoteIdentifier(schema), + pq.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE USAGE ON SCHEMA %s FROM %s;`, + pq.QuoteIdentifier(schema), + pq.QuoteIdentifier(username))) + } + + // for good measure, revoke all privileges and usage on schema public + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`, + pq.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + "REVOKE ALL PRIVILEGES ON ALL 
SEQUENCES IN SCHEMA public FROM %s;", + pq.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + "REVOKE USAGE ON SCHEMA public FROM %s;", + pq.QuoteIdentifier(username))) + + // get the current database name so we can issue a REVOKE CONNECT for + // this username + var dbname sql.NullString + if err := db.QueryRowContext(ctx, "SELECT current_database();").Scan(&dbname); err != nil { + return err + } + + if dbname.Valid { + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE CONNECT ON DATABASE %s FROM %s;`, + pq.QuoteIdentifier(dbname.String), + pq.QuoteIdentifier(username))) + } + + // again, here, we do not stop on error, as we want to remove as + // many permissions as possible right now + var lastStmtError error + for _, query := range revocationStmts { + if err := dbtxn.ExecuteDBQuery(ctx, db, nil, query); err != nil { + lastStmtError = err + } + } + + // can't drop if not all privileges are revoked + if rows.Err() != nil { + return errwrap.Wrapf("could not generate revocation statements for all rows: {{err}}", rows.Err()) + } + if lastStmtError != nil { + return errwrap.Wrapf("could not perform all revocation statements: {{err}}", lastStmtError) + } + + // Drop this user + stmt, err = db.PrepareContext(ctx, fmt.Sprintf( + `DROP ROLE IF EXISTS %s;`, pq.QuoteIdentifier(username))) + if err != nil { + return err + } + defer stmt.Close() + if _, err := stmt.ExecContext(ctx); err != nil { + return err + } + + return nil +} + +func (p *PostgreSQL) RotateRootCredentials(ctx context.Context, statements []string) (map[string]interface{}, error) { + p.Lock() + defer p.Unlock() + + if len(p.Username) == 0 || len(p.Password) == 0 { + return nil, errors.New("username and password are required to rotate") + } + + rotateStatements := statements + if len(rotateStatements) == 0 { + rotateStatements = []string{defaultPostgresRotateRootCredentialsSQL} + } + + db, err := p.getConnection(ctx) + if err != nil { + return nil, err + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + defer func() { + tx.Rollback() + }() + + password, err := p.GeneratePassword() + if err != nil { + return nil, err + } + + for _, stmt := range rotateStatements { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + m := map[string]string{ + "username": p.Username, + "password": password, + } + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + return nil, err + } + } + } + + if err := tx.Commit(); err != nil { + return nil, err + } + + // Close the database connection to ensure no new connections come in + if err := db.Close(); err != nil { + return nil, err + } + + p.RawConfig["password"] = password + return p.RawConfig, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/client.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/client.go new file mode 100644 index 00000000..907e5352 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/client.go @@ -0,0 +1,89 @@ +package dbplugin + +import ( + "context" + "errors" + "sync" + + log "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/pluginutil" +) + +// DatabasePluginClient embeds a databasePluginRPCClient and wraps its Close +// method to also call Kill() on the plugin.Client. 
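+// Callers should always invoke Close, even on error paths, since Kill() is +// what reaps the plugin subprocess. A minimal usage sketch (the surrounding +// variables are assumed, not defined in this file): +// +// db, err := NewPluginClient(ctx, sys, runner, logger, false) +// if err != nil { +// return err +// } +// defer db.Close()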
+type DatabasePluginClient struct { + client *plugin.Client + sync.Mutex + + Database +} + +// This wraps the Close call and ensures we both close the database connection +// and kill the plugin. +func (dc *DatabasePluginClient) Close() error { + err := dc.Database.Close() + dc.client.Kill() + + return err +} + +// NewPluginClient returns a databaseRPCClient with a connection to a running +// plugin. The client is wrapped in a DatabasePluginClient object to ensure the +// plugin is killed on call of Close(). +func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (Database, error) { + + // pluginSets is the map of plugins we can dispense. + pluginSets := map[int]plugin.PluginSet{ + // Version 3 used to support both protocols. We want to keep it around + // since it's possible old plugins built against this version will still + // work with gRPC. There is currently no difference between version 3 + // and version 4. + 3: plugin.PluginSet{ + "database": new(GRPCDatabasePlugin), + }, + // Version 4 only supports gRPC + 4: plugin.PluginSet{ + "database": new(GRPCDatabasePlugin), + }, + } + + var client *plugin.Client + var err error + if isMetadataMode { + client, err = pluginRunner.RunMetadataMode(ctx, sys, pluginSets, handshakeConfig, []string{}, logger) + } else { + client, err = pluginRunner.Run(ctx, sys, pluginSets, handshakeConfig, []string{}, logger) + } + if err != nil { + return nil, err + } + + // Connect via RPC + rpcClient, err := client.Client() + if err != nil { + return nil, err + } + + // Request the plugin + raw, err := rpcClient.Dispense("database") + if err != nil { + return nil, err + } + + // We should have a database type now. This feels like a normal interface + // implementation but is in fact over an RPC connection. + var db Database + switch c := raw.(type) { + case *gRPCClient: + db = c + default: + return nil, errors.New("unsupported client type") + } + + // Wrap RPC implementation in DatabasePluginClient + return &DatabasePluginClient{ + client: client, + Database: db, + }, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/database.pb.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/database.pb.go new file mode 100644 index 00000000..c8200151 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/database.pb.go @@ -0,0 +1,1344 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: sdk/database/dbplugin/database.proto + +package dbplugin + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Deprecated: Do not use. 
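+// InitializeRequest is retained only for wire compatibility with older +// plugins; InitRequest below carries the same config and verify_connection +// fields and supersedes it.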
+type InitializeRequest struct { + Config []byte `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + VerifyConnection bool `protobuf:"varint,2,opt,name=verify_connection,json=verifyConnection,proto3" json:"verify_connection,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitializeRequest) Reset() { *m = InitializeRequest{} } +func (m *InitializeRequest) String() string { return proto.CompactTextString(m) } +func (*InitializeRequest) ProtoMessage() {} +func (*InitializeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{0} +} + +func (m *InitializeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitializeRequest.Unmarshal(m, b) +} +func (m *InitializeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitializeRequest.Marshal(b, m, deterministic) +} +func (m *InitializeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitializeRequest.Merge(m, src) +} +func (m *InitializeRequest) XXX_Size() int { + return xxx_messageInfo_InitializeRequest.Size(m) +} +func (m *InitializeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitializeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitializeRequest proto.InternalMessageInfo + +func (m *InitializeRequest) GetConfig() []byte { + if m != nil { + return m.Config + } + return nil +} + +func (m *InitializeRequest) GetVerifyConnection() bool { + if m != nil { + return m.VerifyConnection + } + return false +} + +type InitRequest struct { + Config []byte `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + VerifyConnection bool `protobuf:"varint,2,opt,name=verify_connection,json=verifyConnection,proto3" json:"verify_connection,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitRequest) Reset() { *m = InitRequest{} } +func (m *InitRequest) String() string { return proto.CompactTextString(m) } +func (*InitRequest) ProtoMessage() {} +func (*InitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{1} +} + +func (m *InitRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitRequest.Unmarshal(m, b) +} +func (m *InitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitRequest.Marshal(b, m, deterministic) +} +func (m *InitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitRequest.Merge(m, src) +} +func (m *InitRequest) XXX_Size() int { + return xxx_messageInfo_InitRequest.Size(m) +} +func (m *InitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitRequest proto.InternalMessageInfo + +func (m *InitRequest) GetConfig() []byte { + if m != nil { + return m.Config + } + return nil +} + +func (m *InitRequest) GetVerifyConnection() bool { + if m != nil { + return m.VerifyConnection + } + return false +} + +type CreateUserRequest struct { + Statements *Statements `protobuf:"bytes,1,opt,name=statements,proto3" json:"statements,omitempty"` + UsernameConfig *UsernameConfig `protobuf:"bytes,2,opt,name=username_config,json=usernameConfig,proto3" json:"username_config,omitempty"` + Expiration *timestamp.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *CreateUserRequest) Reset() { *m = CreateUserRequest{} } +func (m *CreateUserRequest) String() string { return proto.CompactTextString(m) } +func (*CreateUserRequest) ProtoMessage() {} +func (*CreateUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{2} +} + +func (m *CreateUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateUserRequest.Unmarshal(m, b) +} +func (m *CreateUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateUserRequest.Marshal(b, m, deterministic) +} +func (m *CreateUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateUserRequest.Merge(m, src) +} +func (m *CreateUserRequest) XXX_Size() int { + return xxx_messageInfo_CreateUserRequest.Size(m) +} +func (m *CreateUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateUserRequest proto.InternalMessageInfo + +func (m *CreateUserRequest) GetStatements() *Statements { + if m != nil { + return m.Statements + } + return nil +} + +func (m *CreateUserRequest) GetUsernameConfig() *UsernameConfig { + if m != nil { + return m.UsernameConfig + } + return nil +} + +func (m *CreateUserRequest) GetExpiration() *timestamp.Timestamp { + if m != nil { + return m.Expiration + } + return nil +} + +type RenewUserRequest struct { + Statements *Statements `protobuf:"bytes,1,opt,name=statements,proto3" json:"statements,omitempty"` + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + Expiration *timestamp.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RenewUserRequest) Reset() { *m = RenewUserRequest{} } +func (m *RenewUserRequest) String() string { return proto.CompactTextString(m) } +func (*RenewUserRequest) ProtoMessage() {} +func (*RenewUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{3} +} + +func (m *RenewUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RenewUserRequest.Unmarshal(m, b) +} +func (m *RenewUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RenewUserRequest.Marshal(b, m, deterministic) +} +func (m *RenewUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RenewUserRequest.Merge(m, src) +} +func (m *RenewUserRequest) XXX_Size() int { + return xxx_messageInfo_RenewUserRequest.Size(m) +} +func (m *RenewUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RenewUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RenewUserRequest proto.InternalMessageInfo + +func (m *RenewUserRequest) GetStatements() *Statements { + if m != nil { + return m.Statements + } + return nil +} + +func (m *RenewUserRequest) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *RenewUserRequest) GetExpiration() *timestamp.Timestamp { + if m != nil { + return m.Expiration + } + return nil +} + +type RevokeUserRequest struct { + Statements *Statements `protobuf:"bytes,1,opt,name=statements,proto3" json:"statements,omitempty"` + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RevokeUserRequest) Reset() { *m = 
RevokeUserRequest{} } +func (m *RevokeUserRequest) String() string { return proto.CompactTextString(m) } +func (*RevokeUserRequest) ProtoMessage() {} +func (*RevokeUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{4} +} + +func (m *RevokeUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RevokeUserRequest.Unmarshal(m, b) +} +func (m *RevokeUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RevokeUserRequest.Marshal(b, m, deterministic) +} +func (m *RevokeUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RevokeUserRequest.Merge(m, src) +} +func (m *RevokeUserRequest) XXX_Size() int { + return xxx_messageInfo_RevokeUserRequest.Size(m) +} +func (m *RevokeUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RevokeUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RevokeUserRequest proto.InternalMessageInfo + +func (m *RevokeUserRequest) GetStatements() *Statements { + if m != nil { + return m.Statements + } + return nil +} + +func (m *RevokeUserRequest) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +type RotateRootCredentialsRequest struct { + Statements []string `protobuf:"bytes,1,rep,name=statements,proto3" json:"statements,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RotateRootCredentialsRequest) Reset() { *m = RotateRootCredentialsRequest{} } +func (m *RotateRootCredentialsRequest) String() string { return proto.CompactTextString(m) } +func (*RotateRootCredentialsRequest) ProtoMessage() {} +func (*RotateRootCredentialsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{5} +} + +func (m *RotateRootCredentialsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RotateRootCredentialsRequest.Unmarshal(m, b) +} +func (m *RotateRootCredentialsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RotateRootCredentialsRequest.Marshal(b, m, deterministic) +} +func (m *RotateRootCredentialsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RotateRootCredentialsRequest.Merge(m, src) +} +func (m *RotateRootCredentialsRequest) XXX_Size() int { + return xxx_messageInfo_RotateRootCredentialsRequest.Size(m) +} +func (m *RotateRootCredentialsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RotateRootCredentialsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RotateRootCredentialsRequest proto.InternalMessageInfo + +func (m *RotateRootCredentialsRequest) GetStatements() []string { + if m != nil { + return m.Statements + } + return nil +} + +type Statements struct { + // DEPRECATED, will be removed in 0.12 + CreationStatements string `protobuf:"bytes,1,opt,name=creation_statements,json=creationStatements,proto3" json:"creation_statements,omitempty"` // Deprecated: Do not use. + // DEPRECATED, will be removed in 0.12 + RevocationStatements string `protobuf:"bytes,2,opt,name=revocation_statements,json=revocationStatements,proto3" json:"revocation_statements,omitempty"` // Deprecated: Do not use. + // DEPRECATED, will be removed in 0.12 + RollbackStatements string `protobuf:"bytes,3,opt,name=rollback_statements,json=rollbackStatements,proto3" json:"rollback_statements,omitempty"` // Deprecated: Do not use. 
+ // DEPRECATED, will be removed in 0.12 + RenewStatements string `protobuf:"bytes,4,opt,name=renew_statements,json=renewStatements,proto3" json:"renew_statements,omitempty"` // Deprecated: Do not use. + Creation []string `protobuf:"bytes,5,rep,name=creation,proto3" json:"creation,omitempty"` + Revocation []string `protobuf:"bytes,6,rep,name=revocation,proto3" json:"revocation,omitempty"` + Rollback []string `protobuf:"bytes,7,rep,name=rollback,proto3" json:"rollback,omitempty"` + Renewal []string `protobuf:"bytes,8,rep,name=renewal,proto3" json:"renewal,omitempty"` + Rotation []string `protobuf:"bytes,9,rep,name=rotation,proto3" json:"rotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Statements) Reset() { *m = Statements{} } +func (m *Statements) String() string { return proto.CompactTextString(m) } +func (*Statements) ProtoMessage() {} +func (*Statements) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{6} +} + +func (m *Statements) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Statements.Unmarshal(m, b) +} +func (m *Statements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Statements.Marshal(b, m, deterministic) +} +func (m *Statements) XXX_Merge(src proto.Message) { + xxx_messageInfo_Statements.Merge(m, src) +} +func (m *Statements) XXX_Size() int { + return xxx_messageInfo_Statements.Size(m) +} +func (m *Statements) XXX_DiscardUnknown() { + xxx_messageInfo_Statements.DiscardUnknown(m) +} + +var xxx_messageInfo_Statements proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *Statements) GetCreationStatements() string { + if m != nil { + return m.CreationStatements + } + return "" +} + +// Deprecated: Do not use. +func (m *Statements) GetRevocationStatements() string { + if m != nil { + return m.RevocationStatements + } + return "" +} + +// Deprecated: Do not use. +func (m *Statements) GetRollbackStatements() string { + if m != nil { + return m.RollbackStatements + } + return "" +} + +// Deprecated: Do not use. 
+func (m *Statements) GetRenewStatements() string { + if m != nil { + return m.RenewStatements + } + return "" +} + +func (m *Statements) GetCreation() []string { + if m != nil { + return m.Creation + } + return nil +} + +func (m *Statements) GetRevocation() []string { + if m != nil { + return m.Revocation + } + return nil +} + +func (m *Statements) GetRollback() []string { + if m != nil { + return m.Rollback + } + return nil +} + +func (m *Statements) GetRenewal() []string { + if m != nil { + return m.Renewal + } + return nil +} + +func (m *Statements) GetRotation() []string { + if m != nil { + return m.Rotation + } + return nil +} + +type UsernameConfig struct { + DisplayName string `protobuf:"bytes,1,opt,name=DisplayName,proto3" json:"DisplayName,omitempty"` + RoleName string `protobuf:"bytes,2,opt,name=RoleName,proto3" json:"RoleName,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UsernameConfig) Reset() { *m = UsernameConfig{} } +func (m *UsernameConfig) String() string { return proto.CompactTextString(m) } +func (*UsernameConfig) ProtoMessage() {} +func (*UsernameConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{7} +} + +func (m *UsernameConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UsernameConfig.Unmarshal(m, b) +} +func (m *UsernameConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UsernameConfig.Marshal(b, m, deterministic) +} +func (m *UsernameConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_UsernameConfig.Merge(m, src) +} +func (m *UsernameConfig) XXX_Size() int { + return xxx_messageInfo_UsernameConfig.Size(m) +} +func (m *UsernameConfig) XXX_DiscardUnknown() { + xxx_messageInfo_UsernameConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_UsernameConfig proto.InternalMessageInfo + +func (m *UsernameConfig) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *UsernameConfig) GetRoleName() string { + if m != nil { + return m.RoleName + } + return "" +} + +type InitResponse struct { + Config []byte `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitResponse) Reset() { *m = InitResponse{} } +func (m *InitResponse) String() string { return proto.CompactTextString(m) } +func (*InitResponse) ProtoMessage() {} +func (*InitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{8} +} + +func (m *InitResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitResponse.Unmarshal(m, b) +} +func (m *InitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitResponse.Marshal(b, m, deterministic) +} +func (m *InitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitResponse.Merge(m, src) +} +func (m *InitResponse) XXX_Size() int { + return xxx_messageInfo_InitResponse.Size(m) +} +func (m *InitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitResponse proto.InternalMessageInfo + +func (m *InitResponse) GetConfig() []byte { + if m != nil { + return m.Config + } + return nil +} + +type CreateUserResponse struct { + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string 
`protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateUserResponse) Reset() { *m = CreateUserResponse{} } +func (m *CreateUserResponse) String() string { return proto.CompactTextString(m) } +func (*CreateUserResponse) ProtoMessage() {} +func (*CreateUserResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{9} +} + +func (m *CreateUserResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateUserResponse.Unmarshal(m, b) +} +func (m *CreateUserResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateUserResponse.Marshal(b, m, deterministic) +} +func (m *CreateUserResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateUserResponse.Merge(m, src) +} +func (m *CreateUserResponse) XXX_Size() int { + return xxx_messageInfo_CreateUserResponse.Size(m) +} +func (m *CreateUserResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateUserResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateUserResponse proto.InternalMessageInfo + +func (m *CreateUserResponse) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *CreateUserResponse) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type TypeResponse struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TypeResponse) Reset() { *m = TypeResponse{} } +func (m *TypeResponse) String() string { return proto.CompactTextString(m) } +func (*TypeResponse) ProtoMessage() {} +func (*TypeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{10} +} + +func (m *TypeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TypeResponse.Unmarshal(m, b) +} +func (m *TypeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TypeResponse.Marshal(b, m, deterministic) +} +func (m *TypeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeResponse.Merge(m, src) +} +func (m *TypeResponse) XXX_Size() int { + return xxx_messageInfo_TypeResponse.Size(m) +} +func (m *TypeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TypeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeResponse proto.InternalMessageInfo + +func (m *TypeResponse) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +type RotateRootCredentialsResponse struct { + Config []byte `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RotateRootCredentialsResponse) Reset() { *m = RotateRootCredentialsResponse{} } +func (m *RotateRootCredentialsResponse) String() string { return proto.CompactTextString(m) } +func (*RotateRootCredentialsResponse) ProtoMessage() {} +func (*RotateRootCredentialsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{11} +} + +func (m *RotateRootCredentialsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RotateRootCredentialsResponse.Unmarshal(m, b) +} +func (m *RotateRootCredentialsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_RotateRootCredentialsResponse.Marshal(b, m, deterministic) +} +func (m *RotateRootCredentialsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RotateRootCredentialsResponse.Merge(m, src) +} +func (m *RotateRootCredentialsResponse) XXX_Size() int { + return xxx_messageInfo_RotateRootCredentialsResponse.Size(m) +} +func (m *RotateRootCredentialsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RotateRootCredentialsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RotateRootCredentialsResponse proto.InternalMessageInfo + +func (m *RotateRootCredentialsResponse) GetConfig() []byte { + if m != nil { + return m.Config + } + return nil +} + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{12} +} + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +type GenerateCredentialsResponse struct { + Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GenerateCredentialsResponse) Reset() { *m = GenerateCredentialsResponse{} } +func (m *GenerateCredentialsResponse) String() string { return proto.CompactTextString(m) } +func (*GenerateCredentialsResponse) ProtoMessage() {} +func (*GenerateCredentialsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{13} +} + +func (m *GenerateCredentialsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GenerateCredentialsResponse.Unmarshal(m, b) +} +func (m *GenerateCredentialsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GenerateCredentialsResponse.Marshal(b, m, deterministic) +} +func (m *GenerateCredentialsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenerateCredentialsResponse.Merge(m, src) +} +func (m *GenerateCredentialsResponse) XXX_Size() int { + return xxx_messageInfo_GenerateCredentialsResponse.Size(m) +} +func (m *GenerateCredentialsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GenerateCredentialsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GenerateCredentialsResponse proto.InternalMessageInfo + +func (m *GenerateCredentialsResponse) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type StaticUserConfig struct { + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Create bool `protobuf:"varint,3,opt,name=create,proto3" json:"create,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*StaticUserConfig) Reset() { *m = StaticUserConfig{} } +func (m *StaticUserConfig) String() string { return proto.CompactTextString(m) } +func (*StaticUserConfig) ProtoMessage() {} +func (*StaticUserConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{14} +} + +func (m *StaticUserConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StaticUserConfig.Unmarshal(m, b) +} +func (m *StaticUserConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StaticUserConfig.Marshal(b, m, deterministic) +} +func (m *StaticUserConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_StaticUserConfig.Merge(m, src) +} +func (m *StaticUserConfig) XXX_Size() int { + return xxx_messageInfo_StaticUserConfig.Size(m) +} +func (m *StaticUserConfig) XXX_DiscardUnknown() { + xxx_messageInfo_StaticUserConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_StaticUserConfig proto.InternalMessageInfo + +func (m *StaticUserConfig) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *StaticUserConfig) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *StaticUserConfig) GetCreate() bool { + if m != nil { + return m.Create + } + return false +} + +type SetCredentialsRequest struct { + Statements *Statements `protobuf:"bytes,1,opt,name=statements,proto3" json:"statements,omitempty"` + StaticUserConfig *StaticUserConfig `protobuf:"bytes,2,opt,name=static_user_config,json=staticUserConfig,proto3" json:"static_user_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetCredentialsRequest) Reset() { *m = SetCredentialsRequest{} } +func (m *SetCredentialsRequest) String() string { return proto.CompactTextString(m) } +func (*SetCredentialsRequest) ProtoMessage() {} +func (*SetCredentialsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{15} +} + +func (m *SetCredentialsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetCredentialsRequest.Unmarshal(m, b) +} +func (m *SetCredentialsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetCredentialsRequest.Marshal(b, m, deterministic) +} +func (m *SetCredentialsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetCredentialsRequest.Merge(m, src) +} +func (m *SetCredentialsRequest) XXX_Size() int { + return xxx_messageInfo_SetCredentialsRequest.Size(m) +} +func (m *SetCredentialsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetCredentialsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetCredentialsRequest proto.InternalMessageInfo + +func (m *SetCredentialsRequest) GetStatements() *Statements { + if m != nil { + return m.Statements + } + return nil +} + +func (m *SetCredentialsRequest) GetStaticUserConfig() *StaticUserConfig { + if m != nil { + return m.StaticUserConfig + } + return nil +} + +type SetCredentialsResponse struct { + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetCredentialsResponse) Reset() { *m = SetCredentialsResponse{} } +func (m *SetCredentialsResponse) String() string { return proto.CompactTextString(m) } +func (*SetCredentialsResponse) ProtoMessage() {} 
+func (*SetCredentialsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{16} +} + +func (m *SetCredentialsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetCredentialsResponse.Unmarshal(m, b) +} +func (m *SetCredentialsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetCredentialsResponse.Marshal(b, m, deterministic) +} +func (m *SetCredentialsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetCredentialsResponse.Merge(m, src) +} +func (m *SetCredentialsResponse) XXX_Size() int { + return xxx_messageInfo_SetCredentialsResponse.Size(m) +} +func (m *SetCredentialsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetCredentialsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetCredentialsResponse proto.InternalMessageInfo + +func (m *SetCredentialsResponse) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *SetCredentialsResponse) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func init() { + proto.RegisterType((*InitializeRequest)(nil), "dbplugin.InitializeRequest") + proto.RegisterType((*InitRequest)(nil), "dbplugin.InitRequest") + proto.RegisterType((*CreateUserRequest)(nil), "dbplugin.CreateUserRequest") + proto.RegisterType((*RenewUserRequest)(nil), "dbplugin.RenewUserRequest") + proto.RegisterType((*RevokeUserRequest)(nil), "dbplugin.RevokeUserRequest") + proto.RegisterType((*RotateRootCredentialsRequest)(nil), "dbplugin.RotateRootCredentialsRequest") + proto.RegisterType((*Statements)(nil), "dbplugin.Statements") + proto.RegisterType((*UsernameConfig)(nil), "dbplugin.UsernameConfig") + proto.RegisterType((*InitResponse)(nil), "dbplugin.InitResponse") + proto.RegisterType((*CreateUserResponse)(nil), "dbplugin.CreateUserResponse") + proto.RegisterType((*TypeResponse)(nil), "dbplugin.TypeResponse") + proto.RegisterType((*RotateRootCredentialsResponse)(nil), "dbplugin.RotateRootCredentialsResponse") + proto.RegisterType((*Empty)(nil), "dbplugin.Empty") + proto.RegisterType((*GenerateCredentialsResponse)(nil), "dbplugin.GenerateCredentialsResponse") + proto.RegisterType((*StaticUserConfig)(nil), "dbplugin.StaticUserConfig") + proto.RegisterType((*SetCredentialsRequest)(nil), "dbplugin.SetCredentialsRequest") + proto.RegisterType((*SetCredentialsResponse)(nil), "dbplugin.SetCredentialsResponse") +} + +func init() { + proto.RegisterFile("sdk/database/dbplugin/database.proto", fileDescriptor_cfa445f4444c6876) +} + +var fileDescriptor_cfa445f4444c6876 = []byte{ + // 839 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x14, 0x96, 0xf3, 0xb3, 0x9b, 0x9c, 0x5d, 0xed, 0x26, 0xd3, 0x66, 0x65, 0xb9, 0x85, 0x46, 0x23, + 0x28, 0x8b, 0x10, 0x31, 0xda, 0x82, 0x0a, 0xbd, 0x00, 0xd1, 0x14, 0x15, 0x24, 0x58, 0xa1, 0x49, + 0x7b, 0x83, 0x90, 0xa2, 0x89, 0x33, 0x9b, 0x58, 0xeb, 0x78, 0x8c, 0x67, 0x92, 0x12, 0x9e, 0x80, + 0x37, 0xe0, 0x96, 0x7b, 0x5e, 0x84, 0x87, 0xe1, 0x21, 0x90, 0xc7, 0x1e, 0x7b, 0xfc, 0xb3, 0xad, + 0xd4, 0x85, 0x3b, 0x9f, 0x39, 0xe7, 0x3b, 0xf3, 0x9d, 0x5f, 0x0f, 0xbc, 0x27, 0x96, 0xd7, 0xee, + 0x92, 0x4a, 0xba, 0xa0, 0x82, 0xb9, 0xcb, 0x45, 0x14, 0x6c, 0x57, 0x7e, 0x98, 0x9f, 0x4c, 0xa2, + 0x98, 0x4b, 0x8e, 0x7a, 0x5a, 0xe1, 0x3c, 0x58, 0x71, 0xbe, 0x0a, 0x98, 0xab, 0xce, 0x17, 0xdb, + 0x2b, 0x57, 0xfa, 0x1b, 0x26, 0x24, 0xdd, 0x44, 0xa9, 0x29, 0xfe, 0x19, 0x86, 0xdf, 0x85, 0xbe, + 0xf4, 0x69, 0xe0, 0xff, 
0xc6, 0x08, 0xfb, 0x65, 0xcb, 0x84, 0x44, 0x67, 0x70, 0xe0, 0xf1, 0xf0, + 0xca, 0x5f, 0xd9, 0xd6, 0xd8, 0x3a, 0x3f, 0x26, 0x99, 0x84, 0x3e, 0x82, 0xe1, 0x8e, 0xc5, 0xfe, + 0xd5, 0x7e, 0xee, 0xf1, 0x30, 0x64, 0x9e, 0xf4, 0x79, 0x68, 0xb7, 0xc6, 0xd6, 0x79, 0x8f, 0x0c, + 0x52, 0xc5, 0x34, 0x3f, 0x7f, 0xd2, 0xb2, 0x2d, 0x4c, 0xe0, 0x28, 0xf1, 0xfe, 0x5f, 0xfa, 0xc5, + 0x7f, 0x5b, 0x30, 0x9c, 0xc6, 0x8c, 0x4a, 0xf6, 0x52, 0xb0, 0x58, 0xbb, 0xfe, 0x14, 0x40, 0x48, + 0x2a, 0xd9, 0x86, 0x85, 0x52, 0x28, 0xf7, 0x47, 0x17, 0x77, 0x27, 0x3a, 0x0f, 0x93, 0x59, 0xae, + 0x23, 0x86, 0x1d, 0xfa, 0x1a, 0x4e, 0xb7, 0x82, 0xc5, 0x21, 0xdd, 0xb0, 0x79, 0xc6, 0xac, 0xa5, + 0xa0, 0x76, 0x01, 0x7d, 0x99, 0x19, 0x4c, 0x95, 0x9e, 0x9c, 0x6c, 0x4b, 0x32, 0x7a, 0x02, 0xc0, + 0x7e, 0x8d, 0xfc, 0x98, 0x2a, 0xd2, 0x6d, 0x85, 0x76, 0x26, 0x69, 0xda, 0x27, 0x3a, 0xed, 0x93, + 0x17, 0x3a, 0xed, 0xc4, 0xb0, 0xc6, 0x7f, 0x5a, 0x30, 0x20, 0x2c, 0x64, 0xaf, 0x6e, 0x1f, 0x89, + 0x03, 0x3d, 0x4d, 0x4c, 0x85, 0xd0, 0x27, 0xb9, 0x7c, 0x2b, 0x8a, 0x0c, 0x86, 0x84, 0xed, 0xf8, + 0x35, 0xfb, 0x5f, 0x29, 0xe2, 0x2f, 0xe1, 0x3e, 0xe1, 0x89, 0x29, 0xe1, 0x5c, 0x4e, 0x63, 0xb6, + 0x64, 0x61, 0xd2, 0x93, 0x42, 0xdf, 0xf8, 0x6e, 0xe5, 0xc6, 0xf6, 0x79, 0xdf, 0xf4, 0x8d, 0xff, + 0x69, 0x01, 0x14, 0xd7, 0xa2, 0x47, 0x70, 0xc7, 0x4b, 0x5a, 0xc4, 0xe7, 0xe1, 0xbc, 0xc2, 0xb4, + 0xff, 0xb4, 0x65, 0x5b, 0x04, 0x69, 0xb5, 0x01, 0x7a, 0x0c, 0xa3, 0x98, 0xed, 0xb8, 0x57, 0x83, + 0xb5, 0x72, 0xd8, 0xdd, 0xc2, 0xa0, 0x7c, 0x5b, 0xcc, 0x83, 0x60, 0x41, 0xbd, 0x6b, 0x13, 0xd6, + 0x2e, 0x6e, 0xd3, 0x6a, 0x03, 0xf4, 0x31, 0x0c, 0xe2, 0xa4, 0xf4, 0x26, 0xa2, 0x93, 0x23, 0x4e, + 0x95, 0x6e, 0x56, 0x4a, 0x9e, 0xa6, 0x6c, 0x77, 0x55, 0xf8, 0xb9, 0x9c, 0x24, 0xa7, 0xe0, 0x65, + 0x1f, 0xa4, 0xc9, 0x29, 0x4e, 0x12, 0xac, 0x26, 0x60, 0x1f, 0xa6, 0x58, 0x2d, 0x23, 0x1b, 0x0e, + 0xd5, 0x55, 0x34, 0xb0, 0x7b, 0x4a, 0xa5, 0xc5, 0x14, 0x25, 0x53, 0x9f, 0x7d, 0x8d, 0x4a, 0x65, + 0x7c, 0x09, 0x27, 0xe5, 0xb1, 0x40, 0x63, 0x38, 0x7a, 0xe6, 0x8b, 0x28, 0xa0, 0xfb, 0xcb, 0xa4, + 0xbe, 0x2a, 0xd3, 0xc4, 0x3c, 0x4a, 0xfc, 0x11, 0x1e, 0xb0, 0x4b, 0xa3, 0xfc, 0x5a, 0xc6, 0x0f, + 0xe1, 0x38, 0xdd, 0x13, 0x22, 0xe2, 0xa1, 0x60, 0x37, 0x2d, 0x0a, 0xfc, 0x3d, 0x20, 0x73, 0xf4, + 0x33, 0x6b, 0xb3, 0xb1, 0xac, 0x4a, 0xef, 0x3b, 0xd0, 0x8b, 0xa8, 0x10, 0xaf, 0x78, 0xbc, 0xd4, + 0xb7, 0x6a, 0x19, 0x63, 0x38, 0x7e, 0xb1, 0x8f, 0x58, 0xee, 0x07, 0x41, 0x47, 0xee, 0x23, 0xed, + 0x43, 0x7d, 0xe3, 0xc7, 0xf0, 0xce, 0x0d, 0x8d, 0xf9, 0x06, 0xaa, 0x87, 0xd0, 0xfd, 0x66, 0x13, + 0xc9, 0x3d, 0xfe, 0x02, 0xee, 0x3d, 0x67, 0x21, 0x8b, 0xa9, 0x64, 0x4d, 0x78, 0x93, 0xa0, 0x55, + 0x21, 0xb8, 0x80, 0x41, 0xd2, 0x02, 0xbe, 0x97, 0x84, 0x9b, 0x25, 0xfa, 0x2d, 0x83, 0x55, 0x3c, + 0x55, 0xea, 0x54, 0x5f, 0xf6, 0x48, 0x26, 0xe1, 0x3f, 0x2c, 0x18, 0xcd, 0x58, 0xd3, 0xcc, 0xbd, + 0xdd, 0x94, 0x7f, 0x0b, 0x48, 0x28, 0xce, 0xf3, 0x84, 0x56, 0x79, 0xab, 0x3a, 0x65, 0xb4, 0x19, + 0x17, 0x19, 0x88, 0xca, 0x09, 0xfe, 0x11, 0xce, 0xaa, 0xc4, 0x6e, 0x57, 0xf0, 0x8b, 0xbf, 0xba, + 0xd0, 0x7b, 0x96, 0xfd, 0x2a, 0x91, 0x0b, 0x9d, 0xa4, 0xfa, 0xe8, 0xb4, 0x20, 0xa5, 0x0a, 0xe6, + 0x9c, 0x15, 0x07, 0xa5, 0xf6, 0x78, 0x0e, 0x50, 0x34, 0x1f, 0xba, 0x57, 0x58, 0xd5, 0xfe, 0x46, + 0xce, 0xfd, 0x66, 0x65, 0xe6, 0xe8, 0x73, 0xe8, 0xe7, 0x5b, 0x1f, 0x19, 0x39, 0xa9, 0xfe, 0x0a, + 0x9c, 0x2a, 0xb5, 0x64, 0x93, 0x17, 0xdb, 0xd8, 0xa4, 0x50, 0xdb, 0xd1, 0x75, 0xec, 0x1a, 0x46, + 0x8d, 0x9d, 0x8c, 0x1e, 0x1a, 0x6e, 0x5e, 0xb3, 0x83, 0x9d, 0x0f, 0xde, 0x68, 0x97, 0xc5, 0xf7, + 0x19, 0x74, 0x92, 0x69, 0x46, 0xa3, 0x02, 0x60, 
0xbc, 0x02, 0xcc, 0xfc, 0x96, 0x86, 0xfe, 0x43, + 0xe8, 0x4e, 0x03, 0x2e, 0x1a, 0x2a, 0x52, 0x8b, 0x65, 0x06, 0x27, 0xe5, 0xd6, 0x40, 0x0f, 0x8c, + 0xd6, 0x6a, 0xea, 0x66, 0x67, 0x7c, 0xb3, 0x41, 0x76, 0xff, 0x0f, 0x70, 0xa7, 0x61, 0x50, 0xeb, + 0x6c, 0xde, 0x2f, 0x0e, 0x5e, 0x37, 0xd8, 0x5f, 0x01, 0x14, 0x2f, 0x2b, 0xb3, 0x56, 0xb5, 0xf7, + 0x56, 0x2d, 0x3e, 0xdc, 0xfe, 0xbd, 0x65, 0x3d, 0xbd, 0xf8, 0xe9, 0x93, 0x95, 0x2f, 0xd7, 0xdb, + 0xc5, 0xc4, 0xe3, 0x1b, 0x77, 0x4d, 0xc5, 0xda, 0xf7, 0x78, 0x1c, 0xb9, 0x3b, 0xba, 0x0d, 0xa4, + 0xdb, 0xf8, 0x10, 0x5c, 0x1c, 0xa8, 0xdf, 0xf9, 0xa3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, + 0xf5, 0x87, 0x73, 0x28, 0x0a, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DatabaseClient is the client API for Database service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DatabaseClient interface { + Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeResponse, error) + CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) + RenewUser(ctx context.Context, in *RenewUserRequest, opts ...grpc.CallOption) (*Empty, error) + RevokeUser(ctx context.Context, in *RevokeUserRequest, opts ...grpc.CallOption) (*Empty, error) + RotateRootCredentials(ctx context.Context, in *RotateRootCredentialsRequest, opts ...grpc.CallOption) (*RotateRootCredentialsResponse, error) + Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*InitResponse, error) + Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + SetCredentials(ctx context.Context, in *SetCredentialsRequest, opts ...grpc.CallOption) (*SetCredentialsResponse, error) + GenerateCredentials(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*GenerateCredentialsResponse, error) + Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*Empty, error) +} + +type databaseClient struct { + cc *grpc.ClientConn +} + +func NewDatabaseClient(cc *grpc.ClientConn) DatabaseClient { + return &databaseClient{cc} +} + +func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeResponse, error) { + out := new(TypeResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/Type", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) { + out := new(CreateUserResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/CreateUser", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) RenewUser(ctx context.Context, in *RenewUserRequest, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/dbplugin.Database/RenewUser", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) RevokeUser(ctx context.Context, in *RevokeUserRequest, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/dbplugin.Database/RevokeUser", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) RotateRootCredentials(ctx context.Context, in *RotateRootCredentialsRequest, opts ...grpc.CallOption) (*RotateRootCredentialsResponse, error) { + out := new(RotateRootCredentialsResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/RotateRootCredentials", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*InitResponse, error) { + out := new(InitResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/Init", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/dbplugin.Database/Close", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) SetCredentials(ctx context.Context, in *SetCredentialsRequest, opts ...grpc.CallOption) (*SetCredentialsResponse, error) { + out := new(SetCredentialsResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/SetCredentials", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) GenerateCredentials(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*GenerateCredentialsResponse, error) { + out := new(GenerateCredentialsResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/GenerateCredentials", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Deprecated: Do not use. +func (c *databaseClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/dbplugin.Database/Initialize", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DatabaseServer is the server API for Database service. +type DatabaseServer interface { + Type(context.Context, *Empty) (*TypeResponse, error) + CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error) + RenewUser(context.Context, *RenewUserRequest) (*Empty, error) + RevokeUser(context.Context, *RevokeUserRequest) (*Empty, error) + RotateRootCredentials(context.Context, *RotateRootCredentialsRequest) (*RotateRootCredentialsResponse, error) + Init(context.Context, *InitRequest) (*InitResponse, error) + Close(context.Context, *Empty) (*Empty, error) + SetCredentials(context.Context, *SetCredentialsRequest) (*SetCredentialsResponse, error) + GenerateCredentials(context.Context, *Empty) (*GenerateCredentialsResponse, error) + Initialize(context.Context, *InitializeRequest) (*Empty, error) +} + +// UnimplementedDatabaseServer can be embedded to have forward compatible implementations. 
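+// A partial server can embed it so that methods it does not implement return
+// codes.Unimplemented instead of breaking the build as RPCs are added to the
+// service; a minimal sketch (myDatabaseServer is a hypothetical type, not
+// part of this package):
+//
+//	type myDatabaseServer struct {
+//		UnimplementedDatabaseServer
+//	}
+//
+//	func (*myDatabaseServer) Type(ctx context.Context, req *Empty) (*TypeResponse, error) {
+//		return &TypeResponse{Type: "example"}, nil
+//	}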
+type UnimplementedDatabaseServer struct { +} + +func (*UnimplementedDatabaseServer) Type(ctx context.Context, req *Empty) (*TypeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Type not implemented") +} +func (*UnimplementedDatabaseServer) CreateUser(ctx context.Context, req *CreateUserRequest) (*CreateUserResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateUser not implemented") +} +func (*UnimplementedDatabaseServer) RenewUser(ctx context.Context, req *RenewUserRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method RenewUser not implemented") +} +func (*UnimplementedDatabaseServer) RevokeUser(ctx context.Context, req *RevokeUserRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method RevokeUser not implemented") +} +func (*UnimplementedDatabaseServer) RotateRootCredentials(ctx context.Context, req *RotateRootCredentialsRequest) (*RotateRootCredentialsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RotateRootCredentials not implemented") +} +func (*UnimplementedDatabaseServer) Init(ctx context.Context, req *InitRequest) (*InitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") +} +func (*UnimplementedDatabaseServer) Close(ctx context.Context, req *Empty) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Close not implemented") +} +func (*UnimplementedDatabaseServer) SetCredentials(ctx context.Context, req *SetCredentialsRequest) (*SetCredentialsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetCredentials not implemented") +} +func (*UnimplementedDatabaseServer) GenerateCredentials(ctx context.Context, req *Empty) (*GenerateCredentialsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GenerateCredentials not implemented") +} +func (*UnimplementedDatabaseServer) Initialize(ctx context.Context, req *InitializeRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented") +} + +func RegisterDatabaseServer(s *grpc.Server, srv DatabaseServer) { + s.RegisterService(&_Database_serviceDesc, srv) +} + +func _Database_Type_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Type(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/Type", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Type(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_CreateUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).CreateUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/CreateUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).CreateUser(ctx, req.(*CreateUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_RenewUser_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RenewUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).RenewUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/RenewUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).RenewUser(ctx, req.(*RenewUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_RevokeUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RevokeUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).RevokeUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/RevokeUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).RevokeUser(ctx, req.(*RevokeUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_RotateRootCredentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RotateRootCredentialsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).RotateRootCredentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/RotateRootCredentials", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).RotateRootCredentials(ctx, req.(*RotateRootCredentialsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Init(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/Init", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Init(ctx, req.(*InitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Close_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Close(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/Close", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Close(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_SetCredentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetCredentialsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).SetCredentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/SetCredentials", + } + handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).SetCredentials(ctx, req.(*SetCredentialsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_GenerateCredentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).GenerateCredentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/GenerateCredentials", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).GenerateCredentials(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitializeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Initialize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/Initialize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Initialize(ctx, req.(*InitializeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Database_serviceDesc = grpc.ServiceDesc{ + ServiceName: "dbplugin.Database", + HandlerType: (*DatabaseServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Type", + Handler: _Database_Type_Handler, + }, + { + MethodName: "CreateUser", + Handler: _Database_CreateUser_Handler, + }, + { + MethodName: "RenewUser", + Handler: _Database_RenewUser_Handler, + }, + { + MethodName: "RevokeUser", + Handler: _Database_RevokeUser_Handler, + }, + { + MethodName: "RotateRootCredentials", + Handler: _Database_RotateRootCredentials_Handler, + }, + { + MethodName: "Init", + Handler: _Database_Init_Handler, + }, + { + MethodName: "Close", + Handler: _Database_Close_Handler, + }, + { + MethodName: "SetCredentials", + Handler: _Database_SetCredentials_Handler, + }, + { + MethodName: "GenerateCredentials", + Handler: _Database_GenerateCredentials_Handler, + }, + { + MethodName: "Initialize", + Handler: _Database_Initialize_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/database/dbplugin/database.proto", +} diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/database.proto b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/database.proto new file mode 100644 index 00000000..d8c20809 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/database.proto @@ -0,0 +1,116 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin"; + +package dbplugin; + +import "google/protobuf/timestamp.proto"; + +message InitializeRequest { + option deprecated = true; + bytes config = 1; + bool verify_connection = 2; +} + +message InitRequest { + bytes config = 1; + bool verify_connection = 2; +} + +message CreateUserRequest { + Statements statements = 1; + UsernameConfig username_config = 2; + google.protobuf.Timestamp expiration = 3; +} + +message RenewUserRequest { + Statements statements = 1; + string username = 2; + google.protobuf.Timestamp expiration = 3; +} + +message RevokeUserRequest { + Statements statements = 1; + string username = 2; +} + 
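+// RotateRootCredentialsRequest carries the configured rotation statements, if
+// any; the plugin answers with its updated connection configuration, encoded
+// as JSON bytes, in RotateRootCredentialsResponse.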
+message RotateRootCredentialsRequest {
+	repeated string statements = 1;
+}
+
+message Statements {
+	// DEPRECATED, will be removed in 0.12
+	string creation_statements = 1 [deprecated=true];
+	// DEPRECATED, will be removed in 0.12
+	string revocation_statements = 2 [deprecated=true];
+	// DEPRECATED, will be removed in 0.12
+	string rollback_statements = 3 [deprecated=true];
+	// DEPRECATED, will be removed in 0.12
+	string renew_statements = 4 [deprecated=true];
+
+	repeated string creation = 5;
+	repeated string revocation = 6;
+	repeated string rollback = 7;
+	repeated string renewal = 8;
+	repeated string rotation = 9;
+}
+
+message UsernameConfig {
+	string DisplayName = 1;
+	string RoleName = 2;
+}
+
+message InitResponse {
+	bytes config = 1;
+}
+
+message CreateUserResponse {
+	string username = 1;
+	string password = 2;
+}
+
+message TypeResponse {
+	string type = 1;
+}
+
+message RotateRootCredentialsResponse {
+	bytes config = 1;
+}
+
+message Empty {}
+
+message GenerateCredentialsResponse {
+	string password = 1;
+}
+
+message StaticUserConfig {
+	string username = 1;
+	string password = 2;
+	bool create = 3;
+}
+
+message SetCredentialsRequest {
+	Statements statements = 1;
+	StaticUserConfig static_user_config = 2;
+}
+
+message SetCredentialsResponse {
+	string username = 1;
+	string password = 2;
+}
+
+service Database {
+	rpc Type(Empty) returns (TypeResponse);
+	rpc CreateUser(CreateUserRequest) returns (CreateUserResponse);
+	rpc RenewUser(RenewUserRequest) returns (Empty);
+	rpc RevokeUser(RevokeUserRequest) returns (Empty);
+	rpc RotateRootCredentials(RotateRootCredentialsRequest) returns (RotateRootCredentialsResponse);
+	rpc Init(InitRequest) returns (InitResponse);
+	rpc Close(Empty) returns (Empty);
+	rpc SetCredentials(SetCredentialsRequest) returns (SetCredentialsResponse);
+	rpc GenerateCredentials(Empty) returns (GenerateCredentialsResponse);
+
+	rpc Initialize(InitializeRequest) returns (Empty) {
+		option deprecated = true;
+	};
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/databasemiddleware.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/databasemiddleware.go
new file mode 100644
index 00000000..19cfa337
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/databasemiddleware.go
@@ -0,0 +1,335 @@
+package dbplugin
+
+import (
+	"context"
+	"errors"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/errwrap"
+
+	metrics "github.com/armon/go-metrics"
+	log "github.com/hashicorp/go-hclog"
+)
+
+// ---- Tracing Middleware Domain ----
+
+// databaseTracingMiddleware wraps an implementation of Database and executes
+// trace logging on each function call.
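+// Each call is logged once when it starts and once when it finishes, with the
+// error and the elapsed time attached to the "finished" entry; roughly (the
+// exact rendering depends on the hclog configuration):
+//
+//	[TRACE] create user: status=started
+//	[TRACE] create user: status=finished err=<nil> took=1.2ms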
+type databaseTracingMiddleware struct { + next Database + logger log.Logger +} + +func (mw *databaseTracingMiddleware) Type() (string, error) { + return mw.next.Type() +} + +func (mw *databaseTracingMiddleware) CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) { + defer func(then time.Time) { + mw.logger.Trace("create user", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("create user", "status", "started") + return mw.next.CreateUser(ctx, statements, usernameConfig, expiration) +} + +func (mw *databaseTracingMiddleware) RenewUser(ctx context.Context, statements Statements, username string, expiration time.Time) (err error) { + defer func(then time.Time) { + mw.logger.Trace("renew user", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("renew user", "status", "started") + return mw.next.RenewUser(ctx, statements, username, expiration) +} + +func (mw *databaseTracingMiddleware) RevokeUser(ctx context.Context, statements Statements, username string) (err error) { + defer func(then time.Time) { + mw.logger.Trace("revoke user", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("revoke user", "status", "started") + return mw.next.RevokeUser(ctx, statements, username) +} + +func (mw *databaseTracingMiddleware) RotateRootCredentials(ctx context.Context, statements []string) (conf map[string]interface{}, err error) { + defer func(then time.Time) { + mw.logger.Trace("rotate root credentials", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("rotate root credentials", "status", "started") + return mw.next.RotateRootCredentials(ctx, statements) +} + +func (mw *databaseTracingMiddleware) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error { + _, err := mw.Init(ctx, conf, verifyConnection) + return err +} + +func (mw *databaseTracingMiddleware) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (saveConf map[string]interface{}, err error) { + defer func(then time.Time) { + mw.logger.Trace("initialize", "status", "finished", "verify", verifyConnection, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("initialize", "status", "started") + return mw.next.Init(ctx, conf, verifyConnection) +} + +func (mw *databaseTracingMiddleware) Close() (err error) { + defer func(then time.Time) { + mw.logger.Trace("close", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("close", "status", "started") + return mw.next.Close() +} + +func (mw *databaseTracingMiddleware) GenerateCredentials(ctx context.Context) (password string, err error) { + defer func(then time.Time) { + mw.logger.Trace("generate credentials", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("generate credentials", "status", "started") + return mw.next.GenerateCredentials(ctx) +} + +func (mw *databaseTracingMiddleware) SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username, password string, err error) { + defer func(then time.Time) { + mw.logger.Trace("set credentials", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("set credentials", "status", "started") + return mw.next.SetCredentials(ctx, 
statements, staticConfig)
+}
+
+// ---- Metrics Middleware Domain ----
+
+// databaseMetricsMiddleware wraps an implementation of Database and, on each
+// function call, logs metrics about this instance.
+type databaseMetricsMiddleware struct {
+	next Database
+
+	typeStr string
+}
+
+func (mw *databaseMetricsMiddleware) Type() (string, error) {
+	return mw.next.Type()
+}
+
+func (mw *databaseMetricsMiddleware) CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "CreateUser"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "CreateUser"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "CreateUser", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "CreateUser"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser"}, 1)
+	return mw.next.CreateUser(ctx, statements, usernameConfig, expiration)
+}
+
+func (mw *databaseMetricsMiddleware) RenewUser(ctx context.Context, statements Statements, username string, expiration time.Time) (err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "RenewUser"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "RenewUser"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "RenewUser", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "RenewUser"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser"}, 1)
+	return mw.next.RenewUser(ctx, statements, username, expiration)
+}
+
+func (mw *databaseMetricsMiddleware) RevokeUser(ctx context.Context, statements Statements, username string) (err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "RevokeUser"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "RevokeUser"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "RevokeUser", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "RevokeUser"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser"}, 1)
+	return mw.next.RevokeUser(ctx, statements, username)
+}
+
+func (mw *databaseMetricsMiddleware) RotateRootCredentials(ctx context.Context, statements []string) (conf map[string]interface{}, err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "RotateRootCredentials"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "RotateRootCredentials"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "RotateRootCredentials", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "RotateRootCredentials", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "RotateRootCredentials"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "RotateRootCredentials"}, 1)
+	return mw.next.RotateRootCredentials(ctx, statements)
+}
+
+func (mw *databaseMetricsMiddleware) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error {
+	_, err := mw.Init(ctx, conf, verifyConnection)
+	return err
+}
+
+func (mw *databaseMetricsMiddleware) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (saveConf map[string]interface{}, err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "Initialize"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "Initialize"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "Initialize", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "Initialize"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize"}, 1)
+	return mw.next.Init(ctx, conf, verifyConnection)
+}
+
+func (mw *databaseMetricsMiddleware) Close() (err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "Close"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "Close"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "Close", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "Close", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "Close"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "Close"}, 1)
+	return mw.next.Close()
+}
+
+func (mw *databaseMetricsMiddleware) GenerateCredentials(ctx context.Context) (password string, err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "GenerateCredentials"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "GenerateCredentials"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "GenerateCredentials", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "GenerateCredentials", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "GenerateCredentials"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "GenerateCredentials"}, 1)
+	return mw.next.GenerateCredentials(ctx)
+}
+
+func (mw *databaseMetricsMiddleware) SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username, password string, err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "SetCredentials"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "SetCredentials"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "SetCredentials", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "SetCredentials", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "SetCredentials"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "SetCredentials"}, 1)
+	return mw.next.SetCredentials(ctx, statements, staticConfig)
+}
+
+// ---- Error Sanitizer Middleware Domain ----
+
+// DatabaseErrorSanitizerMiddleware wraps an implementation of Database and
+// sanitizes returned error messages.
+type DatabaseErrorSanitizerMiddleware struct {
+	l         sync.RWMutex
+	next      Database
+	secretsFn func() map[string]interface{}
+}
+
+func NewDatabaseErrorSanitizerMiddleware(next Database, secretsFn func() map[string]interface{}) *DatabaseErrorSanitizerMiddleware {
+	return &DatabaseErrorSanitizerMiddleware{
+		next:      next,
+		secretsFn: secretsFn,
+	}
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) Type() (string, error) {
+	dbType, err := mw.next.Type()
+	return dbType, mw.sanitize(err)
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
+	username, password, err = mw.next.CreateUser(ctx, statements, usernameConfig, expiration)
+	return username, password, mw.sanitize(err)
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) RenewUser(ctx context.Context, statements Statements, username string, expiration time.Time) (err error) {
+	return mw.sanitize(mw.next.RenewUser(ctx, statements, username, expiration))
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) RevokeUser(ctx context.Context, statements Statements, username string) (err error) {
+	return mw.sanitize(mw.next.RevokeUser(ctx, statements, username))
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) RotateRootCredentials(ctx context.Context, statements []string) (conf map[string]interface{}, err error) {
+	conf, err = mw.next.RotateRootCredentials(ctx, statements)
+	return conf, mw.sanitize(err)
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error {
+	_, err := mw.Init(ctx, conf, verifyConnection)
+	return err
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (saveConf map[string]interface{}, err error) {
+	saveConf, err = mw.next.Init(ctx, conf, verifyConnection)
+	return saveConf, mw.sanitize(err)
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) Close() (err error) {
+	return mw.sanitize(mw.next.Close())
+}
+
+// sanitize hides connection URL parse failures behind a generic message and
+// replaces any secret values reported by secretsFn with their redacted
+// placeholders, so secrets never surface in returned error messages.
+func (mw *DatabaseErrorSanitizerMiddleware) sanitize(err error) error {
+	if err == nil {
+		return nil
+	}
+	if errwrap.ContainsType(err, new(url.Error)) {
+		return errors.New("unable to parse connection url")
+	}
+	if mw.secretsFn != nil {
+		for k, v := range mw.secretsFn() {
+			if k == "" {
+				continue
+			}
+			err = errors.New(strings.Replace(err.Error(), k, v.(string), -1))
+		}
+	}
+	return err
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) GenerateCredentials(ctx context.Context) (password string, err error) {
+	password, err = mw.next.GenerateCredentials(ctx)
+	return password, mw.sanitize(err)
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username, password string, err error) {
+	username, password, err = mw.next.SetCredentials(ctx, statements, staticConfig)
+	return username, password, mw.sanitize(err)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/grpc_transport.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/grpc_transport.go
new file mode 100644
index 00000000..bfd84802
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/grpc_transport.go
@@ -0,0 +1,358 @@
+package dbplugin
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/golang/protobuf/ptypes"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+)
+
+var (
+	ErrPluginShutdown          = errors.New("plugin shutdown")
+	ErrPluginStaticUnsupported = errors.New("database plugin does not support Static Accounts")
+)
+
+// ---- gRPC Server domain ----
+
+type gRPCServer struct {
+	impl Database
+}
+
+func (s *gRPCServer) Type(context.Context, *Empty) (*TypeResponse, error) {
+	t, err := s.impl.Type()
+	if err != nil {
+		return nil, err
+	}
+
+	return &TypeResponse{
+		Type: t,
+	}, nil
+}
+
+func (s *gRPCServer) CreateUser(ctx context.Context, req *CreateUserRequest) (*CreateUserResponse, error) {
+	e, err := ptypes.Timestamp(req.Expiration)
+	if err != nil {
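+		// ptypes.Timestamp rejects timestamps outside the range time.Time can
+		// represent (roughly years 1 through 9999) or with out-of-range nanos,
+		// so a malformed expiration fails the RPC here rather than further down.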
return nil, err + } + + u, p, err := s.impl.CreateUser(ctx, *req.Statements, *req.UsernameConfig, e) + + return &CreateUserResponse{ + Username: u, + Password: p, + }, err +} + +func (s *gRPCServer) RenewUser(ctx context.Context, req *RenewUserRequest) (*Empty, error) { + e, err := ptypes.Timestamp(req.Expiration) + if err != nil { + return nil, err + } + err = s.impl.RenewUser(ctx, *req.Statements, req.Username, e) + return &Empty{}, err +} + +func (s *gRPCServer) RevokeUser(ctx context.Context, req *RevokeUserRequest) (*Empty, error) { + err := s.impl.RevokeUser(ctx, *req.Statements, req.Username) + return &Empty{}, err +} + +func (s *gRPCServer) RotateRootCredentials(ctx context.Context, req *RotateRootCredentialsRequest) (*RotateRootCredentialsResponse, error) { + + resp, err := s.impl.RotateRootCredentials(ctx, req.Statements) + if err != nil { + return nil, err + } + + respConfig, err := json.Marshal(resp) + if err != nil { + return nil, err + } + + return &RotateRootCredentialsResponse{ + Config: respConfig, + }, err +} + +func (s *gRPCServer) Initialize(ctx context.Context, req *InitializeRequest) (*Empty, error) { + _, err := s.Init(ctx, &InitRequest{ + Config: req.Config, + VerifyConnection: req.VerifyConnection, + }) + return &Empty{}, err +} + +func (s *gRPCServer) Init(ctx context.Context, req *InitRequest) (*InitResponse, error) { + config := map[string]interface{}{} + err := json.Unmarshal(req.Config, &config) + if err != nil { + return nil, err + } + + resp, err := s.impl.Init(ctx, config, req.VerifyConnection) + if err != nil { + return nil, err + } + + respConfig, err := json.Marshal(resp) + if err != nil { + return nil, err + } + + return &InitResponse{ + Config: respConfig, + }, err +} + +func (s *gRPCServer) Close(_ context.Context, _ *Empty) (*Empty, error) { + s.impl.Close() + return &Empty{}, nil +} + +func (s *gRPCServer) GenerateCredentials(ctx context.Context, _ *Empty) (*GenerateCredentialsResponse, error) { + p, err := s.impl.GenerateCredentials(ctx) + if err != nil { + return nil, err + } + + return &GenerateCredentialsResponse{ + Password: p, + }, nil +} + +func (s *gRPCServer) SetCredentials(ctx context.Context, req *SetCredentialsRequest) (*SetCredentialsResponse, error) { + + username, password, err := s.impl.SetCredentials(ctx, *req.Statements, *req.StaticUserConfig) + if err != nil { + return nil, err + } + + return &SetCredentialsResponse{ + Username: username, + Password: password, + }, err +} + +// ---- gRPC client domain ---- + +type gRPCClient struct { + client DatabaseClient + clientConn *grpc.ClientConn + + doneCtx context.Context +} + +func (c *gRPCClient) Type() (string, error) { + resp, err := c.client.Type(c.doneCtx, &Empty{}) + if err != nil { + return "", err + } + + return resp.Type, err +} + +func (c *gRPCClient) CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) { + t, err := ptypes.TimestampProto(expiration) + if err != nil { + return "", "", err + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.CreateUser(ctx, &CreateUserRequest{ + Statements: &statements, + UsernameConfig: &usernameConfig, + Expiration: t, + }) + if err != nil { + if c.doneCtx.Err() != nil { + return "", "", ErrPluginShutdown + } + + return "", "", err + } + + return resp.Username, resp.Password, err +} + +func (c *gRPCClient) RenewUser(ctx 
context.Context, statements Statements, username string, expiration time.Time) error { + t, err := ptypes.TimestampProto(expiration) + if err != nil { + return err + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + _, err = c.client.RenewUser(ctx, &RenewUserRequest{ + Statements: &statements, + Username: username, + Expiration: t, + }) + if err != nil { + if c.doneCtx.Err() != nil { + return ErrPluginShutdown + } + + return err + } + + return nil +} + +func (c *gRPCClient) RevokeUser(ctx context.Context, statements Statements, username string) error { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + _, err := c.client.RevokeUser(ctx, &RevokeUserRequest{ + Statements: &statements, + Username: username, + }) + + if err != nil { + if c.doneCtx.Err() != nil { + return ErrPluginShutdown + } + + return err + } + + return nil +} + +func (c *gRPCClient) RotateRootCredentials(ctx context.Context, statements []string) (conf map[string]interface{}, err error) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.RotateRootCredentials(ctx, &RotateRootCredentialsRequest{ + Statements: statements, + }) + + if err != nil { + if c.doneCtx.Err() != nil { + return nil, ErrPluginShutdown + } + + return nil, err + } + + if err := json.Unmarshal(resp.Config, &conf); err != nil { + return nil, err + } + + return conf, nil +} + +func (c *gRPCClient) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error { + _, err := c.Init(ctx, conf, verifyConnection) + return err +} + +func (c *gRPCClient) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (map[string]interface{}, error) { + configRaw, err := json.Marshal(conf) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.Init(ctx, &InitRequest{ + Config: configRaw, + VerifyConnection: verifyConnection, + }) + if err != nil { + // Fall back to old call if not implemented + grpcStatus, ok := status.FromError(err) + if ok && grpcStatus.Code() == codes.Unimplemented { + _, err = c.client.Initialize(ctx, &InitializeRequest{ + Config: configRaw, + VerifyConnection: verifyConnection, + }) + if err == nil { + return conf, nil + } + } + + if c.doneCtx.Err() != nil { + return nil, ErrPluginShutdown + } + return nil, err + } + + if err := json.Unmarshal(resp.Config, &conf); err != nil { + return nil, err + } + return conf, nil +} + +func (c *gRPCClient) Close() error { + _, err := c.client.Close(c.doneCtx, &Empty{}) + return err +} + +func (c *gRPCClient) GenerateCredentials(ctx context.Context) (string, error) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.GenerateCredentials(ctx, &Empty{}) + if err != nil { + grpcStatus, ok := status.FromError(err) + if ok && grpcStatus.Code() == codes.Unimplemented { + return "", ErrPluginStaticUnsupported + } + + if c.doneCtx.Err() != nil { + return "", ErrPluginShutdown + } + return "", err + } + + return resp.Password, nil +} +func (c *gRPCClient) SetCredentials(ctx context.Context, statements 
Statements, staticUser StaticUserConfig) (username, password string, err error) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.SetCredentials(ctx, &SetCredentialsRequest{ + StaticUserConfig: &staticUser, + Statements: &statements, + }) + + if err != nil { + // Fall back to old call if not implemented + grpcStatus, ok := status.FromError(err) + if ok && grpcStatus.Code() == codes.Unimplemented { + return "", "", ErrPluginStaticUnsupported + } + + if c.doneCtx.Err() != nil { + return "", "", ErrPluginShutdown + } + return "", "", err + } + + return resp.Username, resp.Password, err +} diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/plugin.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/plugin.go new file mode 100644 index 00000000..957cf3f4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/plugin.go @@ -0,0 +1,181 @@ +package dbplugin + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" +) + +// Database is the interface that all database objects must implement. +type Database interface { + // Type returns the TypeName for the particular database backend + // implementation. This type name is usually set as a constant within the + // database backend implementation, e.g. "mysql" for the MySQL database + // backend. + Type() (string, error) + + // CreateUser is called on `$ vault read database/creds/:role-name` and it's + // also the first time anything is touched from `$ vault write + // database/roles/:role-name`. This is likely to be the highest-throughput + // method for most plugins. + CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) + + // RenewUser is triggered by a renewal call to the API. In many database + // backends, this triggers a call on the underlying database that extends a + // VALID UNTIL clause on a user. However, if no such need exists, setting + // this as a NO-OP means that when renewal is called, the lease renewal time + // is pushed further out as appropriate, thus pushing out the time until the + // RevokeUser method is called. + RenewUser(ctx context.Context, statements Statements, username string, expiration time.Time) error + + // RevokeUser is triggered either automatically by a lease expiration, or by + // a revocation call to the API. + RevokeUser(ctx context.Context, statements Statements, username string) error + + // RotateRootCredentials is triggered by a root credential rotation call to + // the API. + RotateRootCredentials(ctx context.Context, statements []string) (config map[string]interface{}, err error) + + // GenerateCredentials returns a generated password for the plugin. This is + // used in combination with SetCredentials to set a specific password for a + // database user and preserve the password in WAL entries. + GenerateCredentials(ctx context.Context) (string, error) + + // SetCredentials uses provided information to create or set the credentials + // for a database user. Unlike CreateUser, this method requires both a + // username and a password given instead of generating them. 
This is used for + // creating and setting the password of static accounts, as well as rolling + // back passwords in the database in the event an updated database fails to + // save in Vault's storage. + SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username string, password string, err error) + + // Init is called on `$ vault write database/config/:db-name`, or when you + // do a creds call after Vault's been restarted. The config provided won't + // hold all the keys and values provided in the API call, some will be + // stripped by the database engine before the config is provided. The config + // returned will be stored, which will persist it across shutdowns. + Init(ctx context.Context, config map[string]interface{}, verifyConnection bool) (saveConfig map[string]interface{}, err error) + + // Close attempts to close the underlying database connection that was + // established by the backend. + Close() error + + // DEPRECATED: Will be removed in a future plugin version bump. + // Initialize is a backwards-compatible implementation that simply calls + // Init, dropping the saveConfig, and returning the err. + Initialize(ctx context.Context, config map[string]interface{}, verifyConnection bool) (err error) +} + +// PluginFactory is used to build plugin database types. It wraps the database +// object in a logging and metrics middleware. +func PluginFactory(ctx context.Context, pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) { + // Look for plugin in the plugin catalog + pluginRunner, err := sys.LookupPlugin(ctx, pluginName, consts.PluginTypeDatabase) + if err != nil { + return nil, err + } + + namedLogger := logger.Named(pluginName) + + var transport string + var db Database + if pluginRunner.Builtin { + // Plugin is builtin so we can retrieve an instance of the interface + // from the pluginRunner. Then cast it to a Database. + dbRaw, err := pluginRunner.BuiltinFactory() + if err != nil { + return nil, errwrap.Wrapf("error initializing plugin: {{err}}", err) + } + + var ok bool + db, ok = dbRaw.(Database) + if !ok { + return nil, fmt.Errorf("unsupported database type: %q", pluginName) + } + + transport = "builtin" + + } else { + // create a DatabasePluginClient instance + db, err = NewPluginClient(ctx, sys, pluginRunner, namedLogger, false) + if err != nil { + return nil, err + } + + // Switch on the underlying database client type to get the transport + // method. + switch db.(*DatabasePluginClient).Database.(type) { + case *gRPCClient: + transport = "gRPC" + } + + } + + typeStr, err := db.Type() + if err != nil { + return nil, errwrap.Wrapf("error getting plugin type: {{err}}", err) + } + + // Wrap with metrics middleware + db = &databaseMetricsMiddleware{ + next: db, + typeStr: typeStr, + } + + // Wrap with tracing middleware + if namedLogger.IsTrace() { + db = &databaseTracingMiddleware{ + next: db, + logger: namedLogger.With("transport", transport), + } + } + + return db, nil +} + +// handshakeConfigs are used to just do a basic handshake between +// a plugin and host. If the handshake fails, a user friendly error is shown. +// This prevents users from executing bad plugins or executing a plugin +// directory. It is a UX feature, not a security feature. 
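+//
+// A plugin binary takes part in the handshake by handing its Database
+// implementation to Serve (see server.go); a minimal sketch, assuming a
+// hypothetical MyDB type that implements Database and no TLS provider:
+//
+//	func main() {
+//		dbplugin.Serve(&MyDB{}, nil)
+//	}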
+var handshakeConfig = plugin.HandshakeConfig{
+	ProtocolVersion:  4,
+	MagicCookieKey:   "VAULT_DATABASE_PLUGIN",
+	MagicCookieValue: "926a0820-aea2-be28-51d6-83cdf00e8edb",
+}
+
+var _ plugin.Plugin = &GRPCDatabasePlugin{}
+var _ plugin.GRPCPlugin = &GRPCDatabasePlugin{}
+
+// GRPCDatabasePlugin is the plugin.Plugin implementation that only supports
+// the gRPC transport.
+type GRPCDatabasePlugin struct {
+	Impl Database
+
+	// Embedding this will disable the netRPC protocol
+	plugin.NetRPCUnsupportedPlugin
+}
+
+func (d GRPCDatabasePlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error {
+	impl := &DatabaseErrorSanitizerMiddleware{
+		next: d.Impl,
+	}
+
+	RegisterDatabaseServer(s, &gRPCServer{impl: impl})
+	return nil
+}
+
+func (GRPCDatabasePlugin) GRPCClient(doneCtx context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
+	return &gRPCClient{
+		client:     NewDatabaseClient(c),
+		clientConn: c,
+		doneCtx:    doneCtx,
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/server.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/server.go
new file mode 100644
index 00000000..00e71e12
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/server.go
@@ -0,0 +1,51 @@
+package dbplugin
+
+import (
+	"crypto/tls"
+	fmt "fmt"
+
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+)
+
+// Serve is called from within a plugin and wraps the provided
+// Database implementation in a GRPCDatabasePlugin and starts an
+// RPC server.
+func Serve(db Database, tlsProvider func() (*tls.Config, error)) {
+	plugin.Serve(ServeConfig(db, tlsProvider))
+}
+
+func ServeConfig(db Database, tlsProvider func() (*tls.Config, error)) *plugin.ServeConfig {
+	err := pluginutil.OptionallyEnableMlock()
+	if err != nil {
+		fmt.Println(err)
+		return nil
+	}
+
+	// pluginSets is the map of plugins we can dispense.
+	pluginSets := map[int]plugin.PluginSet{
+		// Version 3 used to support both protocols. We want to keep it around
+		// since it's possible old plugins built against this version will still
+		// work with gRPC. There is currently no difference between version 3
+		// and version 4.
+		3: plugin.PluginSet{
+			"database": &GRPCDatabasePlugin{
+				Impl: db,
+			},
+		},
+		4: plugin.PluginSet{
+			"database": &GRPCDatabasePlugin{
+				Impl: db,
+			},
+		},
+	}
+
+	conf := &plugin.ServeConfig{
+		HandshakeConfig:  handshakeConfig,
+		VersionedPlugins: pluginSets,
+		TLSProvider:      tlsProvider,
+		GRPCServer:       plugin.DefaultGRPCServer,
+	}
+
+	return conf
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/database/helper/connutil/connutil.go b/vendor/github.com/hashicorp/vault/sdk/database/helper/connutil/connutil.go
new file mode 100644
index 00000000..35553d22
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/database/helper/connutil/connutil.go
@@ -0,0 +1,25 @@
+package connutil
+
+import (
+	"context"
+	"errors"
+	"sync"
+)
+
+var (
+	ErrNotInitialized = errors.New("connection has not been initialized")
+)
+
+// ConnectionProducer can be used as an embedded interface in the Database
+// definition. It implements the methods dealing with individual database
+// connections and is used in all the builtin database types.
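+//
+// A concrete database type typically embeds a producer rather than
+// implementing these methods itself; a sketch, assuming a hypothetical MyDB
+// type built on the SQLConnectionProducer defined in sql.go:
+//
+//	type MyDB struct {
+//		*SQLConnectionProducer
+//	}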
+type ConnectionProducer interface { + Close() error + Init(context.Context, map[string]interface{}, bool) (map[string]interface{}, error) + Connection(context.Context) (interface{}, error) + + sync.Locker + + // DEPRECATED, will be removed in 0.12 + Initialize(context.Context, map[string]interface{}, bool) error +} diff --git a/vendor/github.com/hashicorp/vault/sdk/database/helper/connutil/sql.go b/vendor/github.com/hashicorp/vault/sdk/database/helper/connutil/sql.go new file mode 100644 index 00000000..796df8b1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/database/helper/connutil/sql.go @@ -0,0 +1,184 @@ +package connutil + +import ( + "context" + "database/sql" + "fmt" + "net/url" + "strings" + "sync" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/mitchellh/mapstructure" +) + +var _ ConnectionProducer = &SQLConnectionProducer{} + +// SQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases +type SQLConnectionProducer struct { + ConnectionURL string `json:"connection_url" mapstructure:"connection_url" structs:"connection_url"` + MaxOpenConnections int `json:"max_open_connections" mapstructure:"max_open_connections" structs:"max_open_connections"` + MaxIdleConnections int `json:"max_idle_connections" mapstructure:"max_idle_connections" structs:"max_idle_connections"` + MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" mapstructure:"max_connection_lifetime" structs:"max_connection_lifetime"` + Username string `json:"username" mapstructure:"username" structs:"username"` + Password string `json:"password" mapstructure:"password" structs:"password"` + + Type string + RawConfig map[string]interface{} + maxConnectionLifetime time.Duration + Initialized bool + db *sql.DB + sync.Mutex +} + +func (c *SQLConnectionProducer) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error { + _, err := c.Init(ctx, conf, verifyConnection) + return err +} + +func (c *SQLConnectionProducer) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (map[string]interface{}, error) { + c.Lock() + defer c.Unlock() + + c.RawConfig = conf + + err := mapstructure.WeakDecode(conf, &c) + if err != nil { + return nil, err + } + + if len(c.ConnectionURL) == 0 { + return nil, fmt.Errorf("connection_url cannot be empty") + } + + // Don't escape special characters for MySQL password + password := c.Password + if c.Type != "mysql" { + password = url.PathEscape(c.Password) + } + + // QueryHelper doesn't do any SQL escaping, but if it starts to do so + // then maybe we won't be able to use it to do URL substitution any more. 
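+	// The substitution itself is plain string templating: a connection_url
+	// such as (an illustrative value, not one required by this package)
+	//
+	//	postgresql://{{username}}:{{password}}@localhost:5432/mydb
+	//
+	// has its {{username}} and {{password}} placeholders replaced with the
+	// configured values before the URL is used.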
+ c.ConnectionURL = dbutil.QueryHelper(c.ConnectionURL, map[string]string{ + "username": url.PathEscape(c.Username), + "password": password, + }) + + if c.MaxOpenConnections == 0 { + c.MaxOpenConnections = 4 + } + + if c.MaxIdleConnections == 0 { + c.MaxIdleConnections = c.MaxOpenConnections + } + if c.MaxIdleConnections > c.MaxOpenConnections { + c.MaxIdleConnections = c.MaxOpenConnections + } + if c.MaxConnectionLifetimeRaw == nil { + c.MaxConnectionLifetimeRaw = "0s" + } + + c.maxConnectionLifetime, err = parseutil.ParseDurationSecond(c.MaxConnectionLifetimeRaw) + if err != nil { + return nil, errwrap.Wrapf("invalid max_connection_lifetime: {{err}}", err) + } + + // Set initialized to true at this point since all fields are set, + // and the connection can be established at a later time. + c.Initialized = true + + if verifyConnection { + if _, err := c.Connection(ctx); err != nil { + return nil, errwrap.Wrapf("error verifying connection: {{err}}", err) + } + + if err := c.db.PingContext(ctx); err != nil { + return nil, errwrap.Wrapf("error verifying connection: {{err}}", err) + } + } + + return c.RawConfig, nil +} + +func (c *SQLConnectionProducer) Connection(ctx context.Context) (interface{}, error) { + if !c.Initialized { + return nil, ErrNotInitialized + } + + // If we already have a DB, test it and return + if c.db != nil { + if err := c.db.PingContext(ctx); err == nil { + return c.db, nil + } + // If the ping was unsuccessful, close it and ignore errors as we'll be + // reestablishing anyways + c.db.Close() + } + + // For mssql backend, switch to sqlserver instead + dbType := c.Type + if c.Type == "mssql" { + dbType = "sqlserver" + } + + // Otherwise, attempt to make connection + conn := c.ConnectionURL + + // Ensure timezone is set to UTC for all the connections + if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") { + if strings.Contains(conn, "?") { + conn += "&timezone=utc" + } else { + conn += "?timezone=utc" + } + } + + var err error + c.db, err = sql.Open(dbType, conn) + if err != nil { + return nil, err + } + + // Set some connection pool settings. We don't need much of this, + // since the request rate shouldn't be high. + c.db.SetMaxOpenConns(c.MaxOpenConnections) + c.db.SetMaxIdleConns(c.MaxIdleConnections) + c.db.SetConnMaxLifetime(c.maxConnectionLifetime) + + return c.db, nil +} + +func (c *SQLConnectionProducer) SecretValues() map[string]interface{} { + return map[string]interface{}{ + c.Password: "[password]", + } +} + +// Close attempts to close the connection +func (c *SQLConnectionProducer) Close() error { + // Grab the write lock + c.Lock() + defer c.Unlock() + + if c.db != nil { + c.db.Close() + } + + c.db = nil + + return nil +} + +// SetCredentials uses provided information to set/create a user in the +// database. Unlike CreateUser, this method requires a username be provided and +// uses the name given, instead of generating a name. This is used for creating +// and setting the password of static accounts, as well as rolling back +// passwords in the database in the event an updated database fails to save in +// Vault's storage. 
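+//
+// This generic SQL producer deliberately leaves it unimplemented: it returns
+// dbutil.Unimplemented(), which the dbplugin gRPC client surfaces as
+// ErrPluginStaticUnsupported, so concrete plugins must override this method
+// to opt in to static accounts.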
+func (c *SQLConnectionProducer) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) { + return "", "", dbutil.Unimplemented() +} diff --git a/vendor/github.com/hashicorp/vault/sdk/database/helper/credsutil/credsutil.go b/vendor/github.com/hashicorp/vault/sdk/database/helper/credsutil/credsutil.go new file mode 100644 index 00000000..12b744fc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/database/helper/credsutil/credsutil.go @@ -0,0 +1,48 @@ +package credsutil + +import ( + "context" + "time" + + "fmt" + + "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/helper/base62" +) + +// CredentialsProducer can be used as an embedded interface in the Database +// definition. It implements the methods for generating user information for a +// particular database type and is used in all the builtin database types. +type CredentialsProducer interface { + GenerateCredentials(context.Context) (string, error) + GenerateUsername(dbplugin.UsernameConfig) (string, error) + GeneratePassword() (string, error) + GenerateExpiration(time.Time) (string, error) +} + +const ( + reqStr = `A1a-` + minStrLen = 10 +) + +// RandomAlphaNumeric returns a random string of characters [A-Za-z0-9-] +// of the provided length. The string generated takes up to 4 characters +// of space that are predefined and prepended to ensure password +// character requirements. It also requires a min length of 10 characters. +func RandomAlphaNumeric(length int, prependA1a bool) (string, error) { + if length < minStrLen { + return "", fmt.Errorf("minimum length of %d is required", minStrLen) + } + + var prefix string + if prependA1a { + prefix = reqStr + } + + randomStr, err := base62.Random(length - len(prefix)) + if err != nil { + return "", err + } + + return prefix + randomStr, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/database/helper/credsutil/sql.go b/vendor/github.com/hashicorp/vault/sdk/database/helper/credsutil/sql.go new file mode 100644 index 00000000..748b504e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/database/helper/credsutil/sql.go @@ -0,0 +1,81 @@ +package credsutil + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/database/dbplugin" +) + +const ( + NoneLength int = -1 +) + +// SQLCredentialsProducer implements CredentialsProducer and provides a generic credentials producer for most sql database types. 
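+//
+// For illustration (hypothetical configuration): with Separator "-", a
+// DisplayName of "token" and a RoleName of "readonly", GenerateUsername
+// produces usernames of the form
+// "v-token-readonly-<20 random chars>-<unix timestamp>", truncated to
+// UsernameLen when that limit is set.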
+type SQLCredentialsProducer struct {
+	DisplayNameLen int
+	RoleNameLen    int
+	UsernameLen    int
+	Separator      string
+}
+
+func (scp *SQLCredentialsProducer) GenerateCredentials(ctx context.Context) (string, error) {
+	password, err := scp.GeneratePassword()
+	if err != nil {
+		return "", err
+	}
+	return password, nil
+}
+
+func (scp *SQLCredentialsProducer) GenerateUsername(config dbplugin.UsernameConfig) (string, error) {
+	username := "v"
+
+	displayName := config.DisplayName
+	if scp.DisplayNameLen > 0 && len(displayName) > scp.DisplayNameLen {
+		displayName = displayName[:scp.DisplayNameLen]
+	} else if scp.DisplayNameLen == NoneLength {
+		displayName = ""
+	}
+
+	if len(displayName) > 0 {
+		username = fmt.Sprintf("%s%s%s", username, scp.Separator, displayName)
+	}
+
+	roleName := config.RoleName
+	if scp.RoleNameLen > 0 && len(roleName) > scp.RoleNameLen {
+		roleName = roleName[:scp.RoleNameLen]
+	} else if scp.RoleNameLen == NoneLength {
+		roleName = ""
+	}
+
+	if len(roleName) > 0 {
+		username = fmt.Sprintf("%s%s%s", username, scp.Separator, roleName)
+	}
+
+	userUUID, err := RandomAlphaNumeric(20, false)
+	if err != nil {
+		return "", err
+	}
+
+	username = fmt.Sprintf("%s%s%s", username, scp.Separator, userUUID)
+	username = fmt.Sprintf("%s%s%s", username, scp.Separator, fmt.Sprint(time.Now().Unix()))
+	if scp.UsernameLen > 0 && len(username) > scp.UsernameLen {
+		username = username[:scp.UsernameLen]
+	}
+
+	return username, nil
+}
+
+func (scp *SQLCredentialsProducer) GeneratePassword() (string, error) {
+	password, err := RandomAlphaNumeric(20, true)
+	if err != nil {
+		return "", err
+	}
+
+	return password, nil
+}
+
+func (scp *SQLCredentialsProducer) GenerateExpiration(ttl time.Time) (string, error) {
+	return ttl.Format("2006-01-02 15:04:05-0700"), nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/database/helper/dbutil/dbutil.go b/vendor/github.com/hashicorp/vault/sdk/database/helper/dbutil/dbutil.go
new file mode 100644
index 00000000..84b98d18
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/database/helper/dbutil/dbutil.go
@@ -0,0 +1,60 @@
+package dbutil
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/vault/sdk/database/dbplugin"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	ErrEmptyCreationStatement = errors.New("empty creation statements")
+	ErrEmptyRotationStatement = errors.New("empty rotation statements")
+)
+
+// QueryHelper templates a query for us.
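+//
+// For example (illustrative values):
+//
+//	QueryHelper("CREATE USER '{{name}}';", map[string]string{"name": "v-foo"})
+//
+// returns "CREATE USER 'v-foo';".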
+func QueryHelper(tpl string, data map[string]string) string { + for k, v := range data { + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) + } + + return tpl +} + +// StatementCompatibilityHelper will populate the statements fields to support +// compatibility +func StatementCompatibilityHelper(statements dbplugin.Statements) dbplugin.Statements { + switch { + case len(statements.Creation) > 0 && len(statements.CreationStatements) == 0: + statements.CreationStatements = strings.Join(statements.Creation, ";") + case len(statements.CreationStatements) > 0: + statements.Creation = []string{statements.CreationStatements} + } + switch { + case len(statements.Revocation) > 0 && len(statements.RevocationStatements) == 0: + statements.RevocationStatements = strings.Join(statements.Revocation, ";") + case len(statements.RevocationStatements) > 0: + statements.Revocation = []string{statements.RevocationStatements} + } + switch { + case len(statements.Renewal) > 0 && len(statements.RenewStatements) == 0: + statements.RenewStatements = strings.Join(statements.Renewal, ";") + case len(statements.RenewStatements) > 0: + statements.Renewal = []string{statements.RenewStatements} + } + switch { + case len(statements.Rollback) > 0 && len(statements.RollbackStatements) == 0: + statements.RollbackStatements = strings.Join(statements.Rollback, ";") + case len(statements.RollbackStatements) > 0: + statements.Rollback = []string{statements.RollbackStatements} + } + return statements +} + +// Unimplemented returns a gRPC error with the Unimplemented code +func Unimplemented() error { + return status.Error(codes.Unimplemented, "Not yet implemented") +} diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/backend.go b/vendor/github.com/hashicorp/vault/sdk/framework/backend.go new file mode 100644 index 00000000..a92025c7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/framework/backend.go @@ -0,0 +1,658 @@ +package framework + +import ( + "context" + "crypto/rand" + "fmt" + "io" + "io/ioutil" + "net/http" + "regexp" + "sort" + "strings" + "sync" + "time" + + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/entropy" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/license" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// Backend is an implementation of logical.Backend that allows +// the implementer to code a backend using a much more programmer-friendly +// framework that handles a lot of the routing and validation for you. +// +// This is recommended over implementing logical.Backend directly. +type Backend struct { + // Help is the help text that is shown when a help request is made + // on the root of this resource. The root help is special since we + // show all the paths that can be requested. + Help string + + // Paths are the various routes that the backend responds to. + // This cannot be modified after construction (i.e. dynamically changing + // paths, including adding or removing, is not allowed once the + // backend is in use). + // + // PathsSpecial is the list of path patterns that denote the + // paths above that require special privileges. These can't be + // regular expressions, it is either exact match or prefix match. + // For prefix match, append '*' as a suffix. 
+	Paths        []*Path
+	PathsSpecial *logical.Paths
+
+	// Secrets is the list of secret types that this backend can
+	// return. It is used to automatically generate proper responses,
+	// and ease specifying callbacks for revocation, renewal, etc.
+	Secrets []*Secret
+
+	// InitializeFunc is the callback, which if set, will be invoked via
+	// Initialize() just after a plugin has been mounted.
+	InitializeFunc InitializeFunc
+
+	// PeriodicFunc is the callback, which if set, will be invoked when the
+	// periodic timer of RollbackManager ticks. This can be used by
+	// backends to do anything they wish to do periodically.
+	//
+	// PeriodicFunc can be invoked to, say, periodically delete stale
+	// entries in the backend's storage, while the backend is still being used.
+	// (Note the difference between this action and what `Clean` does, which is
+	// invoked just before the backend is unmounted).
+	PeriodicFunc periodicFunc
+
+	// WALRollback is called when a WAL entry (see wal.go) has to be rolled
+	// back. It is called with the data from the entry.
+	//
+	// WALRollbackMinAge is the minimum age of a WAL entry before it is attempted
+	// to be rolled back. This should be longer than the maximum time it takes
+	// to successfully create a secret.
+	WALRollback       WALRollbackFunc
+	WALRollbackMinAge time.Duration
+
+	// Clean is called on unload to clean up, e.g., any existing connections
+	// to the backend, if required.
+	Clean CleanupFunc
+
+	// Invalidate is called when a key is modified, if required.
+	Invalidate InvalidateFunc
+
+	// AuthRenew is the callback to call when a RenewRequest for an
+	// authentication comes in. By default, renewal won't be allowed.
+	// See the built-in AuthRenew helpers in lease.go for common callbacks.
+	AuthRenew OperationFunc
+
+	// Type is the logical.BackendType for the backend implementation
+	BackendType logical.BackendType
+
+	logger  log.Logger
+	system  logical.SystemView
+	once    sync.Once
+	pathsRe []*regexp.Regexp
+}
+
+// periodicFunc is the callback called when the RollbackManager's timer ticks.
+// This can be utilized by backends to do anything they want.
+type periodicFunc func(context.Context, *logical.Request) error
+
+// OperationFunc is the callback called for an operation on a path.
+type OperationFunc func(context.Context, *logical.Request, *FieldData) (*logical.Response, error)
+
+// ExistenceFunc is the callback called for an existence check on a path.
+type ExistenceFunc func(context.Context, *logical.Request, *FieldData) (bool, error)
+
+// WALRollbackFunc is the callback for rollbacks.
+type WALRollbackFunc func(context.Context, *logical.Request, string, interface{}) error
+
+// CleanupFunc is the callback for backend unload.
+type CleanupFunc func(context.Context)
+
+// InvalidateFunc is the callback for backend key invalidation.
+type InvalidateFunc func(context.Context, string)
+
+// InitializeFunc is the callback, which if set, will be invoked via
+// Initialize() just after a plugin has been mounted.
+type InitializeFunc func(context.Context, *logical.InitializationRequest) error
+
+// Initialize is the logical.Backend implementation.
+func (b *Backend) Initialize(ctx context.Context, req *logical.InitializationRequest) error {
+	if b.InitializeFunc != nil {
+		return b.InitializeFunc(ctx, req)
+	}
+	return nil
+}
+
+// HandleExistenceCheck is the logical.Backend implementation.
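+//
+// Vault uses the result to decide whether a write to a path should be
+// treated as a CreateOperation (no existing entry) or an UpdateOperation
+// (entry already present) when the path defines an ExistenceCheck.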
+func (b *Backend) HandleExistenceCheck(ctx context.Context, req *logical.Request) (checkFound bool, exists bool, err error) { + b.once.Do(b.init) + + // Ensure we are only doing this when one of the correct operations is in play + switch req.Operation { + case logical.CreateOperation: + case logical.UpdateOperation: + default: + return false, false, fmt.Errorf("incorrect operation type %v for an existence check", req.Operation) + } + + // Find the matching route + path, captures := b.route(req.Path) + if path == nil { + return false, false, logical.ErrUnsupportedPath + } + + if path.ExistenceCheck == nil { + return false, false, nil + } + + checkFound = true + + // Build up the data for the route, with the URL taking priority + // for the fields over the PUT data. + raw := make(map[string]interface{}, len(path.Fields)) + for k, v := range req.Data { + raw[k] = v + } + for k, v := range captures { + raw[k] = v + } + + fd := FieldData{ + Raw: raw, + Schema: path.Fields} + + err = fd.Validate() + if err != nil { + return false, false, errutil.UserError{Err: err.Error()} + } + + // Call the callback with the request and the data + exists, err = path.ExistenceCheck(ctx, req, &fd) + return +} + +// HandleRequest is the logical.Backend implementation. +func (b *Backend) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) { + b.once.Do(b.init) + + // Check for special cased global operations. These don't route + // to a specific Path. + switch req.Operation { + case logical.RenewOperation: + fallthrough + case logical.RevokeOperation: + return b.handleRevokeRenew(ctx, req) + case logical.RollbackOperation: + return b.handleRollback(ctx, req) + } + + // If the path is empty and it is a help operation, handle that. + if req.Path == "" && req.Operation == logical.HelpOperation { + return b.handleRootHelp() + } + + // Find the matching route + path, captures := b.route(req.Path) + if path == nil { + return nil, logical.ErrUnsupportedPath + } + + // Check if a feature is required and if the license has that feature + if path.FeatureRequired != license.FeatureNone { + hasFeature := b.system.HasFeature(path.FeatureRequired) + if !hasFeature { + return nil, logical.CodedError(401, "Feature Not Enabled") + } + } + + // Build up the data for the route, with the URL taking priority + // for the fields over the PUT data. + raw := make(map[string]interface{}, len(path.Fields)) + for k, v := range req.Data { + raw[k] = v + } + for k, v := range captures { + raw[k] = v + } + + // Look up the callback for this operation, preferring the + // path.Operations definition if present. 
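+	// A Path may declare handlers either through the newer Operations map
+	// (each operation wrapped in an OperationHandler with its properties) or
+	// through the legacy Callbacks map; both forms are consulted below.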
+ var callback OperationFunc + + if path.Operations != nil { + if op, ok := path.Operations[req.Operation]; ok { + + // Check whether this operation should be forwarded + if sysView := b.System(); sysView != nil { + replState := sysView.ReplicationState() + props := op.Properties() + + if props.ForwardPerformanceStandby && replState.HasState(consts.ReplicationPerformanceStandby) { + return nil, logical.ErrReadOnly + } + + if props.ForwardPerformanceSecondary && !sysView.LocalMount() && replState.HasState(consts.ReplicationPerformanceSecondary) { + return nil, logical.ErrReadOnly + } + } + + callback = op.Handler() + } + } else { + callback = path.Callbacks[req.Operation] + } + ok := callback != nil + + if !ok { + if req.Operation == logical.HelpOperation { + callback = path.helpCallback(b) + ok = true + } + } + if !ok { + return nil, logical.ErrUnsupportedOperation + } + + fd := FieldData{ + Raw: raw, + Schema: path.Fields} + + if req.Operation != logical.HelpOperation { + err := fd.Validate() + if err != nil { + return nil, err + } + } + + return callback(ctx, req, &fd) +} + +// SpecialPaths is the logical.Backend implementation. +func (b *Backend) SpecialPaths() *logical.Paths { + return b.PathsSpecial +} + +// Cleanup is used to release resources and prepare to stop the backend +func (b *Backend) Cleanup(ctx context.Context) { + if b.Clean != nil { + b.Clean(ctx) + } +} + +// InvalidateKey is used to clear caches and reset internal state on key changes +func (b *Backend) InvalidateKey(ctx context.Context, key string) { + if b.Invalidate != nil { + b.Invalidate(ctx, key) + } +} + +// Setup is used to initialize the backend with the initial backend configuration +func (b *Backend) Setup(ctx context.Context, config *logical.BackendConfig) error { + b.logger = config.Logger + b.system = config.System + return nil +} + +// GetRandomReader returns an io.Reader to use for generating key material in +// backends. If the backend has access to an external entropy source it will +// return that, otherwise it returns crypto/rand.Reader. +func (b *Backend) GetRandomReader() io.Reader { + if sourcer, ok := b.System().(entropy.Sourcer); ok { + return entropy.NewReader(sourcer) + } + + return rand.Reader +} + +// Logger can be used to get the logger. If no logger has been set, +// the logs will be discarded. +func (b *Backend) Logger() log.Logger { + if b.logger != nil { + return b.logger + } + + return logging.NewVaultLoggerWithWriter(ioutil.Discard, log.NoLevel) +} + +// System returns the backend's system view. +func (b *Backend) System() logical.SystemView { + return b.system +} + +// Type returns the backend type +func (b *Backend) Type() logical.BackendType { + return b.BackendType +} + +// Route looks up the path that would be used for a given path string. +func (b *Backend) Route(path string) *Path { + result, _ := b.route(path) + return result +} + +// Secret is used to look up the secret with the given type. 
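+//
+// For example (illustrative): b.Secret("creds") returns the *Secret whose
+// Type field equals "creds", or nil when no such secret type is registered.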
+func (b *Backend) Secret(k string) *Secret { + for _, s := range b.Secrets { + if s.Type == k { + return s + } + } + + return nil +} + +func (b *Backend) init() { + b.pathsRe = make([]*regexp.Regexp, len(b.Paths)) + for i, p := range b.Paths { + if len(p.Pattern) == 0 { + panic(fmt.Sprintf("Routing pattern cannot be blank")) + } + // Automatically anchor the pattern + if p.Pattern[0] != '^' { + p.Pattern = "^" + p.Pattern + } + if p.Pattern[len(p.Pattern)-1] != '$' { + p.Pattern = p.Pattern + "$" + } + b.pathsRe[i] = regexp.MustCompile(p.Pattern) + } +} + +func (b *Backend) route(path string) (*Path, map[string]string) { + b.once.Do(b.init) + + for i, re := range b.pathsRe { + matches := re.FindStringSubmatch(path) + if matches == nil { + continue + } + + // We have a match, determine the mapping of the captures and + // store that for returning. + var captures map[string]string + path := b.Paths[i] + if captureNames := re.SubexpNames(); len(captureNames) > 1 { + captures = make(map[string]string, len(captureNames)) + for i, name := range captureNames { + if name != "" { + captures[name] = matches[i] + } + } + } + + return path, captures + } + + return nil, nil +} + +func (b *Backend) handleRootHelp() (*logical.Response, error) { + // Build a mapping of the paths and get the paths alphabetized to + // make the output prettier. + pathsMap := make(map[string]*Path) + paths := make([]string, 0, len(b.Paths)) + for i, p := range b.pathsRe { + paths = append(paths, p.String()) + pathsMap[p.String()] = b.Paths[i] + } + sort.Strings(paths) + + // Build the path data + pathData := make([]rootHelpTemplatePath, 0, len(paths)) + for _, route := range paths { + p := pathsMap[route] + pathData = append(pathData, rootHelpTemplatePath{ + Path: route, + Help: strings.TrimSpace(p.HelpSynopsis), + }) + } + + help, err := executeTemplate(rootHelpTemplate, &rootHelpTemplateData{ + Help: strings.TrimSpace(b.Help), + Paths: pathData, + }) + if err != nil { + return nil, err + } + + // Build OpenAPI response for the entire backend + doc := NewOASDocument() + if err := documentPaths(b, doc); err != nil { + b.Logger().Warn("error generating OpenAPI", "error", err) + } + + return logical.HelpResponse(help, nil, doc), nil +} + +func (b *Backend) handleRevokeRenew(ctx context.Context, req *logical.Request) (*logical.Response, error) { + // Special case renewal of authentication for credential backends + if req.Operation == logical.RenewOperation && req.Auth != nil { + return b.handleAuthRenew(ctx, req) + } + + if req.Secret == nil { + return nil, fmt.Errorf("request has no secret") + } + + rawSecretType, ok := req.Secret.InternalData["secret_type"] + if !ok { + return nil, fmt.Errorf("secret is unsupported by this backend") + } + secretType, ok := rawSecretType.(string) + if !ok { + return nil, fmt.Errorf("secret is unsupported by this backend") + } + + secret := b.Secret(secretType) + if secret == nil { + return nil, fmt.Errorf("secret is unsupported by this backend") + } + + switch req.Operation { + case logical.RenewOperation: + return secret.HandleRenew(ctx, req) + case logical.RevokeOperation: + return secret.HandleRevoke(ctx, req) + default: + return nil, fmt.Errorf("invalid operation for revoke/renew: %q", req.Operation) + } +} + +// handleRollback invokes the PeriodicFunc set on the backend. It also does a +// WAL rollback operation. +func (b *Backend) handleRollback(ctx context.Context, req *logical.Request) (*logical.Response, error) { + // Response is not expected from the periodic operation. 
+	var resp *logical.Response
+
+	merr := new(multierror.Error)
+	if b.PeriodicFunc != nil {
+		if err := b.PeriodicFunc(ctx, req); err != nil {
+			merr = multierror.Append(merr, err)
+		}
+	}
+
+	if b.WALRollback != nil {
+		var err error
+		resp, err = b.handleWALRollback(ctx, req)
+		if err != nil {
+			merr = multierror.Append(merr, err)
+		}
+	}
+	return resp, merr.ErrorOrNil()
+}
+
+func (b *Backend) handleAuthRenew(ctx context.Context, req *logical.Request) (*logical.Response, error) {
+	if b.AuthRenew == nil {
+		return logical.ErrorResponse("this auth type doesn't support renew"), nil
+	}
+
+	return b.AuthRenew(ctx, req, nil)
+}
+
+func (b *Backend) handleWALRollback(ctx context.Context, req *logical.Request) (*logical.Response, error) {
+	if b.WALRollback == nil {
+		return nil, logical.ErrUnsupportedOperation
+	}
+
+	var merr error
+	keys, err := ListWAL(ctx, req.Storage)
+	if err != nil {
+		return logical.ErrorResponse(err.Error()), nil
+	}
+	if len(keys) == 0 {
+		return nil, nil
+	}
+
+	// Calculate the cutoff time: only WAL entries created before this
+	// time are eligible to be rolled back.
+	age := b.WALRollbackMinAge
+	if age == 0 {
+		age = 10 * time.Minute
+	}
+	minAge := time.Now().Add(-1 * age)
+	if _, ok := req.Data["immediate"]; ok {
+		minAge = time.Now().Add(1000 * time.Hour)
+	}
+
+	for _, k := range keys {
+		entry, err := GetWAL(ctx, req.Storage, k)
+		if err != nil {
+			merr = multierror.Append(merr, err)
+			continue
+		}
+		if entry == nil {
+			continue
+		}
+
+		// If the entry isn't old enough, then don't roll it back
+		if !time.Unix(entry.CreatedAt, 0).Before(minAge) {
+			continue
+		}
+
+		// Attempt a WAL rollback
+		err = b.WALRollback(ctx, req, entry.Kind, entry.Data)
+		if err != nil {
+			err = errwrap.Wrapf(fmt.Sprintf("error rolling back %q entry: {{err}}", entry.Kind), err)
+		}
+		if err == nil {
+			err = DeleteWAL(ctx, req.Storage, k)
+		}
+		if err != nil {
+			merr = multierror.Append(merr, err)
+		}
+	}
+
+	if merr == nil {
+		return nil, nil
+	}
+
+	return logical.ErrorResponse(merr.Error()), nil
+}
+
+// FieldSchema is a basic schema to describe the format of a path field.
+type FieldSchema struct {
+	Type        FieldType
+	Default     interface{}
+	Description string
+	Required    bool
+	Deprecated  bool
+
+	// Query indicates this field will be sent as a query parameter:
+	//
+	//   /v1/foo/bar?some_param=some_value
+	//
+	// It doesn't affect handling of the value, but may be used for documentation.
+	Query bool
+
+	// AllowedValues is an optional list of permitted values for this field.
+	// This constraint is not (yet) enforced by the framework, but the list is
+	// output as part of OpenAPI generation and may affect documentation and
+	// dynamic UI generation.
+	AllowedValues []interface{}
+
+	// DisplayAttrs provides hints for UI and documentation generators. They
+	// will be included in OpenAPI output if set.
+	DisplayAttrs *DisplayAttributes
+}
+
+// DefaultOrZero returns the default value if it is set, or otherwise
+// the zero value of the type.
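+//
+// For example (illustrative): a TypeDurationSecond field with Default "1h"
+// yields 3600, while a field with no Default yields the type's zero value
+// (0 for durations).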
+func (s *FieldSchema) DefaultOrZero() interface{} { + if s.Default != nil { + switch s.Type { + case TypeDurationSecond, TypeSignedDurationSecond: + resultDur, err := parseutil.ParseDurationSecond(s.Default) + if err != nil { + return s.Type.Zero() + } + return int(resultDur.Seconds()) + + default: + return s.Default + } + } + + return s.Type.Zero() +} + +// Zero returns the correct zero-value for a specific FieldType +func (t FieldType) Zero() interface{} { + switch t { + case TypeString, TypeNameString, TypeLowerCaseString: + return "" + case TypeInt: + return 0 + case TypeBool: + return false + case TypeMap: + return map[string]interface{}{} + case TypeKVPairs: + return map[string]string{} + case TypeDurationSecond, TypeSignedDurationSecond: + return 0 + case TypeSlice: + return []interface{}{} + case TypeStringSlice, TypeCommaStringSlice: + return []string{} + case TypeCommaIntSlice: + return []int{} + case TypeHeader: + return http.Header{} + default: + panic("unknown type: " + t.String()) + } +} + +type rootHelpTemplateData struct { + Help string + Paths []rootHelpTemplatePath +} + +type rootHelpTemplatePath struct { + Path string + Help string +} + +const rootHelpTemplate = ` +## DESCRIPTION + +{{.Help}} + +## PATHS + +The following paths are supported by this backend. To view help for +any of the paths below, use the help command with any route matching +the path pattern. Note that depending on the policy of your auth token, +you may or may not be able to access certain paths. + +{{range .Paths}}{{indent 4 .Path}} +{{indent 8 .Help}} + +{{end}} + +` diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/field_data.go b/vendor/github.com/hashicorp/vault/sdk/framework/field_data.go new file mode 100644 index 00000000..46b120c4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/framework/field_data.go @@ -0,0 +1,396 @@ +package framework + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/mitchellh/mapstructure" +) + +// FieldData is the structure passed to the callback to handle a path +// containing the populated parameters for fields. This should be used +// instead of the raw (*vault.Request).Data to access data in a type-safe +// way. +type FieldData struct { + Raw map[string]interface{} + Schema map[string]*FieldSchema +} + +// Validate cycles through raw data and validate conversions in +// the schema, so we don't get an error/panic later when +// trying to get data out. Data not in the schema is not +// an error at this point, so we don't worry about it. +func (d *FieldData) Validate() error { + for field, value := range d.Raw { + + schema, ok := d.Schema[field] + if !ok { + continue + } + + switch schema.Type { + case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeSignedDurationSecond, TypeString, + TypeLowerCaseString, TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice, + TypeKVPairs, TypeCommaIntSlice, TypeHeader: + _, _, err := d.getPrimitive(field, schema) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("error converting input %v for field %q: {{err}}", value, field), err) + } + default: + return fmt.Errorf("unknown field type %q for field %q", schema.Type, field) + } + } + + return nil +} + +// Get gets the value for the given field. 
If the key is an invalid field, +// FieldData will panic. If you want a safer version of this method, use +// GetOk. If the field k is not set, the default value (if set) will be +// returned, otherwise the zero value will be returned. +func (d *FieldData) Get(k string) interface{} { + schema, ok := d.Schema[k] + if !ok { + panic(fmt.Sprintf("field %s not in the schema", k)) + } + + // If the value can't be decoded, use the zero or default value for the field + // type + value, ok := d.GetOk(k) + if !ok || value == nil { + value = schema.DefaultOrZero() + } + + return value +} + +// GetDefaultOrZero gets the default value set on the schema for the given +// field. If there is no default value set, the zero value of the type +// will be returned. +func (d *FieldData) GetDefaultOrZero(k string) interface{} { + schema, ok := d.Schema[k] + if !ok { + panic(fmt.Sprintf("field %s not in the schema", k)) + } + + return schema.DefaultOrZero() +} + +// GetFirst gets the value for the given field names, in order from first +// to last. This can be useful for fields with a current name, and one or +// more deprecated names. The second return value will be false if the keys +// are invalid or the keys are not set at all. +func (d *FieldData) GetFirst(k ...string) (interface{}, bool) { + for _, v := range k { + if result, ok := d.GetOk(v); ok { + return result, ok + } + } + return nil, false +} + +// GetOk gets the value for the given field. The second return value will be +// false if the key is invalid or the key is not set at all. If the field k is +// set and the decoded value is nil, the default or zero value +// will be returned instead. +func (d *FieldData) GetOk(k string) (interface{}, bool) { + schema, ok := d.Schema[k] + if !ok { + return nil, false + } + + result, ok, err := d.GetOkErr(k) + if err != nil { + panic(fmt.Sprintf("error reading %s: %s", k, err)) + } + + if ok && result == nil { + result = schema.DefaultOrZero() + } + + return result, ok +} + +// GetOkErr is the most conservative of all the Get methods. It returns +// whether key is set or not, but also an error value. The error value is +// non-nil if the field doesn't exist or there was an error parsing the +// field value. 
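+//
+// Usage sketch (illustrative values):
+//
+//	d := &FieldData{
+//		Raw:    map[string]interface{}{"ttl": "60"},
+//		Schema: map[string]*FieldSchema{"ttl": {Type: TypeDurationSecond}},
+//	}
+//	v, ok, err := d.GetOkErr("ttl") // v == 60, ok == true, err == nil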
+func (d *FieldData) GetOkErr(k string) (interface{}, bool, error) { + schema, ok := d.Schema[k] + if !ok { + return nil, false, fmt.Errorf("unknown field: %q", k) + } + + switch schema.Type { + case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeSignedDurationSecond, TypeString, + TypeLowerCaseString, TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice, + TypeKVPairs, TypeCommaIntSlice, TypeHeader: + return d.getPrimitive(k, schema) + default: + return nil, false, + fmt.Errorf("unknown field type %q for field %q", schema.Type, k) + } +} + +func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bool, error) { + raw, ok := d.Raw[k] + if !ok { + return nil, false, nil + } + + switch t := schema.Type; t { + case TypeBool: + var result bool + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return result, true, nil + + case TypeInt: + var result int + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return result, true, nil + + case TypeString: + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return result, true, nil + + case TypeLowerCaseString: + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return strings.ToLower(result), true, nil + + case TypeNameString: + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + matched, err := regexp.MatchString("^\\w(([\\w-.]+)?\\w)?$", result) + if err != nil { + return nil, false, err + } + if !matched { + return nil, false, errors.New("field does not match the formatting rules") + } + return result, true, nil + + case TypeMap: + var result map[string]interface{} + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return result, true, nil + + case TypeDurationSecond, TypeSignedDurationSecond: + var result int + switch inp := raw.(type) { + case nil: + return nil, false, nil + default: + dur, err := parseutil.ParseDurationSecond(inp) + if err != nil { + return nil, false, err + } + result = int(dur.Seconds()) + } + if t == TypeDurationSecond && result < 0 { + return nil, false, fmt.Errorf("cannot provide negative value '%d'", result) + } + return result, true, nil + + case TypeCommaIntSlice: + var result []int + config := &mapstructure.DecoderConfig{ + Result: &result, + WeaklyTypedInput: true, + DecodeHook: mapstructure.StringToSliceHookFunc(","), + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return nil, false, err + } + if err := decoder.Decode(raw); err != nil { + return nil, false, err + } + if len(result) == 0 { + return make([]int, 0), true, nil + } + return result, true, nil + + case TypeSlice: + var result []interface{} + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + if len(result) == 0 { + return make([]interface{}, 0), true, nil + } + return result, true, nil + + case TypeStringSlice: + rawString, ok := raw.(string) + if ok && rawString == "" { + return []string{}, true, nil + } + + var result []string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + if len(result) == 0 { + return make([]string, 0), true, nil + } + return strutil.TrimStrings(result), true, nil + + case TypeCommaStringSlice: + res, err := parseutil.ParseCommaStringSlice(raw) + if err != nil { + return 
nil, false, err + } + return res, true, nil + + case TypeKVPairs: + // First try to parse this as a map + var mapResult map[string]string + if err := mapstructure.WeakDecode(raw, &mapResult); err == nil { + return mapResult, true, nil + } + + // If map parse fails, parse as a string list of = delimited pairs + var listResult []string + if err := mapstructure.WeakDecode(raw, &listResult); err != nil { + return nil, false, err + } + + result := make(map[string]string, len(listResult)) + for _, keyPair := range listResult { + keyPairSlice := strings.SplitN(keyPair, "=", 2) + if len(keyPairSlice) != 2 || keyPairSlice[0] == "" { + return nil, false, fmt.Errorf("invalid key pair %q", keyPair) + } + result[keyPairSlice[0]] = keyPairSlice[1] + } + return result, true, nil + + case TypeHeader: + /* + + There are multiple ways a header could be provided: + + 1. As a map[string]interface{} that resolves to a map[string]string or map[string][]string, or a mix of both + because that's permitted for headers. + This mainly comes from the API. + + 2. As a string... + a. That contains JSON that originally was JSON, but then was base64 encoded. + b. That contains JSON, ex. `{"content-type":"text/json","accept":["encoding/json"]}`. + This mainly comes from the API and is used to save space while sending in the header. + + 3. As an array of strings that contains comma-delimited key-value pairs associated via a colon, + ex: `content-type:text/json`,`accept:encoding/json`. + This mainly comes from the CLI. + + We go through these sequentially below. + + */ + result := http.Header{} + + toHeader := func(resultMap map[string]interface{}) (http.Header, error) { + header := http.Header{} + for headerKey, headerValGroup := range resultMap { + switch typedHeader := headerValGroup.(type) { + case string: + header.Add(headerKey, typedHeader) + case []string: + for _, headerVal := range typedHeader { + header.Add(headerKey, headerVal) + } + case json.Number: + header.Add(headerKey, typedHeader.String()) + case []interface{}: + for _, headerVal := range typedHeader { + switch typedHeader := headerVal.(type) { + case string: + header.Add(headerKey, typedHeader) + case json.Number: + header.Add(headerKey, typedHeader.String()) + default: + // All header values should already be strings when they're being sent in. + // Even numbers and booleans will be treated as strings. + return nil, fmt.Errorf("received non-string value for header key:%s, val:%s", headerKey, headerValGroup) + } + } + default: + return nil, fmt.Errorf("unrecognized type for %s", headerValGroup) + } + } + return header, nil + } + + resultMap := make(map[string]interface{}) + + // 1. Are we getting a map from the API? + if err := mapstructure.WeakDecode(raw, &resultMap); err == nil { + result, err = toHeader(resultMap) + if err != nil { + return nil, false, err + } + return result, true, nil + } + + // 2. Are we getting a JSON string? + if headerStr, ok := raw.(string); ok { + // a. Is it base64 encoded? + headerBytes, err := base64.StdEncoding.DecodeString(headerStr) + if err != nil { + // b. It's not base64 encoded, it's a straight-out JSON string. + headerBytes = []byte(headerStr) + } + if err := jsonutil.DecodeJSON(headerBytes, &resultMap); err != nil { + return nil, false, err + } + result, err = toHeader(resultMap) + if err != nil { + return nil, false, err + } + return result, true, nil + } + + // 3. Are we getting an array of fields like "content-type:encoding/json" from the CLI? 
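+		// e.g. []string{"content-type:application/json", "accept:text/plain"}
+		// (illustrative) decodes below into an http.Header with one entry per
+		// pair.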
+	var keyPairs []interface{}
+	if err := mapstructure.WeakDecode(raw, &keyPairs); err == nil {
+		for _, keyPairIfc := range keyPairs {
+			keyPair, ok := keyPairIfc.(string)
+			if !ok {
+				return nil, false, fmt.Errorf("invalid key pair %v", keyPairIfc)
+			}
+			keyPairSlice := strings.SplitN(keyPair, ":", 2)
+			if len(keyPairSlice) != 2 || keyPairSlice[0] == "" {
+				return nil, false, fmt.Errorf("invalid key pair %q", keyPair)
+			}
+			result.Add(keyPairSlice[0], keyPairSlice[1])
+		}
+		return result, true, nil
+	}
+	return nil, false, fmt.Errorf("%v was not provided in an expected format", raw)
+
+	default:
+		panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+	}
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/field_type.go b/vendor/github.com/hashicorp/vault/sdk/framework/field_type.go
new file mode 100644
index 00000000..2e1121ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/framework/field_type.go
@@ -0,0 +1,83 @@
+package framework
+
+// FieldType is the enum of types that a field can be.
+type FieldType uint
+
+const (
+	TypeInvalid FieldType = 0
+	TypeString  FieldType = iota
+	TypeInt
+	TypeBool
+	TypeMap
+
+	// TypeDurationSecond represents a duration as seconds; this can be
+	// either an integer or a Go duration format string (e.g. 24h)
+	TypeDurationSecond
+
+	// TypeSignedDurationSecond represents a positive or negative duration
+	// as seconds; this can be either an integer or a Go duration format
+	// string (e.g. 24h).
+	TypeSignedDurationSecond
+
+	// TypeSlice represents a slice of any type
+	TypeSlice
+
+	// TypeStringSlice is a helper for TypeSlice that returns a sanitized
+	// slice of strings
+	TypeStringSlice
+
+	// TypeCommaStringSlice is a helper for TypeSlice that returns a sanitized
+	// slice of strings and also supports parsing a comma-separated list in
+	// a string field
+	TypeCommaStringSlice
+
+	// TypeLowerCaseString is a helper for TypeString that returns a lowercase
+	// version of the provided string
+	TypeLowerCaseString
+
+	// TypeNameString represents a name that is URI safe and follows specific
+	// rules. The name must start and end with an alphanumeric character, and
+	// characters in the middle can be alphanumeric, '.', or '-'.
+	TypeNameString
+
+	// TypeKVPairs allows you to represent the data as a map or a list of
+	// equal sign delimited key pairs
+	TypeKVPairs
+
+	// TypeCommaIntSlice is a helper for TypeSlice that returns a sanitized
+	// slice of Ints
+	TypeCommaIntSlice
+
+	// TypeHeader is a helper for sending request headers through to Vault.
+	// For instance, the AWS and AliCloud credential plugins both act as a
+	// benevolent MITM for a request, and the headers are sent through and
+	// parsed.
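+	// Accepted inputs include a JSON object (optionally base64-encoded) and
+	// a list of "key:value" strings; see the TypeHeader handling in
+	// field_data.go for the exact parsing rules.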
+ TypeHeader +) + +func (t FieldType) String() string { + switch t { + case TypeString: + return "string" + case TypeLowerCaseString: + return "lowercase string" + case TypeNameString: + return "name string" + case TypeInt: + return "int" + case TypeBool: + return "bool" + case TypeMap: + return "map" + case TypeKVPairs: + return "keypair" + case TypeDurationSecond, TypeSignedDurationSecond: + return "duration (sec)" + case TypeSlice, TypeStringSlice, TypeCommaStringSlice, TypeCommaIntSlice: + return "slice" + case TypeHeader: + return "header" + default: + return "unknown type" + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/lease.go b/vendor/github.com/hashicorp/vault/sdk/framework/lease.go new file mode 100644 index 00000000..f5c68b84 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/framework/lease.go @@ -0,0 +1,125 @@ +package framework + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/logical" +) + +// LeaseExtend is left for backwards compatibility for plugins. This function +// now just passes back the data that was passed into it to be processed in core. +// DEPRECATED +func LeaseExtend(backendIncrement, backendMax time.Duration, systemView logical.SystemView) OperationFunc { + return func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + switch { + case req.Auth != nil: + req.Auth.TTL = backendIncrement + req.Auth.MaxTTL = backendMax + return &logical.Response{Auth: req.Auth}, nil + case req.Secret != nil: + req.Secret.TTL = backendIncrement + req.Secret.MaxTTL = backendMax + return &logical.Response{Secret: req.Secret}, nil + } + return nil, fmt.Errorf("no lease options for request") + } +} + +// CalculateTTL takes all the user-specified, backend, and system inputs and calculates +// a TTL for a lease +func CalculateTTL(sysView logical.SystemView, increment, backendTTL, period, backendMaxTTL, explicitMaxTTL time.Duration, startTime time.Time) (ttl time.Duration, warnings []string, errors error) { + // Truncate all times to the second since that is the lowest precision for + // TTLs + now := time.Now().Truncate(time.Second) + if startTime.IsZero() { + startTime = now + } else { + startTime = startTime.Truncate(time.Second) + } + + // Use the mount's configured max unless the backend specifies + // something more restrictive (perhaps from a role configuration + // parameter) + maxTTL := sysView.MaxLeaseTTL() + if backendMaxTTL > 0 && backendMaxTTL < maxTTL { + maxTTL = backendMaxTTL + } + if explicitMaxTTL > 0 && explicitMaxTTL < maxTTL { + maxTTL = explicitMaxTTL + } + + // Should never happen, but guard anyways + if maxTTL <= 0 { + return 0, nil, fmt.Errorf("max TTL must be greater than zero") + } + + var maxValidTime time.Time + switch { + case period > 0: + // Cap the period value to the sys max_ttl value + if period > maxTTL { + warnings = append(warnings, + fmt.Sprintf("period of %q exceeded the effective max_ttl of %q; period value is capped accordingly", + humanDuration(period), humanDuration(maxTTL))) + period = maxTTL + } + ttl = period + + if explicitMaxTTL > 0 { + maxValidTime = startTime.Add(explicitMaxTTL) + } + default: + switch { + case increment > 0: + ttl = increment + case backendTTL > 0: + ttl = backendTTL + default: + ttl = sysView.DefaultLeaseTTL() + } + + // We cannot go past this time + maxValidTime = startTime.Add(maxTTL) + } + + if !maxValidTime.IsZero() { + // Determine the max valid TTL + maxValidTTL := maxValidTime.Sub(now) + + // If we are past 
the max TTL, we shouldn't be in this function...but + // fast path out if we are + if maxValidTTL < 0 { + return 0, nil, fmt.Errorf("past the max TTL, cannot renew") + } + + // If the proposed expiration is after the maximum TTL of the lease, + // cap the increment to whatever is left + if maxValidTTL-ttl < 0 { + warnings = append(warnings, + fmt.Sprintf("TTL of %q exceeded the effective max_ttl of %q; TTL value is capped accordingly", + humanDuration(ttl), humanDuration(maxValidTTL))) + ttl = maxValidTTL + } + } + + return ttl, warnings, nil +} + +// humanDuration prints the time duration without zero elements. +func humanDuration(d time.Duration) string { + if d == 0 { + return "0s" + } + + s := d.String() + if strings.HasSuffix(s, "m0s") { + s = s[:len(s)-2] + } + if idx := strings.Index(s, "h0m"); idx > 0 { + s = s[:idx+1] + s[idx+3:] + } + return s +} diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/openapi.go b/vendor/github.com/hashicorp/vault/sdk/framework/openapi.go new file mode 100644 index 00000000..1fa23899 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/framework/openapi.go @@ -0,0 +1,695 @@ +package framework + +import ( + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/version" + "github.com/mitchellh/mapstructure" +) + +// OpenAPI specification (OAS): https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md +const OASVersion = "3.0.2" + +// NewOASDocument returns an empty OpenAPI document. +func NewOASDocument() *OASDocument { + return &OASDocument{ + Version: OASVersion, + Info: OASInfo{ + Title: "HashiCorp Vault API", + Description: "HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`.", + Version: version.GetVersion().Version, + License: OASLicense{ + Name: "Mozilla Public License 2.0", + URL: "https://www.mozilla.org/en-US/MPL/2.0", + }, + }, + Paths: make(map[string]*OASPathItem), + } +} + +// NewOASDocumentFromMap builds an OASDocument from an existing map version of a document. +// If a document has been decoded from JSON or received from a plugin, it will be as a map[string]interface{} +// and needs special handling beyond the default mapstructure decoding. +func NewOASDocumentFromMap(input map[string]interface{}) (*OASDocument, error) { + + // The Responses map uses integer keys (the response code), but once translated into JSON + // (e.g. during the plugin transport) these become strings. mapstructure will not coerce these back + // to integers without a custom decode hook. + decodeHook := func(src reflect.Type, tgt reflect.Type, inputRaw interface{}) (interface{}, error) { + + // Only alter data if: + // 1. going from string to int + // 2. 
string represents an int in the status code range (100-599)
+		if src.Kind() == reflect.String && tgt.Kind() == reflect.Int {
+			if input, ok := inputRaw.(string); ok {
+				if intval, err := strconv.Atoi(input); err == nil {
+					if intval >= 100 && intval < 600 {
+						return intval, nil
+					}
+				}
+			}
+		}
+		return inputRaw, nil
+	}
+
+	doc := new(OASDocument)
+
+	config := &mapstructure.DecoderConfig{
+		DecodeHook: decodeHook,
+		Result:     doc,
+	}
+
+	decoder, err := mapstructure.NewDecoder(config)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := decoder.Decode(input); err != nil {
+		return nil, err
+	}
+
+	return doc, nil
+}
+
+type OASDocument struct {
+	Version string                  `json:"openapi" mapstructure:"openapi"`
+	Info    OASInfo                 `json:"info"`
+	Paths   map[string]*OASPathItem `json:"paths"`
+}
+
+type OASInfo struct {
+	Title       string     `json:"title"`
+	Description string     `json:"description"`
+	Version     string     `json:"version"`
+	License     OASLicense `json:"license"`
+}
+
+type OASLicense struct {
+	Name string `json:"name"`
+	URL  string `json:"url"`
+}
+
+type OASPathItem struct {
+	Description       string             `json:"description,omitempty"`
+	Parameters        []OASParameter     `json:"parameters,omitempty"`
+	Sudo              bool               `json:"x-vault-sudo,omitempty" mapstructure:"x-vault-sudo"`
+	Unauthenticated   bool               `json:"x-vault-unauthenticated,omitempty" mapstructure:"x-vault-unauthenticated"`
+	CreateSupported   bool               `json:"x-vault-createSupported,omitempty" mapstructure:"x-vault-createSupported"`
+	DisplayNavigation bool               `json:"x-vault-displayNavigation,omitempty" mapstructure:"x-vault-displayNavigation"`
+	DisplayAttrs      *DisplayAttributes `json:"x-vault-displayAttrs,omitempty" mapstructure:"x-vault-displayAttrs"`
+
+	Get    *OASOperation `json:"get,omitempty"`
+	Post   *OASOperation `json:"post,omitempty"`
+	Delete *OASOperation `json:"delete,omitempty"`
+}
+
+// NewOASOperation creates an empty OpenAPI Operation object.
+func NewOASOperation() *OASOperation {
+	return &OASOperation{
+		Responses: make(map[int]*OASResponse),
+	}
+}
+
+type OASOperation struct {
+	Summary     string               `json:"summary,omitempty"`
+	Description string               `json:"description,omitempty"`
+	OperationID string               `json:"operationId,omitempty"`
+	Tags        []string             `json:"tags,omitempty"`
+	Parameters  []OASParameter       `json:"parameters,omitempty"`
+	RequestBody *OASRequestBody      `json:"requestBody,omitempty"`
+	Responses   map[int]*OASResponse `json:"responses"`
+	Deprecated  bool                 `json:"deprecated,omitempty"`
+}
+
+type OASParameter struct {
+	Name        string     `json:"name"`
+	Description string     `json:"description,omitempty"`
+	In          string     `json:"in"`
+	Schema      *OASSchema `json:"schema,omitempty"`
+	Required    bool       `json:"required,omitempty"`
+	Deprecated  bool       `json:"deprecated,omitempty"`
+}
+
+type OASRequestBody struct {
+	Description string     `json:"description,omitempty"`
+	Content     OASContent `json:"content,omitempty"`
+}
+
+type OASContent map[string]*OASMediaTypeObject
+
+type OASMediaTypeObject struct {
+	Schema *OASSchema `json:"schema,omitempty"`
+}
+
+type OASSchema struct {
+	Type        string                `json:"type,omitempty"`
+	Description string                `json:"description,omitempty"`
+	Properties  map[string]*OASSchema `json:"properties,omitempty"`
+
+	// Required is a list of keys in Properties that are required to be present. This is a different
+	// approach than OASParameter (unfortunately), but is how JSONSchema handles 'required'.
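+	// For example (illustrative): Required: []string{"name"} marks the "name"
+	// property as mandatory in the generated request schema.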
+	Required []string `json:"required,omitempty"`
+
+	Items      *OASSchema    `json:"items,omitempty"`
+	Format     string        `json:"format,omitempty"`
+	Pattern    string        `json:"pattern,omitempty"`
+	Enum       []interface{} `json:"enum,omitempty"`
+	Default    interface{}   `json:"default,omitempty"`
+	Example    interface{}   `json:"example,omitempty"`
+	Deprecated bool          `json:"deprecated,omitempty"`
+	//DisplayName string `json:"x-vault-displayName,omitempty" mapstructure:"x-vault-displayName,omitempty"`
+	DisplayValue     interface{}        `json:"x-vault-displayValue,omitempty" mapstructure:"x-vault-displayValue,omitempty"`
+	DisplaySensitive bool               `json:"x-vault-displaySensitive,omitempty" mapstructure:"x-vault-displaySensitive,omitempty"`
+	DisplayGroup     string             `json:"x-vault-displayGroup,omitempty" mapstructure:"x-vault-displayGroup,omitempty"`
+	DisplayAttrs     *DisplayAttributes `json:"x-vault-displayAttrs,omitempty" mapstructure:"x-vault-displayAttrs,omitempty"`
+}
+
+type OASResponse struct {
+	Description string     `json:"description"`
+	Content     OASContent `json:"content,omitempty"`
+}
+
+var OASStdRespOK = &OASResponse{
+	Description: "OK",
+}
+
+var OASStdRespNoContent = &OASResponse{
+	Description: "empty body",
+}
+
+// Regex for handling optional and named parameters in paths, and string cleanup.
+// Predefined here to avoid substantial recompilation.
+
+// Capture optional path elements in ungreedy (?U) fashion
+// Both "(leases/)?renew" and "(/(?P<name>.+))?" formats are detected
+var optRe = regexp.MustCompile(`(?U)\([^(]*\)\?|\(/\(\?P<[^(]*\)\)\?`)
+
+var reqdRe = regexp.MustCompile(`\(?\?P<(\w+)>[^)]*\)?`)             // Capture required parameters, e.g. "(?P<name>regex)"
+var altRe = regexp.MustCompile(`\((.*)\|(.*)\)`)                     // Capture alternation elements, e.g. "(raw/?$|raw/(?P<path>.+))"
+var pathFieldsRe = regexp.MustCompile(`{(\w+)}`)                     // Capture OpenAPI-style named parameters, e.g. "lookup/{urltoken}",
+var cleanCharsRe = regexp.MustCompile("[()^$?]")                     // Set of regex characters that will be stripped during cleaning
+var cleanSuffixRe = regexp.MustCompile(`/\?\$?$`)                    // Path suffix patterns that will be stripped during cleaning
+var wsRe = regexp.MustCompile(`\s+`)                                 // Match whitespace, to be compressed during cleaning
+var altFieldsGroupRe = regexp.MustCompile(`\(\?P<\w+>\w+(\|\w+)+\)`) // Match named groups that limit options, e.g. "(?P<foo>a|b|c)"
+var altFieldsRe = regexp.MustCompile(`\w+(\|\w+)+`)                  // Match an options set, e.g. "a|b|c"
+var nonWordRe = regexp.MustCompile(`[^\w]+`)                         // Match a sequence of non-word characters
+
+// documentPaths parses all paths in a framework.Backend into OpenAPI paths.
+func documentPaths(backend *Backend, doc *OASDocument) error {
+	for _, p := range backend.Paths {
+		if err := documentPath(p, backend.SpecialPaths(), backend.BackendType, doc); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// documentPath parses a framework.Path into one or more OpenAPI paths.
+func documentPath(p *Path, specialPaths *logical.Paths, backendType logical.BackendType, doc *OASDocument) error {
+	var sudoPaths []string
+	var unauthPaths []string
+
+	if specialPaths != nil {
+		sudoPaths = specialPaths.Root
+		unauthPaths = specialPaths.Unauthenticated
+	}
+
+	// Convert optional parameters into distinct patterns to be processed independently.
+	paths := expandPattern(p.Pattern)
+
+	for _, path := range paths {
+		// Construct a top level PathItem which will be populated as the path is processed.
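+		// For illustration (hypothetical pattern): a framework pattern such as
+		// "roles/(?P<name>\w+)" has by now been expanded to the OpenAPI path
+		// "roles/{name}", and "name" is emitted as a required path parameter
+		// below.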
+ pi := OASPathItem{ + Description: cleanString(p.HelpSynopsis), + } + + pi.Sudo = specialPathMatch(path, sudoPaths) + pi.Unauthenticated = specialPathMatch(path, unauthPaths) + pi.DisplayAttrs = p.DisplayAttrs + + // If the newer style Operations map isn't defined, create one from the legacy fields. + operations := p.Operations + if operations == nil { + operations = make(map[logical.Operation]OperationHandler) + + for opType, cb := range p.Callbacks { + operations[opType] = &PathOperation{ + Callback: cb, + Summary: p.HelpSynopsis, + } + } + } + + // Process path and header parameters, which are common to all operations. + // Body fields will be added to individual operations. + pathFields, bodyFields := splitFields(p.Fields, path) + + for name, field := range pathFields { + location := "path" + required := true + + if field.Query { + location = "query" + required = false + } + + t := convertType(field.Type) + p := OASParameter{ + Name: name, + Description: cleanString(field.Description), + In: location, + Schema: &OASSchema{ + Type: t.baseType, + Pattern: t.pattern, + Enum: field.AllowedValues, + Default: field.Default, + DisplayAttrs: field.DisplayAttrs, + }, + Required: required, + Deprecated: field.Deprecated, + } + pi.Parameters = append(pi.Parameters, p) + } + + // Sort parameters for a stable output + sort.Slice(pi.Parameters, func(i, j int) bool { + return strings.ToLower(pi.Parameters[i].Name) < strings.ToLower(pi.Parameters[j].Name) + }) + + // Process each supported operation by building up an Operation object + // with descriptions, properties and examples from the framework.Path data. + for opType, opHandler := range operations { + props := opHandler.Properties() + if props.Unpublished { + continue + } + + if opType == logical.CreateOperation { + pi.CreateSupported = true + + // If both Create and Update are defined, only process Update. + if operations[logical.UpdateOperation] != nil { + continue + } + } + + // If both List and Read are defined, only process Read. + if opType == logical.ListOperation && operations[logical.ReadOperation] != nil { + continue + } + + op := NewOASOperation() + + op.Summary = props.Summary + op.Description = props.Description + op.Deprecated = props.Deprecated + + // Add any fields not present in the path as body parameters for POST. + if opType == logical.CreateOperation || opType == logical.UpdateOperation { + s := &OASSchema{ + Type: "object", + Properties: make(map[string]*OASSchema), + Required: make([]string, 0), + } + + for name, field := range bodyFields { + openapiField := convertType(field.Type) + if field.Required { + s.Required = append(s.Required, name) + } + + p := OASSchema{ + Type: openapiField.baseType, + Description: cleanString(field.Description), + Format: openapiField.format, + Pattern: openapiField.pattern, + Enum: field.AllowedValues, + Default: field.Default, + Deprecated: field.Deprecated, + DisplayAttrs: field.DisplayAttrs, + } + if openapiField.baseType == "array" { + p.Items = &OASSchema{ + Type: openapiField.items, + } + } + s.Properties[name] = &p + } + + // If examples were given, use the first one as the sample + // of this schema. + if len(props.Examples) > 0 { + s.Example = props.Examples[0].Data + } + + // Set the final request body. Only JSON request data is supported. 
+			if len(s.Properties) > 0 || s.Example != nil {
+				op.RequestBody = &OASRequestBody{
+					Content: OASContent{
+						"application/json": &OASMediaTypeObject{
+							Schema: s,
+						},
+					},
+				}
+			}
+		}
+
+		// LIST is represented as GET with a `list` query parameter
+		if opType == logical.ListOperation || (opType == logical.ReadOperation && operations[logical.ListOperation] != nil) {
+			op.Parameters = append(op.Parameters, OASParameter{
+				Name:        "list",
+				Description: "Return a list if `true`",
+				In:          "query",
+				Schema:      &OASSchema{Type: "string"},
+			})
+		}
+
+		// Add tags based on backend type
+		var tags []string
+		switch backendType {
+		case logical.TypeLogical:
+			tags = []string{"secrets"}
+		case logical.TypeCredential:
+			tags = []string{"auth"}
+		}
+
+		op.Tags = append(op.Tags, tags...)
+
+		// Set default responses.
+		if len(props.Responses) == 0 {
+			if opType == logical.DeleteOperation {
+				op.Responses[204] = OASStdRespNoContent
+			} else {
+				op.Responses[200] = OASStdRespOK
+			}
+		}
+
+		// Add any defined response details.
+		for code, responses := range props.Responses {
+			var description string
+			content := make(OASContent)
+
+			for i, resp := range responses {
+				if i == 0 {
+					description = resp.Description
+				}
+				if resp.Example != nil {
+					mediaType := resp.MediaType
+					if mediaType == "" {
+						mediaType = "application/json"
+					}
+
+					// create a version of the response that will not emit null items
+					cr := cleanResponse(resp.Example)
+
+					// Only one example per media type is allowed, so first one wins
+					if _, ok := content[mediaType]; !ok {
+						content[mediaType] = &OASMediaTypeObject{
+							Schema: &OASSchema{
+								Example: cr,
+							},
+						}
+					}
+				}
+			}
+
+			op.Responses[code] = &OASResponse{
+				Description: description,
+				Content:     content,
+			}
+		}
+
+		switch opType {
+		case logical.CreateOperation, logical.UpdateOperation:
+			pi.Post = op
+		case logical.ReadOperation, logical.ListOperation:
+			pi.Get = op
+		case logical.DeleteOperation:
+			pi.Delete = op
+		}
+	}
+
+	doc.Paths["/"+path] = &pi
+
+	return nil
+}
+
+func specialPathMatch(path string, specialPaths []string) bool {
+	// Test for exact or prefix match of special paths.
+	for _, sp := range specialPaths {
+		if sp == path ||
+			(strings.HasSuffix(sp, "*") && strings.HasPrefix(path, sp[0:len(sp)-1])) {
+			return true
+		}
+	}
+	return false
+}
+
+// expandPattern expands a regex pattern by generating permutations of any optional parameters
+// and changing named parameters into their {openapi} equivalents.
+func expandPattern(pattern string) []string {
+	var paths []string
+
+	// GenericNameRegex adds a regex that complicates our parsing. It is much easier to
+	// detect and remove it now than to compensate for in the other regexes.
+	//
+	// example: (?P<foo>\\w(([\\w-.]+)?\\w)?) -> (?P<foo>)
+	base := GenericNameRegex("")
+	start := strings.Index(base, ">")
+	end := strings.LastIndex(base, ")")
+	regexToRemove := ""
+	if start != -1 && end != -1 && end > start {
+		regexToRemove = base[start+1 : end]
+	}
+	pattern = strings.Replace(pattern, regexToRemove, "", -1)
+
+	// Simplify named fields that have limited options, e.g. (?P<foo>a|b|c) -> (?P<foo>.+)
+	pattern = altFieldsGroupRe.ReplaceAllStringFunc(pattern, func(s string) string {
+		return altFieldsRe.ReplaceAllString(s, ".+")
+	})
+
+	// Initialize paths with the original pattern or the halves of an
+	// alternation, which is also present in some patterns.
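+	// e.g. the pattern "(raw/?$|raw/(?P<path>.+))" (from the altRe comment
+	// above) splits into the two candidate paths "raw/?$" and
+	// "raw/(?P<path>.+)".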
+	matches := altRe.FindAllStringSubmatch(pattern, -1)
+	if len(matches) > 0 {
+		paths = []string{matches[0][1], matches[0][2]}
+	} else {
+		paths = []string{pattern}
+	}
+
+	// Expand all optional regex elements into two paths. This approach is really only useful up to 2 optional
+	// groups, but we probably don't want to deal with the exponential increase beyond that anyway.
+	for i := 0; i < len(paths); i++ {
+		p := paths[i]
+
+		// match is a 2-element slice that will have a start and end index
+		// for the left-most match of a regex of form: (lease/)?
+		match := optRe.FindStringIndex(p)
+
+		if match != nil {
+			// create a path that includes the optional element but without
+			// parenthesis or the '?' character.
+			paths[i] = p[:match[0]] + p[match[0]+1:match[1]-2] + p[match[1]:]
+
+			// create a path that excludes the optional element.
+			paths = append(paths, p[:match[0]]+p[match[1]:])
+			i--
+		}
+	}
+
+	// Replace named parameters (?P<foo>) with {foo}
+	var replacedPaths []string
+
+	for _, path := range paths {
+		result := reqdRe.FindAllStringSubmatch(path, -1)
+		if result != nil {
+			for _, p := range result {
+				par := p[1]
+				path = strings.Replace(path, p[0], fmt.Sprintf("{%s}", par), 1)
+			}
+		}
+		// Final cleanup
+		path = cleanSuffixRe.ReplaceAllString(path, "")
+		path = cleanCharsRe.ReplaceAllString(path, "")
+		replacedPaths = append(replacedPaths, path)
+	}
+
+	return replacedPaths
+}
+
+// schemaType is a subset of the JSON Schema elements used as a target
+// for conversions from Vault's standard FieldTypes.
+type schemaType struct {
+	baseType string
+	items    string
+	format   string
+	pattern  string
+}
+
+// convertType translates a FieldType into an OpenAPI type.
+// In the case of arrays, a subtype is returned as well.
+func convertType(t FieldType) schemaType {
+	ret := schemaType{}
+
+	switch t {
+	case TypeString, TypeHeader:
+		ret.baseType = "string"
+	case TypeNameString:
+		ret.baseType = "string"
+		ret.pattern = `\w([\w-.]*\w)?`
+	case TypeLowerCaseString:
+		ret.baseType = "string"
+		ret.format = "lowercase"
+	case TypeInt:
+		ret.baseType = "integer"
+	case TypeDurationSecond, TypeSignedDurationSecond:
+		ret.baseType = "integer"
+		ret.format = "seconds"
+	case TypeBool:
+		ret.baseType = "boolean"
+	case TypeMap:
+		ret.baseType = "object"
+		ret.format = "map"
+	case TypeKVPairs:
+		ret.baseType = "object"
+		ret.format = "kvpairs"
+	case TypeSlice:
+		ret.baseType = "array"
+		ret.items = "object"
+	case TypeStringSlice, TypeCommaStringSlice:
+		ret.baseType = "array"
+		ret.items = "string"
+	case TypeCommaIntSlice:
+		ret.baseType = "array"
+		ret.items = "integer"
+	default:
+		log.L().Warn("error parsing field type", "type", t)
+		ret.format = "unknown"
+	}
+
+	return ret
+}
+
+// cleanString prepares s for inclusion in the output
+func cleanString(s string) string {
+	// clean leading/trailing whitespace, and collapse whitespace runs into a single space
+	s = strings.TrimSpace(s)
+	s = wsRe.ReplaceAllString(s, " ")
+	return s
+}
+
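As a hedged illustration of the expansion described above (not part of the vendored file): expandPattern is unexported, so the sketch below assumes an in-package test (e.g. a hypothetical openapi_sketch_test.go) and the regex variables (optRe, reqdRe, etc.) defined elsewhere in openapi.go. It shows one optional group doubling the path set and a named capture becoming an {openapi} parameter.

```go
package framework

import "testing"

// Sketch: "tools/hash" + OptionalParamRegex("urlalgorithm") should expand
// into the plain path and the {braced}-parameter path.
func TestExpandPatternSketch(t *testing.T) {
	got := expandPattern("tools/hash" + OptionalParamRegex("urlalgorithm"))

	want := map[string]bool{
		"tools/hash":                true,
		"tools/hash/{urlalgorithm}": true,
	}
	if len(got) != len(want) {
		t.Fatalf("expected %d paths, got %v", len(want), got)
	}
	for _, p := range got {
		if !want[p] {
			t.Fatalf("unexpected expansion: %q", p)
		}
	}
}
```

+// splitFields partitions fields into path and body groups
+// The input pattern is expected to have been run through expandPattern,
+// with path parameters denoted in {braces}.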
+func splitFields(allFields map[string]*FieldSchema, pattern string) (pathFields, bodyFields map[string]*FieldSchema) {
+	pathFields = make(map[string]*FieldSchema)
+	bodyFields = make(map[string]*FieldSchema)
+
+	for _, match := range pathFieldsRe.FindAllStringSubmatch(pattern, -1) {
+		name := match[1]
+		pathFields[name] = allFields[name]
+	}
+
+	for name, field := range allFields {
+		if _, ok := pathFields[name]; !ok {
+			if field.Query {
+				pathFields[name] = field
+			} else {
+				bodyFields[name] = field
+			}
+		}
+	}
+
+	return pathFields, bodyFields
+}
+
+// cleanedResponse is identical to logical.Response but with nulls
+// removed from JSON encoding
+type cleanedResponse struct {
+	Secret   *logical.Secret            `json:"secret,omitempty"`
+	Auth     *logical.Auth              `json:"auth,omitempty"`
+	Data     map[string]interface{}     `json:"data,omitempty"`
+	Redirect string                     `json:"redirect,omitempty"`
+	Warnings []string                   `json:"warnings,omitempty"`
+	WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info,omitempty"`
+	Headers  map[string][]string        `json:"headers,omitempty"`
+}
+
+func cleanResponse(resp *logical.Response) *cleanedResponse {
+	return &cleanedResponse{
+		Secret:   resp.Secret,
+		Auth:     resp.Auth,
+		Data:     resp.Data,
+		Redirect: resp.Redirect,
+		Warnings: resp.Warnings,
+		WrapInfo: resp.WrapInfo,
+		Headers:  resp.Headers,
+	}
+}
+
+// CreateOperationIDs generates unique operationIds for all paths/methods.
+// The transform will convert path/method into camelcase. e.g.:
+//
+// /sys/tools/random/{urlbytes} -> postSysToolsRandomUrlbytes
+//
+// In the unlikely case of duplicate IDs, a numeric suffix is added:
+// postSysToolsRandomUrlbytes_2
+//
+// An optional user-provided suffix ("context") may also be appended.
+func (d *OASDocument) CreateOperationIDs(context string) {
+	opIDCount := make(map[string]int)
+	var paths []string
+
+	// traverse paths in a stable order to ensure stable output
+	for path := range d.Paths {
+		paths = append(paths, path)
+	}
+	sort.Strings(paths)
+
+	for _, path := range paths {
+		pi := d.Paths[path]
+		for _, method := range []string{"get", "post", "delete"} {
+			var oasOperation *OASOperation
+			switch method {
+			case "get":
+				oasOperation = pi.Get
+			case "post":
+				oasOperation = pi.Post
+			case "delete":
+				oasOperation = pi.Delete
+			}
+
+			if oasOperation == nil {
+				continue
+			}
+
+			// Space-split on non-words, title case everything, recombine
+			opID := nonWordRe.ReplaceAllString(strings.ToLower(path), " ")
+			opID = strings.Title(opID)
+			opID = method + strings.Replace(opID, " ", "", -1)
+
+			// deduplicate operationIds. This is a safeguard, since generated IDs should
+			// already be unique given our current path naming conventions.
+			opIDCount[opID]++
+			if opIDCount[opID] > 1 {
+				opID = fmt.Sprintf("%s_%d", opID, opIDCount[opID])
+			}
+
+			if context != "" {
+				opID += "_" + context
+			}
+
+			oasOperation.OperationID = opID
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/path.go b/vendor/github.com/hashicorp/vault/sdk/framework/path.go
new file mode 100644
index 00000000..8f5dd5be
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/framework/path.go
@@ -0,0 +1,332 @@
+package framework
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/sdk/helper/license"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// Helper which returns a generic regex string for creating endpoint patterns
+// that are identified by the given name in the backends
+func GenericNameRegex(name string) string {
+	return fmt.Sprintf("(?P<%s>\\w(([\\w-.]+)?\\w)?)", name)
+}
+
+// GenericNameWithAtRegex returns a generic regex that allows alphanumeric
+// characters along with -, . and @.
+func GenericNameWithAtRegex(name string) string {
+	return fmt.Sprintf("(?P<%s>\\w(([\\w-.@]+)?\\w)?)", name)
+}
+
+// Helper which returns a regex string for optionally accepting a field
+// from the API URL
+func OptionalParamRegex(name string) string {
+	return fmt.Sprintf("(/(?P<%s>.+))?", name)
+}
+
+// Helper which returns a regex string for capturing an entire endpoint path
+// as the given name.
+func MatchAllRegex(name string) string {
+	return fmt.Sprintf(`(?P<%s>.*)`, name)
+}
+
+// PathAppend is a helper for appending lists of paths into a single
+// list.
+func PathAppend(paths ...[]*Path) []*Path {
+	result := make([]*Path, 0, 10)
+	for _, ps := range paths {
+		result = append(result, ps...)
+	}
+
+	return result
+}
+
+// Path is a single path that the backend responds to.
+type Path struct {
+	// Pattern is the pattern of the URL that matches this path.
+	//
+	// This should be a valid regular expression. Named captures will be
+	// exposed as fields that should map to a schema in Fields. If a named
+	// capture is not a field in the Fields map, then it will be ignored.
+	Pattern string
+
+	// Fields is the mapping of data fields to a schema describing that
+	// field. Named captures in the Pattern also map to fields. If a named
+	// capture name matches a PUT body name, the named capture takes
+	// priority.
+	//
+	// Note that only named capture fields are available in every operation,
+	// whereas all fields are available in the Write operation.
+	Fields map[string]*FieldSchema
+
+	// Operations is the set of operations supported and the associated OperationHandler.
+	//
+	// If both Create and Update operations are present, documentation and examples from
+	// the Update definition will be used. Similarly if both Read and List are present,
+	// Read will be used for documentation.
+	Operations map[logical.Operation]OperationHandler
+
+	// Callbacks are the set of callbacks that are called for a given
+	// operation. If a callback for a specific operation is not present,
+	// then logical.ErrUnsupportedOperation is automatically generated.
+	//
+	// The help operation is the only operation that the Path will
+	// automatically handle if the Help field is set. If both the Help
+	// field is set and there is a callback registered here, then the
+	// callback will be called.
+	//
+	// Deprecated: Operations should be used instead and will take priority if present.
+ Callbacks map[logical.Operation]OperationFunc + + // ExistenceCheck, if implemented, is used to query whether a given + // resource exists or not. This is used for ACL purposes: if an Update + // action is specified, and the existence check returns false, the action + // is not allowed since the resource must first be created. The reverse is + // also true. If not specified, the Update action is forced and the user + // must have UpdateCapability on the path. + ExistenceCheck ExistenceFunc + + // FeatureRequired, if implemented, will validate if the given feature is + // enabled for the set of paths + FeatureRequired license.Features + + // Deprecated denotes that this path is considered deprecated. This may + // be reflected in help and documentation. + Deprecated bool + + // Help is text describing how to use this path. This will be used + // to auto-generate the help operation. The Path will automatically + // generate a parameter listing and URL structure based on the + // regular expression, so the help text should just contain a description + // of what happens. + // + // HelpSynopsis is a one-sentence description of the path. This will + // be automatically line-wrapped at 80 characters. + // + // HelpDescription is a long-form description of the path. This will + // be automatically line-wrapped at 80 characters. + HelpSynopsis string + HelpDescription string + + // DisplayAttrs provides hints for UI and documentation generators. They + // will be included in OpenAPI output if set. + DisplayAttrs *DisplayAttributes +} + +// OperationHandler defines and describes a specific operation handler. +type OperationHandler interface { + Handler() OperationFunc + Properties() OperationProperties +} + +// OperationProperties describes an operation for documentation, help text, +// and other clients. A Summary should always be provided, whereas other +// fields can be populated as needed. +type OperationProperties struct { + // Summary is a brief (usually one line) description of the operation. + Summary string + + // Description is extended documentation of the operation and may contain + // Markdown-formatted text markup. + Description string + + // Examples provides samples of the expected request data. The most + // relevant example should be first in the list, as it will be shown in + // documentation that supports only a single example. + Examples []RequestExample + + // Responses provides a list of response description for a given response + // code. The most relevant response should be first in the list, as it will + // be shown in documentation that only allows a single example. + Responses map[int][]Response + + // Unpublished indicates that this operation should not appear in public + // documentation or help text. The operation may still have documentation + // attached that can be used internally. + Unpublished bool + + // Deprecated indicates that this operation should be avoided. + Deprecated bool + + // ForwardPerformanceStandby indicates that this path should not be processed + // on a performance standby node, and should be forwarded to the active node instead. + ForwardPerformanceStandby bool + + // ForwardPerformanceSecondary indicates that this path should not be processed + // on a performance secondary node, and should be forwarded to the active node instead. + ForwardPerformanceSecondary bool + + // DisplayAttrs provides hints for UI and documentation generators. They + // will be included in OpenAPI output if set. 
+	DisplayAttrs *DisplayAttributes
+}
+
+type DisplayAttributes struct {
+	// Name is the name of the field suitable as a label or documentation heading.
+	Name string `json:"name,omitempty"`
+
+	// Value is a sample value to display for this field. This may be used
+	// to indicate a default value, but it is for display only and completely separate
+	// from any Default member handling.
+	Value interface{} `json:"value,omitempty"`
+
+	// Sensitive indicates that the value should be masked by default in the UI.
+	Sensitive bool `json:"sensitive,omitempty"`
+
+	// Navigation indicates that the path should be available as a navigation tab
+	Navigation bool `json:"navigation,omitempty"`
+
+	// ItemType is the type of item this path operates on
+	ItemType string `json:"itemType,omitempty"`
+
+	// Group is the suggested UI group to place this field in.
+	Group string `json:"group,omitempty"`
+
+	// Action is the verb to use for the operation.
+	Action string `json:"action,omitempty"`
+
+	// EditType is the type of form field needed for a property
+	// e.g. "textarea" or "file"
+	EditType string `json:"editType,omitempty"`
+}
+
+// RequestExample is an example of request data.
+type RequestExample struct {
+	Description string                 // optional description of the request
+	Data        map[string]interface{} // map version of sample JSON request data
+
+	// Optional example response to the sample request. This approach is considered
+	// provisional for now, and this field may be changed or removed.
+	Response *Response
+}
+
+// Response describes, and optionally demonstrates, an operation response.
+type Response struct {
+	Description string            // summary of the response; should always be provided
+	MediaType   string            // media type of the response, defaulting to "application/json" if empty
+	Example     *logical.Response // example response data
+}
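Since PathOperation (defined next) is the usual concrete handler for the Operations map, here is a hedged wiring sketch. It is not from the vendored file; the backend type, the rolePath helper, and pathRoleRead are hypothetical names used only to show the shape.

```go
package example

import (
	"context"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

// Hypothetical backend embedding framework.Backend.
type backend struct{ *framework.Backend }

func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	return &logical.Response{Data: map[string]interface{}{"name": d.Get("name")}}, nil
}

// Sketch: a Path using the newer Operations map with a PathOperation,
// giving the OpenAPI generator a Summary to publish.
func rolePath(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "roles/" + framework.GenericNameRegex("name"),
		Fields: map[string]*framework.FieldSchema{
			"name": {Type: framework.TypeString, Description: "Name of the role."},
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathRoleRead,
				Summary:  "Read a role definition.",
			},
		},
		HelpSynopsis: "Manage roles.",
	}
}
```

+// PathOperation is a concrete implementation of OperationHandler.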
+type PathOperation struct {
+	Callback                    OperationFunc
+	Summary                     string
+	Description                 string
+	Examples                    []RequestExample
+	Responses                   map[int][]Response
+	Unpublished                 bool
+	Deprecated                  bool
+	ForwardPerformanceSecondary bool
+	ForwardPerformanceStandby   bool
+}
+
+func (p *PathOperation) Handler() OperationFunc {
+	return p.Callback
+}
+
+func (p *PathOperation) Properties() OperationProperties {
+	return OperationProperties{
+		Summary:                     strings.TrimSpace(p.Summary),
+		Description:                 strings.TrimSpace(p.Description),
+		Responses:                   p.Responses,
+		Examples:                    p.Examples,
+		Unpublished:                 p.Unpublished,
+		Deprecated:                  p.Deprecated,
+		ForwardPerformanceSecondary: p.ForwardPerformanceSecondary,
+		ForwardPerformanceStandby:   p.ForwardPerformanceStandby,
+	}
+}
+
+func (p *Path) helpCallback(b *Backend) OperationFunc {
+	return func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) {
+		var tplData pathTemplateData
+		tplData.Request = req.Path
+		tplData.RoutePattern = p.Pattern
+		tplData.Synopsis = strings.TrimSpace(p.HelpSynopsis)
+		if tplData.Synopsis == "" {
+			tplData.Synopsis = "<no synopsis>"
+		}
+		tplData.Description = strings.TrimSpace(p.HelpDescription)
+		if tplData.Description == "" {
+			tplData.Description = "<no description>"
+		}
+
+		// Alphabetize the fields
+		fieldKeys := make([]string, 0, len(p.Fields))
+		for k := range p.Fields {
+			fieldKeys = append(fieldKeys, k)
+		}
+		sort.Strings(fieldKeys)
+
+		// Build the field help
+		tplData.Fields = make([]pathTemplateFieldData, len(fieldKeys))
+		for i, k := range fieldKeys {
+			schema := p.Fields[k]
+			description := strings.TrimSpace(schema.Description)
+			if description == "" {
+				description = "<no description>"
+			}
+
+			tplData.Fields[i] = pathTemplateFieldData{
+				Key:         k,
+				Type:        schema.Type.String(),
+				Description: description,
+				Deprecated:  schema.Deprecated,
+			}
+		}
+
+		help, err := executeTemplate(pathHelpTemplate, &tplData)
+		if err != nil {
+			return nil, errwrap.Wrapf("error executing template: {{err}}", err)
+		}
+
+		// Build OpenAPI response for this path
+		doc := NewOASDocument()
+		if err := documentPath(p, b.SpecialPaths(), b.BackendType, doc); err != nil {
+			b.Logger().Warn("error generating OpenAPI", "error", err)
+		}
+
+		return logical.HelpResponse(help, nil, doc), nil
+	}
+}
+
+type pathTemplateData struct {
+	Request      string
+	RoutePattern string
+	Synopsis     string
+	Description  string
+	Fields       []pathTemplateFieldData
+}
+
+type pathTemplateFieldData struct {
+	Key         string
+	Type        string
+	Deprecated  bool
+	Description string
+	URL         bool
+}
+
+const pathHelpTemplate = `
+Request:        {{.Request}}
+Matching Route: {{.RoutePattern}}
+
+{{.Synopsis}}
+
+{{ if .Fields -}}
+## PARAMETERS
+{{range .Fields}}
+{{indent 4 .Key}} ({{.Type}})
+{{if .Deprecated}}
+{{printf "(DEPRECATED) %s" .Description | indent 8}}
+{{else}}
+{{indent 8 .Description}}
+{{end}}{{end}}{{end}}
+## DESCRIPTION
+
+{{.Description}}
+`
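As a hedged orientation aid (not part of the vendored file): for a path with pattern `roles/(?P<name>...)`, synopsis "Manage roles.", and one `name` field, pathHelpTemplate renders help output roughly like the following; the values are illustrative.

```
Request:        roles/myrole
Matching Route: roles/(?P<name>\w(([\w-.]+)?\w)?)

Manage roles.

## PARAMETERS

    name (string)

        Name of the role.

## DESCRIPTION

<no description>
```

diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/path_map.go b/vendor/github.com/hashicorp/vault/sdk/framework/path_map.go
new file mode 100644
index 00000000..8e1b9186
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/framework/path_map.go
@@ -0,0 +1,286 @@
+package framework
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+
+	saltpkg "github.com/hashicorp/vault/sdk/helper/salt"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// DEPRECATED: Don't use this. It's too inflexible, nearly impossible to use
+// with some modern Vault features, and imposes specific API designs.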
+//
+// PathMap can be used to generate a path that stores mappings in the
+// storage. It is a structure that also exports functions for querying the
+// mappings.
+//
+// The primary use case for this is for credential providers to do their
+// mapping to policies.
+type PathMap struct {
+	Prefix        string
+	Name          string
+	Schema        map[string]*FieldSchema
+	CaseSensitive bool
+	Salt          *saltpkg.Salt
+	SaltFunc      func(context.Context) (*saltpkg.Salt, error)
+
+	once sync.Once
+}
+
+func (p *PathMap) init() {
+	if p.Prefix == "" {
+		p.Prefix = "map"
+	}
+
+	if p.Schema == nil {
+		p.Schema = map[string]*FieldSchema{
+			"value": &FieldSchema{
+				Type:        TypeString,
+				Description: fmt.Sprintf("Value for %s mapping", p.Name),
+			},
+		}
+	}
+}
+
+// pathStruct returns the pathStruct for this mapping
+func (p *PathMap) pathStruct(ctx context.Context, s logical.Storage, k string) (*PathStruct, error) {
+	p.once.Do(p.init)
+
+	// If we don't care about casing, store everything lowercase
+	if !p.CaseSensitive {
+		k = strings.ToLower(k)
+	}
+
+	// The original key before any salting
+	origKey := k
+
+	// If we have a salt, apply it before lookup
+	salt := p.Salt
+	var err error
+	if p.SaltFunc != nil {
+		salt, err = p.SaltFunc(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if salt != nil {
+		k = "s" + salt.SaltIDHashFunc(k, saltpkg.SHA256Hash)
+	}
+
+	finalName := fmt.Sprintf("map/%s/%s", p.Name, k)
+	ps := &PathStruct{
+		Name:   finalName,
+		Schema: p.Schema,
+	}
+
+	if !strings.HasPrefix(origKey, "s") && k != origKey {
+		// Ensure that no matter what happens what is returned is the final
+		// path
+		defer func() {
+			ps.Name = finalName
+		}()
+
+		//
+		// Check for unsalted version and upgrade if so
+		//
+
+		// Generate the unsalted name
+		unsaltedName := fmt.Sprintf("map/%s/%s", p.Name, origKey)
+		// Set the path struct to use the unsalted name
+		ps.Name = unsaltedName
+
+		val, err := ps.Get(ctx, s)
+		if err != nil {
+			return nil, err
+		}
+		// If not nil, we have an unsalted entry -- upgrade it
+		if val != nil {
+			// Set the path struct to use the desired final name
+			ps.Name = finalName
+			err = ps.Put(ctx, s, val)
+			if err != nil {
+				return nil, err
+			}
+			// Set it back to the old path and delete
+			ps.Name = unsaltedName
+			err = ps.Delete(ctx, s)
+			if err != nil {
+				return nil, err
+			}
+			// We'll set this in the deferred function but doesn't hurt here
+			ps.Name = finalName
+		}
+
+		//
+		// Check for SHA1 hashed version and upgrade if so
+		//
+
+		// Generate the SHA1 hash suffixed path name
+		sha1SuffixedName := fmt.Sprintf("map/%s/%s", p.Name, salt.SaltID(origKey))
+
+		// Set the path struct to use the SHA1 hash suffixed path name
+		ps.Name = sha1SuffixedName
+
+		val, err = ps.Get(ctx, s)
+		if err != nil {
+			return nil, err
+		}
+		// If not nil, we have an SHA1 hash suffixed entry -- upgrade it
+		if val != nil {
+			// Set the path struct to use the desired final name
+			ps.Name = finalName
+			err = ps.Put(ctx, s, val)
+			if err != nil {
+				return nil, err
+			}
+			// Set it back to the old path and delete
+			ps.Name = sha1SuffixedName
+			err = ps.Delete(ctx, s)
+			if err != nil {
+				return nil, err
+			}
+			// We'll set this in the deferred function but doesn't hurt here
+			ps.Name = finalName
+		}
+	}
+
+	return ps, nil
+}
+
+// Get reads a value out of the mapping
+func (p *PathMap) Get(ctx context.Context, s logical.Storage, k string) (map[string]interface{}, error) {
+	ps, err := p.pathStruct(ctx, s, k)
+	if err != nil {
+		return nil, err
+	}
+	return ps.Get(ctx, s)
+}
+
+// Put writes a value into the mapping
+func (p *PathMap) Put(ctx context.Context, s logical.Storage, k string, v map[string]interface{}) error {
+	ps, err := p.pathStruct(ctx, s, k)
+	if err != nil {
+		return err
+	}
+	return ps.Put(ctx, s, v)
+}
+
+// Delete removes a value from the mapping
+func (p *PathMap) Delete(ctx context.Context, s logical.Storage, k string) error {
+	ps, err := p.pathStruct(ctx, s, k)
+	if err != nil {
+		return err
+	}
+	return ps.Delete(ctx, s)
+}
+
+// List reads the keys under a given path
+func (p *PathMap) List(ctx context.Context, s logical.Storage, prefix string) ([]string, error) {
+	stripPrefix := fmt.Sprintf("struct/map/%s/", p.Name)
+	fullPrefix := fmt.Sprintf("%s%s", stripPrefix, prefix)
+	out, err := s.List(ctx, fullPrefix)
+	if err != nil {
+		return nil, err
+	}
+	stripped := make([]string, len(out))
+	for idx, k := range out {
+		stripped[idx] = strings.TrimPrefix(k, stripPrefix)
+	}
+	return stripped, nil
+}
+
+// Paths are the paths to append to the Backend paths.
+func (p *PathMap) Paths() []*Path {
+	p.once.Do(p.init)
+
+	// Build the schema by simply adding the "key"
+	schema := make(map[string]*FieldSchema)
+	for k, v := range p.Schema {
+		schema[k] = v
+	}
+	schema["key"] = &FieldSchema{
+		Type:        TypeString,
+		Description: fmt.Sprintf("Key for the %s mapping", p.Name),
+	}
+
+	return []*Path{
+		&Path{
+			Pattern: fmt.Sprintf("%s/%s/?$", p.Prefix, p.Name),
+
+			Callbacks: map[logical.Operation]OperationFunc{
+				logical.ListOperation: p.pathList(),
+				logical.ReadOperation: p.pathList(),
+			},
+
+			HelpSynopsis: fmt.Sprintf("Read mappings for %s", p.Name),
+		},
+
+		&Path{
+			Pattern: fmt.Sprintf(`%s/%s/(?P<key>[-\w]+)`, p.Prefix, p.Name),
+
+			Fields: schema,
+
+			Callbacks: map[logical.Operation]OperationFunc{
+				logical.CreateOperation: p.pathSingleWrite(),
+				logical.ReadOperation:   p.pathSingleRead(),
+				logical.UpdateOperation: p.pathSingleWrite(),
+				logical.DeleteOperation: p.pathSingleDelete(),
+			},
+
+			HelpSynopsis: fmt.Sprintf("Read/write/delete a single %s mapping", p.Name),
+
+			ExistenceCheck: p.pathSingleExistenceCheck(),
+		},
+	}
+}
+
+func (p *PathMap) pathList() OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
+		keys, err := p.List(ctx, req.Storage, "")
+		if err != nil {
+			return nil, err
+		}
+
+		return logical.ListResponse(keys), nil
+	}
+}
+
+func (p *PathMap) pathSingleRead() OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
+		v, err := p.Get(ctx, req.Storage, d.Get("key").(string))
+		if err != nil {
+			return nil, err
+		}
+
+		return &logical.Response{
+			Data: v,
+		}, nil
+	}
+}
+
+func (p *PathMap) pathSingleWrite() OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
+		err := p.Put(ctx, req.Storage, d.Get("key").(string), d.Raw)
+		return nil, err
+	}
+}
+
+func (p *PathMap) pathSingleDelete() OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
+		err := p.Delete(ctx, req.Storage, d.Get("key").(string))
+		return nil, err
+	}
+}
+
+func (p *PathMap) pathSingleExistenceCheck() ExistenceFunc {
+	return func(ctx context.Context, req *logical.Request, d *FieldData) (bool, error) {
+		v, err := p.Get(ctx, req.Storage, d.Get("key").(string))
+		if err != nil {
+			return false, err
+		}
+		return v != nil, nil
+	}
+}
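Although deprecated, PathMap still appears in older credential backends. A hedged wiring sketch follows; it is not from the vendored file, and the "apps" mapping name and schema are illustrative only.

```go
package example

import (
	"github.com/hashicorp/vault/sdk/framework"
)

// Sketch: an app-id style mapping of app names to policies. PathMap
// contributes map/apps and map/apps/{key} endpoints via m.Paths().
func appsBackend() *framework.Backend {
	m := &framework.PathMap{
		Name: "apps",
		Schema: map[string]*framework.FieldSchema{
			"value": {
				Type:        framework.TypeString,
				Description: "Comma-separated policies for this app",
			},
		},
	}

	return &framework.Backend{
		Paths: framework.PathAppend(m.Paths()),
	}
}
```

diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/path_struct.go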
b/vendor/github.com/hashicorp/vault/sdk/framework/path_struct.go new file mode 100644 index 00000000..2a2848e5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/framework/path_struct.go @@ -0,0 +1,124 @@ +package framework + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// PathStruct can be used to generate a path that stores a struct +// in the storage. This structure is a map[string]interface{} but the +// types are set according to the schema in this structure. +type PathStruct struct { + Name string + Path string + Schema map[string]*FieldSchema + HelpSynopsis string + HelpDescription string + + Read bool +} + +// Get reads the structure. +func (p *PathStruct) Get(ctx context.Context, s logical.Storage) (map[string]interface{}, error) { + entry, err := s.Get(ctx, fmt.Sprintf("struct/%s", p.Name)) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result map[string]interface{} + if err := jsonutil.DecodeJSON(entry.Value, &result); err != nil { + return nil, err + } + + return result, nil +} + +// Put writes the structure. +func (p *PathStruct) Put(ctx context.Context, s logical.Storage, v map[string]interface{}) error { + bytes, err := json.Marshal(v) + if err != nil { + return err + } + + return s.Put(ctx, &logical.StorageEntry{ + Key: fmt.Sprintf("struct/%s", p.Name), + Value: bytes, + }) +} + +// Delete removes the structure. +func (p *PathStruct) Delete(ctx context.Context, s logical.Storage) error { + return s.Delete(ctx, fmt.Sprintf("struct/%s", p.Name)) +} + +// Paths are the paths to append to the Backend paths. +func (p *PathStruct) Paths() []*Path { + // The single path we support to read/write this config + path := &Path{ + Pattern: p.Path, + Fields: p.Schema, + + Callbacks: map[logical.Operation]OperationFunc{ + logical.CreateOperation: p.pathWrite(), + logical.UpdateOperation: p.pathWrite(), + logical.DeleteOperation: p.pathDelete(), + }, + + ExistenceCheck: p.pathExistenceCheck(), + + HelpSynopsis: p.HelpSynopsis, + HelpDescription: p.HelpDescription, + } + + // If we support reads, add that + if p.Read { + path.Callbacks[logical.ReadOperation] = p.pathRead() + } + + return []*Path{path} +} + +func (p *PathStruct) pathRead() OperationFunc { + return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) { + v, err := p.Get(ctx, req.Storage) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: v, + }, nil + } +} + +func (p *PathStruct) pathWrite() OperationFunc { + return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) { + err := p.Put(ctx, req.Storage, d.Raw) + return nil, err + } +} + +func (p *PathStruct) pathDelete() OperationFunc { + return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) { + err := p.Delete(ctx, req.Storage) + return nil, err + } +} + +func (p *PathStruct) pathExistenceCheck() ExistenceFunc { + return func(ctx context.Context, req *logical.Request, d *FieldData) (bool, error) { + v, err := p.Get(ctx, req.Storage) + if err != nil { + return false, err + } + + return v != nil, nil + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/policy_map.go b/vendor/github.com/hashicorp/vault/sdk/framework/policy_map.go new file mode 100644 index 00000000..7657b4b0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/framework/policy_map.go @@ -0,0 +1,68 @@ 
+package framework
+
+import (
+	"context"
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// DEPRECATED: Don't use this. It's too inflexible, nearly impossible to use
+// with some modern Vault features, and imposes specific API designs.
+//
+// PolicyMap is a specialization of PathMap that expects the values to
+// be lists of policies. This assists in querying and loading policies
+// from the PathMap.
+type PolicyMap struct {
+	PathMap
+
+	DefaultKey string
+	PolicyKey  string
+}
+
+func (p *PolicyMap) Policies(ctx context.Context, s logical.Storage, names ...string) ([]string, error) {
+	policyKey := "value"
+	if p.PolicyKey != "" {
+		policyKey = p.PolicyKey
+	}
+
+	if p.DefaultKey != "" {
+		newNames := make([]string, len(names)+1)
+		newNames[0] = p.DefaultKey
+		copy(newNames[1:], names)
+		names = newNames
+	}
+
+	set := make(map[string]struct{})
+	for _, name := range names {
+		v, err := p.Get(ctx, s, name)
+		if err != nil {
+			return nil, err
+		}
+
+		valuesRaw, ok := v[policyKey]
+		if !ok {
+			continue
+		}
+
+		values, ok := valuesRaw.(string)
+		if !ok {
+			continue
+		}
+
+		for _, p := range strings.Split(values, ",") {
+			if p = strings.TrimSpace(p); p != "" {
+				set[p] = struct{}{}
+			}
+		}
+	}
+
+	list := make([]string, 0, len(set))
+	for k := range set {
+		list = append(list, k)
+	}
+	sort.Strings(list)
+
+	return list, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/secret.go b/vendor/github.com/hashicorp/vault/sdk/framework/secret.go
new file mode 100644
index 00000000..d338e06f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/framework/secret.go
@@ -0,0 +1,91 @@
+package framework
+
+import (
+	"context"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// Secret is a type of secret that can be returned from a backend.
+type Secret struct {
+	// Type is the name of this secret type. This is used to setup the
+	// vault ID and to look up the proper secret structure when revocation/
+	// renewal happens. Once this is set this should not be changed.
+	//
+	// The format of this must match (case insensitive): ^a-Z0-9_$
+	Type string
+
+	// Fields is the mapping of data fields and schema that comprise
+	// the structure of this secret.
+	Fields map[string]*FieldSchema
+
+	// DefaultDuration is the default value for the duration of the lease for
+	// this secret. This can be manually overwritten with the result of
+	// Response().
+	//
+	// If these aren't set, Vault core will set a default lease period which
+	// may come from a mount tuning.
+	DefaultDuration time.Duration
+
+	// Renew is the callback called to renew this secret. If Renew is
+	// not specified then renewable is set to false in the secret.
+	// See lease.go for helpers for this value.
+	Renew OperationFunc
+
+	// Revoke is the callback called to revoke this secret. This is required.
+	Revoke OperationFunc
+}
+
+func (s *Secret) Renewable() bool {
+	return s.Renew != nil
+}
+
+func (s *Secret) Response(
+	data, internal map[string]interface{}) *logical.Response {
+	internalData := make(map[string]interface{})
+	for k, v := range internal {
+		internalData[k] = v
+	}
+	internalData["secret_type"] = s.Type
+
+	return &logical.Response{
+		Secret: &logical.Secret{
+			LeaseOptions: logical.LeaseOptions{
+				TTL:       s.DefaultDuration,
+				Renewable: s.Renewable(),
+			},
+			InternalData: internalData,
+		},
+
+		Data: data,
+	}
+}
+
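A hedged sketch of how a backend typically uses Secret (not part of the vendored file; the "example_keys" type, the backend type, and both callbacks are hypothetical). An issuing handler would return b.Secret("example_keys").Response(data, internal) so Vault can route renewal and revocation back to these callbacks.

```go
package example

import (
	"context"
	"time"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

type backend struct{ *framework.Backend }

// Sketch: a dynamic-credential secret type with renew/revoke stubs.
func secretExampleKeys(b *backend) *framework.Secret {
	return &framework.Secret{
		Type: "example_keys",
		Fields: map[string]*framework.FieldSchema{
			"access_key": {Type: framework.TypeString, Description: "Issued access key"},
		},
		DefaultDuration: time.Hour,
		Renew:           b.exampleRenew,
		Revoke:          b.exampleRevoke,
	}
}

func (b *backend) exampleRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	// Accept the renewal with the existing lease options.
	return &logical.Response{Secret: req.Secret}, nil
}

func (b *backend) exampleRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	// Delete the remote credential here.
	return nil, nil
}
```

+// HandleRenew is the request handler for renewing this secret.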
+func (s *Secret) HandleRenew(ctx context.Context, req *logical.Request) (*logical.Response, error) { + if !s.Renewable() { + return nil, logical.ErrUnsupportedOperation + } + + data := &FieldData{ + Raw: req.Data, + Schema: s.Fields, + } + + return s.Renew(ctx, req, data) +} + +// HandleRevoke is the request handler for revoking this secret. +func (s *Secret) HandleRevoke(ctx context.Context, req *logical.Request) (*logical.Response, error) { + data := &FieldData{ + Raw: req.Data, + Schema: s.Fields, + } + + if s.Revoke != nil { + return s.Revoke(ctx, req, data) + } + + return nil, logical.ErrUnsupportedOperation +} diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/template.go b/vendor/github.com/hashicorp/vault/sdk/framework/template.go new file mode 100644 index 00000000..3abdd624 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/framework/template.go @@ -0,0 +1,42 @@ +package framework + +import ( + "bufio" + "bytes" + "strings" + "text/template" + + "github.com/hashicorp/errwrap" +) + +func executeTemplate(tpl string, data interface{}) (string, error) { + // Define the functions + funcs := map[string]interface{}{ + "indent": funcIndent, + } + + // Parse the help template + t, err := template.New("root").Funcs(funcs).Parse(tpl) + if err != nil { + return "", errwrap.Wrapf("error parsing template: {{err}}", err) + } + + // Execute the template and store the output + var buf bytes.Buffer + if err := t.Execute(&buf, data); err != nil { + return "", errwrap.Wrapf("error executing template: {{err}}", err) + } + + return strings.TrimSpace(buf.String()), nil +} + +func funcIndent(count int, text string) string { + var buf bytes.Buffer + prefix := strings.Repeat(" ", count) + scan := bufio.NewScanner(strings.NewReader(text)) + for scan.Scan() { + buf.WriteString(prefix + scan.Text() + "\n") + } + + return strings.TrimRight(buf.String(), "\n") +} diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/testing.go b/vendor/github.com/hashicorp/vault/sdk/framework/testing.go new file mode 100644 index 00000000..a00a3241 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/framework/testing.go @@ -0,0 +1,15 @@ +package framework + +import ( + "testing" +) + +// TestBackendRoutes is a helper to test that all the given routes will +// route properly in the backend. +func TestBackendRoutes(t *testing.T, b *Backend, rs []string) { + for _, r := range rs { + if b.Route(r) == nil { + t.Fatalf("bad route: %s", r) + } + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/wal.go b/vendor/github.com/hashicorp/vault/sdk/framework/wal.go new file mode 100644 index 00000000..7e7bb1af --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/framework/wal.go @@ -0,0 +1,101 @@ +package framework + +import ( + "context" + "encoding/json" + "strings" + "time" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// WALPrefix is the prefix within Storage where WAL entries will be written. +const WALPrefix = "wal/" + +type WALEntry struct { + ID string `json:"-"` + Kind string `json:"type"` + Data interface{} `json:"data"` + CreatedAt int64 `json:"created_at"` +} + +// PutWAL writes some data to the WAL. +// +// The kind parameter is used by the framework to allow users to store +// multiple kinds of WAL data and to easily disambiguate what data they're +// expecting. 
+//
+// Data within the WAL that is uncommitted (CommitWAL hasn't been called)
+// will be given to the rollback callback when a rollback operation is
+// received, allowing the backend to clean up some partial states.
+//
+// The data must be JSON encodable.
+//
+// This returns a unique ID that can be used to reference this WAL data.
+// WAL data cannot be modified. You can only add to the WAL and commit existing
+// WAL entries.
+func PutWAL(ctx context.Context, s logical.Storage, kind string, data interface{}) (string, error) {
+	value, err := json.Marshal(&WALEntry{
+		Kind:      kind,
+		Data:      data,
+		CreatedAt: time.Now().UTC().Unix(),
+	})
+	if err != nil {
+		return "", err
+	}
+
+	id, err := uuid.GenerateUUID()
+	if err != nil {
+		return "", err
+	}
+
+	return id, s.Put(ctx, &logical.StorageEntry{
+		Key:   WALPrefix + id,
+		Value: value,
+	})
+}
+
+// GetWAL reads a specific entry from the WAL. If the entry doesn't exist,
+// then a nil value is returned.
+//
+// The kind, value, and error are returned.
+func GetWAL(ctx context.Context, s logical.Storage, id string) (*WALEntry, error) {
+	entry, err := s.Get(ctx, WALPrefix+id)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+
+	var raw WALEntry
+	if err := jsonutil.DecodeJSON(entry.Value, &raw); err != nil {
+		return nil, err
+	}
+	raw.ID = id
+
+	return &raw, nil
+}
+
+// DeleteWAL commits the WAL entry with the given ID. Once committed,
+// it is assumed that the operation was a success and doesn't need to
+// be rolled back.
+func DeleteWAL(ctx context.Context, s logical.Storage, id string) error {
+	return s.Delete(ctx, WALPrefix+id)
+}
+
+// ListWAL lists all the entries in the WAL.
+func ListWAL(ctx context.Context, s logical.Storage) ([]string, error) {
+	keys, err := s.List(ctx, WALPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	for i, k := range keys {
+		keys[i] = strings.TrimPrefix(k, WALPrefix)
+	}
+
+	return keys, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/base62/base62.go b/vendor/github.com/hashicorp/vault/sdk/helper/base62/base62.go
new file mode 100644
index 00000000..57a76d44
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/base62/base62.go
@@ -0,0 +1,50 @@
+// Package base62 provides utilities for working with base62 strings.
+// base62 strings will only contain characters: 0-9, a-z, A-Z
+package base62
+
+import (
+	"crypto/rand"
+	"io"
+
+	uuid "github.com/hashicorp/go-uuid"
+)
+
+const charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+const csLen = byte(len(charset))
+
+// Random generates a random string using base-62 characters.
+// Resulting entropy is ~5.95 bits/character.
+func Random(length int) (string, error) {
+	return RandomWithReader(length, rand.Reader)
+}
+
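A minimal usage sketch of the helper above (not part of the vendored file); the 24-character length is an arbitrary illustration.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/base62"
)

// A 24-character base62 token carries roughly 24 * 5.95 ≈ 143 bits of
// entropy, comfortably enough for an unguessable identifier.
func main() {
	token, err := base62.Random(24)
	if err != nil {
		panic(err)
	}
	fmt.Println(token)
}
```

+// RandomWithReader generates a random string using base-62 characters and a given reader.
+// Resulting entropy is ~5.95 bits/character.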
+func RandomWithReader(length int, reader io.Reader) (string, error) { + if length == 0 { + return "", nil + } + output := make([]byte, 0, length) + + // Request a bit more than length to reduce the chance + // of needing more than one batch of random bytes + batchSize := length + length/4 + + for { + buf, err := uuid.GenerateRandomBytesWithReader(batchSize, reader) + if err != nil { + return "", err + } + + for _, b := range buf { + // Avoid bias by using a value range that's a multiple of 62 + if b < (csLen * 4) { + output = append(output, charset[b%csLen]) + + if len(output) == length { + return string(output), nil + } + } + } + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go new file mode 100644 index 00000000..4a35f88d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go @@ -0,0 +1,806 @@ +package certutil + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net" + "net/url" + "strconv" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/mitchellh/mapstructure" + "golang.org/x/crypto/cryptobyte" + cbasn1 "golang.org/x/crypto/cryptobyte/asn1" +) + +// GetHexFormatted returns the byte buffer formatted in hex with +// the specified separator between bytes. +func GetHexFormatted(buf []byte, sep string) string { + var ret bytes.Buffer + for _, cur := range buf { + if ret.Len() > 0 { + fmt.Fprintf(&ret, sep) + } + fmt.Fprintf(&ret, "%02x", cur) + } + return ret.String() +} + +// ParseHexFormatted returns the raw bytes from a formatted hex string +func ParseHexFormatted(in, sep string) []byte { + var ret bytes.Buffer + var err error + var inBits int64 + inBytes := strings.Split(in, sep) + for _, inByte := range inBytes { + if inBits, err = strconv.ParseInt(inByte, 16, 8); err != nil { + return nil + } + ret.WriteByte(byte(inBits)) + } + return ret.Bytes() +} + +// GetSubjKeyID returns the subject key ID, e.g. the SHA1 sum +// of the marshaled public key +func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) { + if privateKey == nil { + return nil, errutil.InternalError{Err: "passed-in private key is nil"} + } + + marshaledKey, err := x509.MarshalPKIXPublicKey(privateKey.Public()) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)} + } + + subjKeyID := sha1.Sum(marshaledKey) + + return subjKeyID[:], nil +} + +// ParsePKIMap takes a map (for instance, the Secret.Data +// returned from the PKI backend) and returns a ParsedCertBundle. +func ParsePKIMap(data map[string]interface{}) (*ParsedCertBundle, error) { + result := &CertBundle{} + err := mapstructure.Decode(data, result) + if err != nil { + return nil, errutil.UserError{Err: err.Error()} + } + + return result.ToParsedCertBundle() +} + +// ParsePKIJSON takes a JSON-encoded string and returns a ParsedCertBundle. +// +// This can be either the output of an +// issue call from the PKI backend or just its data member; or, +// JSON not coming from the PKI backend. 
+func ParsePKIJSON(input []byte) (*ParsedCertBundle, error) { + result := &CertBundle{} + err := jsonutil.DecodeJSON(input, &result) + + if err == nil { + return result.ToParsedCertBundle() + } + + var secret Secret + err = jsonutil.DecodeJSON(input, &secret) + + if err == nil { + return ParsePKIMap(secret.Data) + } + + return nil, errutil.UserError{Err: "unable to parse out of either secret data or a secret object"} +} + +// ParsePEMBundle takes a string of concatenated PEM-format certificate +// and private key values and decodes/parses them, checking validity along +// the way. The first certificate must be the subject certificate and issuing +// certificates may follow. There must be at most one private key. +func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) { + if len(pemBundle) == 0 { + return nil, errutil.UserError{Err: "empty pem bundle"} + } + + pemBytes := []byte(pemBundle) + var pemBlock *pem.Block + parsedBundle := &ParsedCertBundle{} + var certPath []*CertBlock + + for len(pemBytes) > 0 { + pemBlock, pemBytes = pem.Decode(pemBytes) + if pemBlock == nil { + return nil, errutil.UserError{Err: "no data found in PEM block"} + } + + if signer, err := x509.ParseECPrivateKey(pemBlock.Bytes); err == nil { + if parsedBundle.PrivateKeyType != UnknownPrivateKey { + return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"} + } + parsedBundle.PrivateKeyFormat = ECBlock + parsedBundle.PrivateKeyType = ECPrivateKey + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + parsedBundle.PrivateKey = signer + + } else if signer, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err == nil { + if parsedBundle.PrivateKeyType != UnknownPrivateKey { + return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"} + } + parsedBundle.PrivateKeyType = RSAPrivateKey + parsedBundle.PrivateKeyFormat = PKCS1Block + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + parsedBundle.PrivateKey = signer + } else if signer, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes); err == nil { + parsedBundle.PrivateKeyFormat = PKCS8Block + + if parsedBundle.PrivateKeyType != UnknownPrivateKey { + return nil, errutil.UserError{Err: "More than one private key given; provide only one private key in the bundle"} + } + switch signer := signer.(type) { + case *rsa.PrivateKey: + parsedBundle.PrivateKey = signer + parsedBundle.PrivateKeyType = RSAPrivateKey + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + case *ecdsa.PrivateKey: + parsedBundle.PrivateKey = signer + parsedBundle.PrivateKeyType = ECPrivateKey + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + } + } else if certificates, err := x509.ParseCertificates(pemBlock.Bytes); err == nil { + certPath = append(certPath, &CertBlock{ + Certificate: certificates[0], + Bytes: pemBlock.Bytes, + }) + } + } + + for i, certBlock := range certPath { + if i == 0 { + parsedBundle.Certificate = certBlock.Certificate + parsedBundle.CertificateBytes = certBlock.Bytes + } else { + parsedBundle.CAChain = append(parsedBundle.CAChain, certBlock) + } + } + + if err := parsedBundle.Verify(); err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("verification of parsed bundle failed: %s", err)} + } + + return parsedBundle, nil +} + +// GeneratePrivateKey generates a private key with the specified type and key bits +func GeneratePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer) error { + var err error + var privateKeyType PrivateKeyType + var 
privateKeyBytes []byte + var privateKey crypto.Signer + + switch keyType { + case "rsa": + privateKeyType = RSAPrivateKey + privateKey, err = rsa.GenerateKey(rand.Reader, keyBits) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating RSA private key: %v", err)} + } + privateKeyBytes = x509.MarshalPKCS1PrivateKey(privateKey.(*rsa.PrivateKey)) + case "ec": + privateKeyType = ECPrivateKey + var curve elliptic.Curve + switch keyBits { + case 224: + curve = elliptic.P224() + case 256: + curve = elliptic.P256() + case 384: + curve = elliptic.P384() + case 521: + curve = elliptic.P521() + default: + return errutil.UserError{Err: fmt.Sprintf("unsupported bit length for EC key: %d", keyBits)} + } + privateKey, err = ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating EC private key: %v", err)} + } + privateKeyBytes, err = x509.MarshalECPrivateKey(privateKey.(*ecdsa.PrivateKey)) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error marshalling EC private key: %v", err)} + } + default: + return errutil.UserError{Err: fmt.Sprintf("unknown key type: %s", keyType)} + } + + container.SetParsedPrivateKey(privateKey, privateKeyType, privateKeyBytes) + return nil +} + +// GenerateSerialNumber generates a serial number suitable for a certificate +func GenerateSerialNumber() (*big.Int, error) { + serial, err := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error generating serial number: %v", err)} + } + return serial, nil +} + +// ComparePublicKeys compares two public keys and returns true if they match +func ComparePublicKeys(key1Iface, key2Iface crypto.PublicKey) (bool, error) { + switch key1Iface.(type) { + case *rsa.PublicKey: + key1 := key1Iface.(*rsa.PublicKey) + key2, ok := key2Iface.(*rsa.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if key1.N.Cmp(key2.N) != 0 || + key1.E != key2.E { + return false, nil + } + return true, nil + + case *ecdsa.PublicKey: + key1 := key1Iface.(*ecdsa.PublicKey) + key2, ok := key2Iface.(*ecdsa.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if key1.X.Cmp(key2.X) != 0 || + key1.Y.Cmp(key2.Y) != 0 { + return false, nil + } + key1Params := key1.Params() + key2Params := key2.Params() + if key1Params.P.Cmp(key2Params.P) != 0 || + key1Params.N.Cmp(key2Params.N) != 0 || + key1Params.B.Cmp(key2Params.B) != 0 || + key1Params.Gx.Cmp(key2Params.Gx) != 0 || + key1Params.Gy.Cmp(key2Params.Gy) != 0 || + key1Params.BitSize != key2Params.BitSize { + return false, nil + } + return true, nil + + default: + return false, fmt.Errorf("cannot compare key with type %T", key1Iface) + } +} + +// ParsePublicKeyPEM is used to parse RSA and ECDSA public keys from PEMs +func ParsePublicKeyPEM(data []byte) (interface{}, error) { + block, data := pem.Decode(data) + if block != nil { + var rawKey interface{} + var err error + if rawKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + rawKey = cert.PublicKey + } else { + return nil, err + } + } + + if rsaPublicKey, ok := rawKey.(*rsa.PublicKey); ok { + return rsaPublicKey, nil + } + if ecPublicKey, ok := rawKey.(*ecdsa.PublicKey); ok { + return ecPublicKey, nil + } + } + + return nil, errors.New("data does not contain any valid 
RSA or ECDSA public keys") +} + +// addPolicyIdentifiers adds certificate policies extension +// +func AddPolicyIdentifiers(data *CreationBundle, certTemplate *x509.Certificate) { + for _, oidstr := range data.Params.PolicyIdentifiers { + oid, err := StringToOid(oidstr) + if err == nil { + certTemplate.PolicyIdentifiers = append(certTemplate.PolicyIdentifiers, oid) + } + } +} + +// addExtKeyUsageOids adds custom extended key usage OIDs to certificate +func AddExtKeyUsageOids(data *CreationBundle, certTemplate *x509.Certificate) { + for _, oidstr := range data.Params.ExtKeyUsageOIDs { + oid, err := StringToOid(oidstr) + if err == nil { + certTemplate.UnknownExtKeyUsage = append(certTemplate.UnknownExtKeyUsage, oid) + } + } +} + +func HandleOtherCSRSANs(in *x509.CertificateRequest, sans map[string][]string) error { + certTemplate := &x509.Certificate{ + DNSNames: in.DNSNames, + IPAddresses: in.IPAddresses, + EmailAddresses: in.EmailAddresses, + URIs: in.URIs, + } + if err := HandleOtherSANs(certTemplate, sans); err != nil { + return err + } + if len(certTemplate.ExtraExtensions) > 0 { + for _, v := range certTemplate.ExtraExtensions { + in.ExtraExtensions = append(in.ExtraExtensions, v) + } + } + return nil +} + +func HandleOtherSANs(in *x509.Certificate, sans map[string][]string) error { + // If other SANs is empty we return which causes normal Go stdlib parsing + // of the other SAN types + if len(sans) == 0 { + return nil + } + + var rawValues []asn1.RawValue + + // We need to generate an IMPLICIT sequence for compatibility with OpenSSL + // -- it's an open question what the default for RFC 5280 actually is, see + // https://github.com/openssl/openssl/issues/5091 -- so we have to use + // cryptobyte because using the asn1 package's marshaling always produces + // an EXPLICIT sequence. Note that asn1 is way too magical according to + // agl, and cryptobyte is modeled after the CBB/CBS bits that agl put into + // boringssl. + for oid, vals := range sans { + for _, val := range vals { + var b cryptobyte.Builder + oidStr, err := StringToOid(oid) + if err != nil { + return err + } + b.AddASN1ObjectIdentifier(oidStr) + b.AddASN1(cbasn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddASN1(cbasn1.UTF8String, func(b *cryptobyte.Builder) { + b.AddBytes([]byte(val)) + }) + }) + m, err := b.Bytes() + if err != nil { + return err + } + rawValues = append(rawValues, asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: m}) + } + } + + // If other SANs is empty we return which causes normal Go stdlib parsing + // of the other SAN types + if len(rawValues) == 0 { + return nil + } + + // Append any existing SANs, sans marshalling + rawValues = append(rawValues, marshalSANs(in.DNSNames, in.EmailAddresses, in.IPAddresses, in.URIs)...) 
+
+	// Marshal and add to ExtraExtensions
+	ext := pkix.Extension{
+		// This is the defined OID for subjectAltName
+		Id: asn1.ObjectIdentifier{2, 5, 29, 17},
+	}
+	var err error
+	ext.Value, err = asn1.Marshal(rawValues)
+	if err != nil {
+		return err
+	}
+	in.ExtraExtensions = append(in.ExtraExtensions, ext)
+
+	return nil
+}
+
+// Note: Taken from the Go source code since it's not public, and used in the
+// modified function below (which also uses these consts upstream)
+const (
+	nameTypeEmail = 1
+	nameTypeDNS   = 2
+	nameTypeURI   = 6
+	nameTypeIP    = 7
+)
+
+// Note: Taken from the Go source code since it's not public, plus changed to not marshal.
+// marshalSANs marshals a list of addresses into the contents of an X.509
+// SubjectAlternativeName extension.
+func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) []asn1.RawValue {
+	var rawValues []asn1.RawValue
+	for _, name := range dnsNames {
+		rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)})
+	}
+	for _, email := range emailAddresses {
+		rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)})
+	}
+	for _, rawIP := range ipAddresses {
+		// If possible, we always want to encode IPv4 addresses in 4 bytes.
+		ip := rawIP.To4()
+		if ip == nil {
+			ip = rawIP
+		}
+		rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip})
+	}
+	for _, uri := range uris {
+		rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uri.String())})
+	}
+	return rawValues
+}
+
+func StringToOid(in string) (asn1.ObjectIdentifier, error) {
+	split := strings.Split(in, ".")
+	ret := make(asn1.ObjectIdentifier, 0, len(split))
+	for _, v := range split {
+		i, err := strconv.Atoi(v)
+		if err != nil {
+			return nil, err
+		}
+		ret = append(ret, i)
+	}
+	return asn1.ObjectIdentifier(ret), nil
+}
+
+func ValidateKeyTypeLength(keyType string, keyBits int) error {
+	switch keyType {
+	case "rsa":
+		switch keyBits {
+		case 2048:
+		case 4096:
+		case 8192:
+		default:
+			return fmt.Errorf("unsupported bit length for RSA key: %d", keyBits)
+		}
+	case "ec":
+		switch keyBits {
+		case 224:
+		case 256:
+		case 384:
+		case 521:
+		default:
+			return fmt.Errorf("unsupported bit length for EC key: %d", keyBits)
+		}
+	case "any":
+	default:
+		return fmt.Errorf("unknown key type %s", keyType)
+	}
+
+	return nil
+}
+
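A hedged sketch exercising the two helpers above (not part of the vendored file); the OID value is an illustrative private-enterprise arc, not a real assignment.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

func main() {
	// Parse a dotted OID string into an asn1.ObjectIdentifier, suitable
	// for a certificate template's PolicyIdentifiers.
	oid, err := certutil.StringToOid("1.3.6.1.4.1.44954.1.1")
	if err != nil {
		panic(err)
	}
	fmt.Println(oid) // 1.3.6.1.4.1.44954.1.1

	// Reject unsupported key parameters before attempting generation.
	if err := certutil.ValidateKeyTypeLength("ec", 256); err != nil {
		panic(err)
	}
}
```

+// Performs the heavy lifting of creating a certificate. Returns
+// a fully-filled-in ParsedCertBundle.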
+func CreateCertificate(data *CreationBundle) (*ParsedCertBundle, error) { + var err error + result := &ParsedCertBundle{} + + serialNumber, err := GenerateSerialNumber() + if err != nil { + return nil, err + } + + if err := GeneratePrivateKey(data.Params.KeyType, + data.Params.KeyBits, + result); err != nil { + return nil, err + } + + subjKeyID, err := GetSubjKeyID(result.PrivateKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error getting subject key ID: %s", err)} + } + + certTemplate := &x509.Certificate{ + SerialNumber: serialNumber, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: data.Params.NotAfter, + IsCA: false, + SubjectKeyId: subjKeyID, + Subject: data.Params.Subject, + DNSNames: data.Params.DNSNames, + EmailAddresses: data.Params.EmailAddresses, + IPAddresses: data.Params.IPAddresses, + URIs: data.Params.URIs, + } + if data.Params.NotBeforeDuration > 0 { + certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) + } + + if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + // Add this before calling addKeyUsages + if data.SigningBundle == nil { + certTemplate.IsCA = true + } else if data.Params.BasicConstraintsValidForNonCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = false + } + + // This will only be filled in from the generation paths + if len(data.Params.PermittedDNSDomains) > 0 { + certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains + certTemplate.PermittedDNSDomainsCritical = true + } + + AddPolicyIdentifiers(data, certTemplate) + + AddKeyUsages(data, certTemplate) + + AddExtKeyUsageOids(data, certTemplate) + + certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates + certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints + certTemplate.OCSPServer = data.Params.URLs.OCSPServers + + var certBytes []byte + if data.SigningBundle != nil { + switch data.SigningBundle.PrivateKeyType { + case RSAPrivateKey: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case ECPrivateKey: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + } + + caCert := data.SigningBundle.Certificate + certTemplate.AuthorityKeyId = caCert.SubjectKeyId + + certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, result.PrivateKey.Public(), data.SigningBundle.PrivateKey) + } else { + // Creating a self-signed root + if data.Params.MaxPathLength == 0 { + certTemplate.MaxPathLen = 0 + certTemplate.MaxPathLenZero = true + } else { + certTemplate.MaxPathLen = data.Params.MaxPathLength + } + + switch data.Params.KeyType { + case "rsa": + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case "ec": + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + } + + certTemplate.AuthorityKeyId = subjKeyID + certTemplate.BasicConstraintsValid = true + certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey) + } + + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CertificateBytes = certBytes + result.Certificate, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} + } + + if data.SigningBundle != nil { + if len(data.SigningBundle.Certificate.AuthorityKeyId) > 0 
&& + !bytes.Equal(data.SigningBundle.Certificate.AuthorityKeyId, data.SigningBundle.Certificate.SubjectKeyId) { + + result.CAChain = []*CertBlock{ + &CertBlock{ + Certificate: data.SigningBundle.Certificate, + Bytes: data.SigningBundle.CertificateBytes, + }, + } + result.CAChain = append(result.CAChain, data.SigningBundle.CAChain...) + } + } + + return result, nil +} + +var oidExtensionBasicConstraints = []int{2, 5, 29, 19} + +// CreateCSR creates a CSR. It is currently only meant for use when +// generating an intermediate certificate. +func CreateCSR(data *CreationBundle, addBasicConstraints bool) (*ParsedCSRBundle, error) { + var err error + result := &ParsedCSRBundle{} + + if err := GeneratePrivateKey(data.Params.KeyType, + data.Params.KeyBits, + result); err != nil { + return nil, err + } + + // Like many root CAs, other information is ignored + csrTemplate := &x509.CertificateRequest{ + Subject: data.Params.Subject, + DNSNames: data.Params.DNSNames, + EmailAddresses: data.Params.EmailAddresses, + IPAddresses: data.Params.IPAddresses, + URIs: data.Params.URIs, + } + + if err := HandleOtherCSRSANs(csrTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + if addBasicConstraints { + type basicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` + } + val, err := asn1.Marshal(basicConstraints{IsCA: true, MaxPathLen: -1}) + if err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()} + } + ext := pkix.Extension{ + Id: oidExtensionBasicConstraints, + Value: val, + Critical: true, + } + csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, ext) + } + + switch data.Params.KeyType { + case "rsa": + csrTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case "ec": + csrTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + } + + csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, result.PrivateKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate request: %s", err)} + } + + result.CSRBytes = csr + result.CSR, err = x509.ParseCertificateRequest(csr) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate request: %v", err)} + } + + return result, nil +} + +// SignCertificate performs the heavy lifting of generating a certificate +// from a CSR. It returns a ParsedCertBundle sans private keys.
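+// +// A minimal calling sketch (illustrative only, not part of the upstream docs; +// the CreationBundle fields are assumed to be populated by the caller): +// +// bundle := &CreationBundle{ +// Params: params, // *CreationParameters +// SigningBundle: caInfo, // *CAInfoBundle for the issuing CA +// CSR: csr, // *x509.CertificateRequest +// } +// parsed, err := SignCertificate(bundle) +// if err != nil { +// // errutil.UserError signals bad input; errutil.InternalError an internal failure +// }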
+func SignCertificate(data *CreationBundle) (*ParsedCertBundle, error) { + switch { + case data == nil: + return nil, errutil.UserError{Err: "nil data bundle given to SignCertificate"} + case data.Params == nil: + return nil, errutil.UserError{Err: "nil parameters given to SignCertificate"} + case data.SigningBundle == nil: + return nil, errutil.UserError{Err: "nil signing bundle given to SignCertificate"} + case data.CSR == nil: + return nil, errutil.UserError{Err: "nil csr given to SignCertificate"} + } + + err := data.CSR.CheckSignature() + if err != nil { + return nil, errutil.UserError{Err: "request signature invalid"} + } + + result := &ParsedCertBundle{} + + serialNumber, err := GenerateSerialNumber() + if err != nil { + return nil, err + } + + marshaledKey, err := x509.MarshalPKIXPublicKey(data.CSR.PublicKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error marshaling public key: %s", err)} + } + subjKeyID := sha1.Sum(marshaledKey) + + caCert := data.SigningBundle.Certificate + + certTemplate := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: data.Params.Subject, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: data.Params.NotAfter, + SubjectKeyId: subjKeyID[:], + AuthorityKeyId: caCert.SubjectKeyId, + } + if data.Params.NotBeforeDuration > 0 { + certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) + } + + switch data.SigningBundle.PrivateKeyType { + case RSAPrivateKey: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case ECPrivateKey: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + } + + if data.Params.UseCSRValues { + certTemplate.Subject = data.CSR.Subject + certTemplate.Subject.ExtraNames = certTemplate.Subject.Names + + certTemplate.DNSNames = data.CSR.DNSNames + certTemplate.EmailAddresses = data.CSR.EmailAddresses + certTemplate.IPAddresses = data.CSR.IPAddresses + certTemplate.URIs = data.CSR.URIs + + for _, name := range data.CSR.Extensions { + if !name.Id.Equal(oidExtensionBasicConstraints) { + certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) + } + } + + } else { + certTemplate.DNSNames = data.Params.DNSNames + certTemplate.EmailAddresses = data.Params.EmailAddresses + certTemplate.IPAddresses = data.Params.IPAddresses + certTemplate.URIs = data.Params.URIs + } + + if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + AddPolicyIdentifiers(data, certTemplate) + + AddKeyUsages(data, certTemplate) + + AddExtKeyUsageOids(data, certTemplate) + + var certBytes []byte + + certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates + certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints + certTemplate.OCSPServer = data.SigningBundle.URLs.OCSPServers + + if data.Params.IsCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = true + + if data.SigningBundle.Certificate.MaxPathLen == 0 && + data.SigningBundle.Certificate.MaxPathLenZero { + return nil, errutil.UserError{Err: "signing certificate has a max path length of zero, and cannot issue further CA certificates"} + } + + certTemplate.MaxPathLen = data.Params.MaxPathLength + if certTemplate.MaxPathLen == 0 { + certTemplate.MaxPathLenZero = true + } + } else if data.Params.BasicConstraintsValidForNonCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = false + } + + if len(data.Params.PermittedDNSDomains) >
0 { + certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains + certTemplate.PermittedDNSDomainsCritical = true + } + + certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, data.CSR.PublicKey, data.SigningBundle.PrivateKey) + + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CertificateBytes = certBytes + result.Certificate, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} + } + + result.CAChain = data.SigningBundle.GetCAChain() + + return result, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go new file mode 100644 index 00000000..d9a77dcc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go @@ -0,0 +1,763 @@ +// Package certutil contains helper functions that are mostly used +// with the PKI backend but can be generally useful. Functionality +// includes helpers for converting a certificate/private key bundle +// between DER and PEM, printing certificate serial numbers, and more. +// +// Functionality specific to the PKI backend includes some types +// and helper methods to make requesting certificates from the +// backend easy. +package certutil + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "net/url" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/errutil" +) + +const ( + PrivateKeyTypeP521 = "p521" +) + +// This can be one of a few key types so the different params may or may not be filled +type ClusterKeyParams struct { + Type string `json:"type" structs:"type" mapstructure:"type"` + X *big.Int `json:"x" structs:"x" mapstructure:"x"` + Y *big.Int `json:"y" structs:"y" mapstructure:"y"` + D *big.Int `json:"d" structs:"d" mapstructure:"d"` +} + +// Secret is used to attempt to unmarshal a Vault secret +// JSON response, as a convenience +type Secret struct { + Data map[string]interface{} `json:"data"` +} + +// PrivateKeyType holds a string representation of the type of private key (ec +// or rsa) referenced in CertBundle and ParsedCertBundle. 
This uses colloquial +// names rather than official names, to eliminate confusion +type PrivateKeyType string + +// Well-known PrivateKeyTypes +const ( + UnknownPrivateKey PrivateKeyType = "" + RSAPrivateKey PrivateKeyType = "rsa" + ECPrivateKey PrivateKeyType = "ec" +) + +// TLSUsage controls whether the intended usage of a *tls.Config +// returned from ParsedCertBundle.GetTLSConfig is for server use, +// client use, or both, which affects which values are set +type TLSUsage int + +// Well-known TLSUsage types +const ( + TLSUnknown TLSUsage = 0 + TLSServer TLSUsage = 1 << iota + TLSClient +) + +// BlockType indicates the serialization format of the key +type BlockType string + +// Well-known formats +const ( + PKCS1Block BlockType = "RSA PRIVATE KEY" + PKCS8Block BlockType = "PRIVATE KEY" + ECBlock BlockType = "EC PRIVATE KEY" +) + +// ParsedPrivateKeyContainer allows common key setting for certs and CSRs +type ParsedPrivateKeyContainer interface { + SetParsedPrivateKey(crypto.Signer, PrivateKeyType, []byte) +} + +// CertBlock contains the DER-encoded certificate and the PEM +// block's byte array +type CertBlock struct { + Certificate *x509.Certificate + Bytes []byte +} + +// CertBundle contains a key type, a PEM-encoded private key, +// a PEM-encoded certificate, and a string-encoded serial number, +// returned from a successful Issue request +type CertBundle struct { + PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` + Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"` + IssuingCA string `json:"issuing_ca" structs:"issuing_ca" mapstructure:"issuing_ca"` + CAChain []string `json:"ca_chain" structs:"ca_chain" mapstructure:"ca_chain"` + PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` + SerialNumber string `json:"serial_number" structs:"serial_number" mapstructure:"serial_number"` +} + +// ParsedCertBundle contains a key type, a DER-encoded private key, +// and a DER-encoded certificate +type ParsedCertBundle struct { + PrivateKeyType PrivateKeyType + PrivateKeyFormat BlockType + PrivateKeyBytes []byte + PrivateKey crypto.Signer + CertificateBytes []byte + Certificate *x509.Certificate + CAChain []*CertBlock +} + +// CSRBundle contains a key type, a PEM-encoded private key, +// and a PEM-encoded CSR +type CSRBundle struct { + PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` + CSR string `json:"csr" structs:"csr" mapstructure:"csr"` + PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` +} + +// ParsedCSRBundle contains a key type, a DER-encoded private key, +// and a DER-encoded certificate request +type ParsedCSRBundle struct { + PrivateKeyType PrivateKeyType + PrivateKeyBytes []byte + PrivateKey crypto.Signer + CSRBytes []byte + CSR *x509.CertificateRequest +} + +// ToPEMBundle converts a string-based certificate bundle +// to a PEM-based string certificate bundle in trust path +// order, leaf certificate first +func (c *CertBundle) ToPEMBundle() string { + var result []string + + if len(c.PrivateKey) > 0 { + result = append(result, c.PrivateKey) + } + if len(c.Certificate) > 0 { + result = append(result, c.Certificate) + } + if len(c.CAChain) > 0 { + result = append(result, c.CAChain...)
+ } + + return strings.Join(result, "\n") +} + +// ToParsedCertBundle converts a string-based certificate bundle +// to a byte-based raw certificate bundle +func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) { + result := &ParsedCertBundle{} + var err error + var pemBlock *pem.Block + + if len(c.PrivateKey) > 0 { + pemBlock, _ = pem.Decode([]byte(c.PrivateKey)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"} + } + + result.PrivateKeyBytes = pemBlock.Bytes + result.PrivateKeyFormat = BlockType(strings.TrimSpace(pemBlock.Type)) + + switch result.PrivateKeyFormat { + case ECBlock: + result.PrivateKeyType, c.PrivateKeyType = ECPrivateKey, ECPrivateKey + case PKCS1Block: + c.PrivateKeyType, result.PrivateKeyType = RSAPrivateKey, RSAPrivateKey + case PKCS8Block: + t, err := getPKCS8Type(pemBlock.Bytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error getting key type from pkcs#8: %v", err)} + } + result.PrivateKeyType = t + switch t { + case ECPrivateKey: + c.PrivateKeyType = ECPrivateKey + case RSAPrivateKey: + c.PrivateKeyType = RSAPrivateKey + } + default: + return nil, errutil.UserError{Err: fmt.Sprintf("Unsupported key block type: %s", pemBlock.Type)} + } + + result.PrivateKey, err = result.getSigner() + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)} + } + } + + if len(c.Certificate) > 0 { + pemBlock, _ = pem.Decode([]byte(c.Certificate)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} + } + result.CertificateBytes = pemBlock.Bytes + result.Certificate, err = x509.ParseCertificate(result.CertificateBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle: %v", err)} + } + } + switch { + case len(c.CAChain) > 0: + for _, cert := range c.CAChain { + pemBlock, _ := pem.Decode([]byte(cert)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} + } + + parsedCert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CA chain: %v", err)} + } + + certBlock := &CertBlock{ + Bytes: pemBlock.Bytes, + Certificate: parsedCert, + } + result.CAChain = append(result.CAChain, certBlock) + } + + // For backwards compatibility + case len(c.IssuingCA) > 0: + pemBlock, _ = pem.Decode([]byte(c.IssuingCA)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding ca certificate from cert bundle"} + } + + parsedCert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via issuing CA: %v", err)} + } + + certBlock := &CertBlock{ + Bytes: pemBlock.Bytes, + Certificate: parsedCert, + } + result.CAChain = append(result.CAChain, certBlock) + } + + // Populate if it isn't there already + if len(c.SerialNumber) == 0 && len(c.Certificate) > 0 { + c.SerialNumber = GetHexFormatted(result.Certificate.SerialNumber.Bytes(), ":") + } + + return result, nil +} + +// ToCertBundle converts a byte-based raw DER certificate bundle +// to a PEM-based string certificate bundle +func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) { + result := &CertBundle{} + block := pem.Block{ + Type: "CERTIFICATE", + } + + if p.Certificate != 
nil { + result.SerialNumber = strings.TrimSpace(GetHexFormatted(p.Certificate.SerialNumber.Bytes(), ":")) + } + + if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 { + block.Bytes = p.CertificateBytes + result.Certificate = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + for _, caCert := range p.CAChain { + block.Bytes = caCert.Bytes + certificate := strings.TrimSpace(string(pem.EncodeToMemory(&block))) + + result.CAChain = append(result.CAChain, certificate) + } + + if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { + block.Type = string(p.PrivateKeyFormat) + block.Bytes = p.PrivateKeyBytes + result.PrivateKeyType = p.PrivateKeyType + + // Handle a bundle not parsed by us + if block.Type == "" { + switch p.PrivateKeyType { + case ECPrivateKey: + block.Type = string(ECBlock) + case RSAPrivateKey: + block.Type = string(PKCS1Block) + } + } + + result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + return result, nil +} + +// Verify checks if the parsed bundle is valid. It validates the public +// key of the certificate against the private key and checks the certificate trust +// chain for path issues. +func (p *ParsedCertBundle) Verify() error { + // If private key exists, check if it matches the public key of cert + if p.PrivateKey != nil && p.Certificate != nil { + equal, err := ComparePublicKeys(p.Certificate.PublicKey, p.PrivateKey.Public()) + if err != nil { + return errwrap.Wrapf("could not compare public and private keys: {{err}}", err) + } + if !equal { + return fmt.Errorf("public key of certificate does not match private key") + } + } + + certPath := p.GetCertificatePath() + if len(certPath) > 1 { + for i, caCert := range certPath[1:] { + if !caCert.Certificate.IsCA { + return fmt.Errorf("certificate %d of certificate chain is not a certificate authority", i+1) + } + if !bytes.Equal(certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId) { + return fmt.Errorf("certificate %d of certificate chain ca trust path is incorrect (%q/%q)", + i+1, certPath[i].Certificate.Subject.CommonName, caCert.Certificate.Subject.CommonName) + } + } + } + + return nil +} + +// GetCertificatePath returns a slice of certificates making up a path, pulled +// from the parsed cert bundle +func (p *ParsedCertBundle) GetCertificatePath() []*CertBlock { + var certPath []*CertBlock + + certPath = append(certPath, &CertBlock{ + Certificate: p.Certificate, + Bytes: p.CertificateBytes, + }) + + if len(p.CAChain) > 0 { + // Root CA puts itself in the chain + if p.CAChain[0].Certificate.SerialNumber != p.Certificate.SerialNumber { + certPath = append(certPath, p.CAChain...) + } + } + + return certPath +} + +// getSigner returns a crypto.Signer corresponding to the private key +// contained in this ParsedCertBundle. The Signer contains a Public() function +// for getting the corresponding public key.
The Signer can also be +// type-converted to private keys +func (p *ParsedCertBundle) getSigner() (crypto.Signer, error) { + var signer crypto.Signer + var err error + + if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 { + return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"} + } + + switch p.PrivateKeyFormat { + case ECBlock: + signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)} + } + + case PKCS1Block: + signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)} + } + + case PKCS8Block: + if k, err := x509.ParsePKCS8PrivateKey(p.PrivateKeyBytes); err == nil { + switch k := k.(type) { + case *rsa.PrivateKey, *ecdsa.PrivateKey: + return k.(crypto.Signer), nil + default: + return nil, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"} + } + } + return nil, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)} + default: + return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA and EC are supported"} + } + return signer, nil +} + +// SetParsedPrivateKey sets the private key parameters on the bundle +func (p *ParsedCertBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { + p.PrivateKey = privateKey + p.PrivateKeyType = privateKeyType + p.PrivateKeyBytes = privateKeyBytes +} + +func getPKCS8Type(bs []byte) (PrivateKeyType, error) { + k, err := x509.ParsePKCS8PrivateKey(bs) + if err != nil { + return UnknownPrivateKey, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)} + } + + switch k.(type) { + case *ecdsa.PrivateKey: + return ECPrivateKey, nil + case *rsa.PrivateKey: + return RSAPrivateKey, nil + default: + return UnknownPrivateKey, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"} + } +} + +// ToParsedCSRBundle converts a string-based CSR bundle +// to a byte-based raw CSR bundle +func (c *CSRBundle) ToParsedCSRBundle() (*ParsedCSRBundle, error) { + result := &ParsedCSRBundle{} + var err error + var pemBlock *pem.Block + + if len(c.PrivateKey) > 0 { + pemBlock, _ = pem.Decode([]byte(c.PrivateKey)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"} + } + result.PrivateKeyBytes = pemBlock.Bytes + + switch BlockType(pemBlock.Type) { + case ECBlock: + result.PrivateKeyType = ECPrivateKey + case PKCS1Block: + result.PrivateKeyType = RSAPrivateKey + default: + // Try to figure it out and correct + if _, err := x509.ParseECPrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = ECPrivateKey + c.PrivateKeyType = "ec" + } else if _, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = RSAPrivateKey + c.PrivateKeyType = "rsa" + } else { + return nil, errutil.UserError{Err: fmt.Sprintf("Unknown private key type in bundle: %s", c.PrivateKeyType)} + } + } + + result.PrivateKey, err = result.getSigner() + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)} + } + } + + if len(c.CSR) > 0 { + pemBlock, _ = pem.Decode([]byte(c.CSR)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} + } + result.CSRBytes = pemBlock.Bytes + result.CSR, 
err = x509.ParseCertificateRequest(result.CSRBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CSR: %v", err)} + } + } + + return result, nil +} + +// ToCSRBundle converts a byte-based raw DER CSR bundle +// to a PEM-based string CSR bundle +func (p *ParsedCSRBundle) ToCSRBundle() (*CSRBundle, error) { + result := &CSRBundle{} + block := pem.Block{ + Type: "CERTIFICATE REQUEST", + } + + if p.CSRBytes != nil && len(p.CSRBytes) > 0 { + block.Bytes = p.CSRBytes + result.CSR = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { + block.Bytes = p.PrivateKeyBytes + switch p.PrivateKeyType { + case RSAPrivateKey: + result.PrivateKeyType = "rsa" + block.Type = "RSA PRIVATE KEY" + case ECPrivateKey: + result.PrivateKeyType = "ec" + block.Type = "EC PRIVATE KEY" + default: + return nil, errutil.InternalError{Err: "Could not determine private key type when creating block"} + } + result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + return result, nil +} + +// getSigner returns a crypto.Signer corresponding to the private key +// contained in this ParsedCSRBundle. The Signer contains a Public() function +// for getting the corresponding public key. The Signer can also be +// type-converted to private keys +func (p *ParsedCSRBundle) getSigner() (crypto.Signer, error) { + var signer crypto.Signer + var err error + + if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 { + return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"} + } + + switch p.PrivateKeyType { + case ECPrivateKey: + signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)} + } + + case RSAPrivateKey: + signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)} + } + + default: + return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA and EC are supported"} + } + return signer, nil +} + +// SetParsedPrivateKey sets the private key parameters on the bundle +func (p *ParsedCSRBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { + p.PrivateKey = privateKey + p.PrivateKeyType = privateKeyType + p.PrivateKeyBytes = privateKeyBytes +} + +// GetTLSConfig returns a TLS config generally suitable for client +// authentication. The returned TLS config can be modified slightly +// to be made suitable for a server requiring client authentication; +// specifically, you should set the value of ClientAuth in the returned +// config to match your needs.
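+// +// A short usage sketch (illustrative only; parsedBundle is assumed to be a +// *ParsedCertBundle produced by ToParsedCertBundle): +// +// tlsConfig, err := parsedBundle.GetTLSConfig(TLSClient | TLSServer) +// if err != nil { +// return err +// } +// tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert // e.g. tighten for server use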
+func (p *ParsedCertBundle) GetTLSConfig(usage TLSUsage) (*tls.Config, error) { + tlsCert := tls.Certificate{ + Certificate: [][]byte{}, + } + + tlsConfig := &tls.Config{ + MinVersion: tls.VersionTLS12, + } + + if p.Certificate != nil { + tlsCert.Leaf = p.Certificate + } + + if p.PrivateKey != nil { + tlsCert.PrivateKey = p.PrivateKey + } + + if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 { + tlsCert.Certificate = append(tlsCert.Certificate, p.CertificateBytes) + } + + if len(p.CAChain) > 0 { + for _, cert := range p.CAChain { + tlsCert.Certificate = append(tlsCert.Certificate, cert.Bytes) + } + + // Technically we only need one cert, but this doesn't duplicate code + certBundle, err := p.ToCertBundle() + if err != nil { + return nil, errwrap.Wrapf("error converting parsed bundle to string bundle when getting TLS config: {{err}}", err) + } + + caPool := x509.NewCertPool() + ok := caPool.AppendCertsFromPEM([]byte(certBundle.CAChain[0])) + if !ok { + return nil, fmt.Errorf("could not append CA certificate") + } + + if usage&TLSServer > 0 { + tlsConfig.ClientCAs = caPool + tlsConfig.ClientAuth = tls.VerifyClientCertIfGiven + } + if usage&TLSClient > 0 { + tlsConfig.RootCAs = caPool + } + } + + if tlsCert.Certificate != nil && len(tlsCert.Certificate) > 0 { + tlsConfig.Certificates = []tls.Certificate{tlsCert} + tlsConfig.BuildNameToCertificate() + } + + return tlsConfig, nil +} + +// IssueData is a structure that is suitable for marshaling into a request; +// either via JSON, or into a map[string]interface{} via the structs package +type IssueData struct { + TTL string `json:"ttl" structs:"ttl" mapstructure:"ttl"` + CommonName string `json:"common_name" structs:"common_name" mapstructure:"common_name"` + OU string `json:"ou" structs:"ou" mapstructure:"ou"` + AltNames string `json:"alt_names" structs:"alt_names" mapstructure:"alt_names"` + IPSANs string `json:"ip_sans" structs:"ip_sans" mapstructure:"ip_sans"` + CSR string `json:"csr" structs:"csr" mapstructure:"csr"` +} + +type URLEntries struct { + IssuingCertificates []string `json:"issuing_certificates" structs:"issuing_certificates" mapstructure:"issuing_certificates"` + CRLDistributionPoints []string `json:"crl_distribution_points" structs:"crl_distribution_points" mapstructure:"crl_distribution_points"` + OCSPServers []string `json:"ocsp_servers" structs:"ocsp_servers" mapstructure:"ocsp_servers"` +} + +type CAInfoBundle struct { + ParsedCertBundle + URLs *URLEntries +} + +func (b *CAInfoBundle) GetCAChain() []*CertBlock { + chain := []*CertBlock{} + + // Include issuing CA in Chain, not including Root Authority + if (len(b.Certificate.AuthorityKeyId) > 0 && + !bytes.Equal(b.Certificate.AuthorityKeyId, b.Certificate.SubjectKeyId)) || + (len(b.Certificate.AuthorityKeyId) == 0 && + !bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) { + + chain = append(chain, &CertBlock{ + Certificate: b.Certificate, + Bytes: b.CertificateBytes, + }) + if b.CAChain != nil && len(b.CAChain) > 0 { + chain = append(chain, b.CAChain...) 
+ } + } + + return chain +} + +type CertExtKeyUsage int + +const ( + AnyExtKeyUsage CertExtKeyUsage = 1 << iota + ServerAuthExtKeyUsage + ClientAuthExtKeyUsage + CodeSigningExtKeyUsage + EmailProtectionExtKeyUsage + IpsecEndSystemExtKeyUsage + IpsecTunnelExtKeyUsage + IpsecUserExtKeyUsage + TimeStampingExtKeyUsage + OcspSigningExtKeyUsage + MicrosoftServerGatedCryptoExtKeyUsage + NetscapeServerGatedCryptoExtKeyUsage + MicrosoftCommercialCodeSigningExtKeyUsage + MicrosoftKernelCodeSigningExtKeyUsage +) + +type CreationParameters struct { + Subject pkix.Name + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + URIs []*url.URL + OtherSANs map[string][]string + IsCA bool + KeyType string + KeyBits int + NotAfter time.Time + KeyUsage x509.KeyUsage + ExtKeyUsage CertExtKeyUsage + ExtKeyUsageOIDs []string + PolicyIdentifiers []string + BasicConstraintsValidForNonCA bool + + // Only used when signing a CA cert + UseCSRValues bool + PermittedDNSDomains []string + + // URLs to encode into the certificate + URLs *URLEntries + + // The maximum path length to encode + MaxPathLength int + + // How far before the current time the certificate's NotBefore is set + NotBeforeDuration time.Duration +} + +type CreationBundle struct { + Params *CreationParameters + SigningBundle *CAInfoBundle + CSR *x509.CertificateRequest +} + +// AddKeyUsages adds appropriate key usages to the template given the creation +// information +func AddKeyUsages(data *CreationBundle, certTemplate *x509.Certificate) { + if data.Params.IsCA { + certTemplate.KeyUsage = x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign) + return + } + + certTemplate.KeyUsage = data.Params.KeyUsage + + if data.Params.ExtKeyUsage&AnyExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageAny) + } + + if data.Params.ExtKeyUsage&ServerAuthExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageServerAuth) + } + + if data.Params.ExtKeyUsage&ClientAuthExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if data.Params.ExtKeyUsage&CodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageCodeSigning) + } + + if data.Params.ExtKeyUsage&EmailProtectionExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageEmailProtection) + } + + if data.Params.ExtKeyUsage&IpsecEndSystemExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECEndSystem) + } + + if data.Params.ExtKeyUsage&IpsecTunnelExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECTunnel) + } + + if data.Params.ExtKeyUsage&IpsecUserExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECUser) + } + + if data.Params.ExtKeyUsage&TimeStampingExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageTimeStamping) + } + + if data.Params.ExtKeyUsage&OcspSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageOCSPSigning) + } + + if data.Params.ExtKeyUsage&MicrosoftServerGatedCryptoExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftServerGatedCrypto) + } + + if data.Params.ExtKeyUsage&NetscapeServerGatedCryptoExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage,
x509.ExtKeyUsageNetscapeServerGatedCrypto) + } + + if data.Params.ExtKeyUsage&MicrosoftCommercialCodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftCommercialCodeSigning) + } + + if data.Params.ExtKeyUsage&MicrosoftKernelCodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftKernelCodeSigning) + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go index 769a7858..92b570ca 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go @@ -12,6 +12,10 @@ const ( // AuthHeaderName is the name of the header containing the token. AuthHeaderName = "X-Vault-Token" + // RequestHeaderName is the name of the header used by the Agent for + // SSRF protection. + RequestHeaderName = "X-Vault-Request" + // PerformanceReplicationALPN is the negotiated protocol used for // performance replication. PerformanceReplicationALPN = "replication_v1" diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go new file mode 100644 index 00000000..a37086c6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go @@ -0,0 +1,11 @@ +package cryptoutil + +import "golang.org/x/crypto/blake2b" + +func Blake2b256Hash(key string) []byte { + hf, _ := blake2b.New256(nil) + + hf.Write([]byte(key)) + + return hf.Sum(nil) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/dbtxn/dbtxn.go b/vendor/github.com/hashicorp/vault/sdk/helper/dbtxn/dbtxn.go new file mode 100644 index 00000000..3337bd97 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/dbtxn/dbtxn.go @@ -0,0 +1,63 @@ +package dbtxn + +import ( + "context" + "database/sql" + "fmt" + "strings" +) + +// ExecuteDBQuery handles executing one single statement, while properly releasing its resources. +// - ctx: Required +// - db: Required +// - params: Optional, may be nil +// - query: Required +func ExecuteDBQuery(ctx context.Context, db *sql.DB, params map[string]string, query string) error { + + parsedQuery := parseQuery(params, query) + + stmt, err := db.PrepareContext(ctx, parsedQuery) + if err != nil { + return err + } + defer stmt.Close() + + return execute(ctx, stmt) +} + +// ExecuteTxQuery handles executing one single statement, while properly releasing its resources.
+// - ctx: Required +// - tx: Required +// - params: Optional, may be nil +// - query: Required +func ExecuteTxQuery(ctx context.Context, tx *sql.Tx, params map[string]string, query string) error { + + parsedQuery := parseQuery(params, query) + + stmt, err := tx.PrepareContext(ctx, parsedQuery) + if err != nil { + return err + } + defer stmt.Close() + + return execute(ctx, stmt) +} + +func execute(ctx context.Context, stmt *sql.Stmt) error { + if _, err := stmt.ExecContext(ctx); err != nil { + return err + } + return nil +} + +func parseQuery(m map[string]string, tpl string) string { + + if m == nil || len(m) <= 0 { + return tpl + } + + for k, v := range m { + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) + } + return tpl +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/entropy/entropy.go b/vendor/github.com/hashicorp/vault/sdk/helper/entropy/entropy.go new file mode 100644 index 00000000..afaef76e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/entropy/entropy.go @@ -0,0 +1,37 @@ +package entropy + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +type Sourcer interface { + GetRandom(bytes int) ([]byte, error) +} + +type Reader struct { + source Sourcer +} + +func NewReader(source Sourcer) *Reader { + return &Reader{source} +} + +// Read reads exactly len(p) bytes from the underlying source into p. +// If the source returns an error having read at least len(p) bytes, the error is dropped. +// It returns the number of bytes copied and an error if fewer bytes were read. +// On return, n == len(p) if and only if err == nil. +func (r *Reader) Read(p []byte) (n int, err error) { + requested := len(p) + randBytes, err := r.source.GetRandom(requested) + delivered := copy(p, randBytes) + if delivered != requested { + if err != nil { + return delivered, errwrap.Wrapf("unable to fill provided buffer with entropy: {{err}}", err) + } + return delivered, fmt.Errorf("unable to fill provided buffer with entropy") + } + + return delivered, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go b/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go new file mode 100644 index 00000000..0b95efb4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go @@ -0,0 +1,20 @@ +package errutil + +// UserError represents an error generated due to invalid user input +type UserError struct { + Err string +} + +func (e UserError) Error() string { + return e.Err +} + +// InternalError represents an error generated internally, +// presumably not due to invalid user input +type InternalError struct { + Err string +} + +func (e InternalError) Error() string { + return e.Err +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/kdf/kdf.go b/vendor/github.com/hashicorp/vault/sdk/helper/kdf/kdf.go new file mode 100644 index 00000000..22e9f67b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/kdf/kdf.go @@ -0,0 +1,77 @@ +// This package is used to implement Key Derivation Functions (KDF) +// based on the recommendations of NIST SP 800-108. These are useful +// for generating unique-per-transaction keys, or situations in which +// a key hierarchy may be useful. +package kdf + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/binary" + "fmt" +) + +// PRF is a pseudo-random function that takes a key or seed, +// as well as additional binary data and generates output that is +// indistinguishable from random. Examples are cryptographic hash +// functions or block ciphers.
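+// +// HMACSHA256PRF below is one such implementation; any function matching this +// signature can be supplied to CounterMode. An illustrative derivation of a +// 256-bit key (baseKey and context are assumed to be caller-supplied byte slices): +// +// derived, err := CounterMode(HMACSHA256PRF, HMACSHA256PRFLen, baseKey, context, 256)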
+type PRF func([]byte, []byte) ([]byte, error) + +// CounterMode implements the counter mode KDF that uses a pseudo-random-function (PRF) +// along with a counter to generate derived keys. The KDF takes a base key +// a derivation context, and the required number of output bits. +func CounterMode(prf PRF, prfLen uint32, key []byte, context []byte, bits uint32) ([]byte, error) { + // Ensure the PRF is byte aligned + if prfLen%8 != 0 { + return nil, fmt.Errorf("PRF must be byte aligned") + } + + // Ensure the bits required are byte aligned + if bits%8 != 0 { + return nil, fmt.Errorf("bits required must be byte aligned") + } + + // Determine the number of rounds required + rounds := bits / prfLen + if bits%prfLen != 0 { + rounds++ + } + + // Allocate and setup the input + input := make([]byte, 4+len(context)+4) + copy(input[4:], context) + binary.BigEndian.PutUint32(input[4+len(context):], bits) + + // Iteratively generate more key material + var out []byte + var i uint32 + for i = 0; i < rounds; i++ { + // Update the counter in the input string + binary.BigEndian.PutUint32(input[:4], i) + + // Compute more key material + part, err := prf(key, input) + if err != nil { + return nil, err + } + if uint32(len(part)*8) != prfLen { + return nil, fmt.Errorf("PRF length mis-match (%d vs %d)", len(part)*8, prfLen) + } + out = append(out, part...) + } + + // Return the desired number of output bytes + return out[:bits/8], nil +} + +const ( + // HMACSHA256PRFLen is the length of output from HMACSHA256PRF + HMACSHA256PRFLen uint32 = 256 +) + +// HMACSHA256PRF is a pseudo-random-function (PRF) that uses an HMAC-SHA256 +func HMACSHA256PRF(key []byte, data []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil), nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/cache.go b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/cache.go new file mode 100644 index 00000000..7da9c202 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/cache.go @@ -0,0 +1,8 @@ +package keysutil + +type Cache interface { + Delete(key interface{}) + Load(key interface{}) (value interface{}, ok bool) + Store(key, value interface{}) + Size() int +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/consts.go b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/consts.go new file mode 100644 index 00000000..59142a39 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/consts.go @@ -0,0 +1,50 @@ +package keysutil + +import ( + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "hash" +) + +type HashType uint32 + +const ( + _ = iota + HashTypeSHA1 HashType = iota + HashTypeSHA2224 + HashTypeSHA2256 + HashTypeSHA2384 + HashTypeSHA2512 +) + +type MarshalingType uint32 + +const ( + _ = iota + MarshalingTypeASN1 MarshalingType = iota + MarshalingTypeJWS +) + +var ( + HashTypeMap = map[string]HashType{ + "sha1": HashTypeSHA1, + "sha2-224": HashTypeSHA2224, + "sha2-256": HashTypeSHA2256, + "sha2-384": HashTypeSHA2384, + "sha2-512": HashTypeSHA2512, + } + + HashFuncMap = map[HashType]func() hash.Hash{ + HashTypeSHA1: sha1.New, + HashTypeSHA2224: sha256.New224, + HashTypeSHA2256: sha256.New, + HashTypeSHA2384: sha512.New384, + HashTypeSHA2512: sha512.New, + } + + MarshalingTypeMap = map[string]MarshalingType{ + "asn1": MarshalingTypeASN1, + "jws": MarshalingTypeJWS, + } +) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/encrypted_key_storage.go 
b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/encrypted_key_storage.go new file mode 100644 index 00000000..90eaaf0b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/encrypted_key_storage.go @@ -0,0 +1,282 @@ +package keysutil + +import ( + "context" + "encoding/base64" + "errors" + "math/big" + paths "path" + "sort" + "strings" + + lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // DefaultCacheSize is used if no cache size is specified for + // NewEncryptedKeyStorage. This value is the number of cache entries to + // store, not the size in bytes of the cache. + DefaultCacheSize = 16 * 1024 + + // DefaultPrefix is used if no prefix is specified for + // NewEncryptedKeyStorage. Prefix must be defined so we can provide context + // for the base folder. + DefaultPrefix = "encryptedkeys/" + + // EncryptedKeyPolicyVersionTpl is a template that can be used to minimize + // the amount of data that's stored with the ciphertext. + EncryptedKeyPolicyVersionTpl = "{{version}}:" +) + +var ( + // ErrPolicyDerivedKeys is returned if the provided policy does not use + // derived keys. This is a requirement for this storage implementation. + ErrPolicyDerivedKeys = errors.New("key policy must use derived keys") + + // ErrPolicyConvergentEncryption is returned if the provided policy does not use + // convergent encryption. This is a requirement for this storage implementation. + ErrPolicyConvergentEncryption = errors.New("key policy must use convergent encryption") + + // ErrPolicyConvergentVersion is returned if the provided policy does not use + // a new enough convergent version. This is a requirement for this storage + // implementation. + ErrPolicyConvergentVersion = errors.New("key policy must use convergent version > 2") + + // ErrNilStorage is returned if the provided storage is nil. + ErrNilStorage = errors.New("nil storage provided") + + // ErrNilPolicy is returned if the provided policy is nil. + ErrNilPolicy = errors.New("nil policy provided") +) + +// EncryptedKeyStorageConfig is used to configure an EncryptedKeyStorage object. +type EncryptedKeyStorageConfig struct { + // Policy is the key policy to use to encrypt the key paths. + Policy *Policy + + // Prefix is the storage prefix for this instance of the EncryptedKeyStorage + // object. This is stored in plaintext. If not set the DefaultPrefix will be + // used. + Prefix string + + // CacheSize is the number of elements to cache. If not set the + // DefaultCacheSize will be used. + CacheSize int +} + +// NewEncryptedKeyStorageWrapper takes an EncryptedKeyStorageConfig and returns a new +// EncryptedKeyStorage object.
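+// +// Illustrative construction (policy is assumed to be a derived, convergent +// *Policy, and backing an existing logical.Storage): +// +// wrapper, err := NewEncryptedKeyStorageWrapper(EncryptedKeyStorageConfig{ +// Policy: policy, +// Prefix: DefaultPrefix, +// }) +// if err != nil { +// return err +// } +// s := wrapper.Wrap(backing) // s satisfies logical.Storage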
+func NewEncryptedKeyStorageWrapper(config EncryptedKeyStorageConfig) (*EncryptedKeyStorageWrapper, error) { + if config.Policy == nil { + return nil, ErrNilPolicy + } + + if !config.Policy.Derived { + return nil, ErrPolicyDerivedKeys + } + + if !config.Policy.ConvergentEncryption { + return nil, ErrPolicyConvergentEncryption + } + + if config.Prefix == "" { + config.Prefix = DefaultPrefix + } + + if !strings.HasSuffix(config.Prefix, "/") { + config.Prefix += "/" + } + + size := config.CacheSize + if size <= 0 { + size = DefaultCacheSize + } + + cache, err := lru.New2Q(size) + if err != nil { + return nil, err + } + + return &EncryptedKeyStorageWrapper{ + policy: config.Policy, + prefix: config.Prefix, + lru: cache, + }, nil +} + +type EncryptedKeyStorageWrapper struct { + policy *Policy + lru *lru.TwoQueueCache + prefix string +} + +func (f *EncryptedKeyStorageWrapper) Wrap(s logical.Storage) logical.Storage { + return &encryptedKeyStorage{ + policy: f.policy, + s: s, + prefix: f.prefix, + lru: f.lru, + } +} + +// EncryptedKeyStorage implements the logical.Storage interface and ensures the +// storage paths are encrypted in the underlying storage. +type encryptedKeyStorage struct { + policy *Policy + s logical.Storage + lru *lru.TwoQueueCache + + prefix string +} + +func ensureTrailingSlash(path string) string { + if !strings.HasSuffix(path, "/") { + return path + "/" + } + return path +} + +// List implements the logical.Storage List method, and decrypts all the items +// in a path prefix. This can only operate on full folder structures so the +// prefix should end in a "/". +func (s *encryptedKeyStorage) List(ctx context.Context, prefix string) ([]string, error) { + var decoder big.Int + + encPrefix, err := s.encryptPath(prefix) + if err != nil { + return nil, err + } + + keys, err := s.s.List(ctx, ensureTrailingSlash(encPrefix)) + if err != nil { + return keys, err + } + + decryptedKeys := make([]string, len(keys)) + + // The context for the decryption operations will be the object's prefix + // joined with the provided prefix. Join cleans the path ensuring there + // isn't a trailing "/". + context := []byte(paths.Join(s.prefix, prefix)) + + for i, k := range keys { + raw, ok := s.lru.Get(k) + if ok { + // cache HIT, we can bail early and skip the decode & decrypt operations. + decryptedKeys[i] = raw.(string) + continue + } + + // If a folder is included in the keys it will have a trailing "/". + // We need to remove this before decoding/decrypting and add it back + // later. + appendSlash := strings.HasSuffix(k, "/") + if appendSlash { + k = strings.TrimSuffix(k, "/") + } + + decoder.SetString(k, 62) + decoded := decoder.Bytes() + if len(decoded) == 0 { + return nil, errors.New("could not decode key") + } + + // Decrypt the data with the object's key policy. + encodedPlaintext, err := s.policy.Decrypt(context, nil, string(decoded[:])) + if err != nil { + return nil, err + } + + // The plaintext is still base64 encoded, decode it. + decoded, err = base64.StdEncoding.DecodeString(encodedPlaintext) + if err != nil { + return nil, err + } + + plaintext := string(decoded[:]) + + // Add the slash back to the plaintext value + if appendSlash { + plaintext += "/" + k += "/" + } + + // We want to store the unencoded version of the key in the cache. + // This will make it more performant when it's a HIT. + s.lru.Add(k, plaintext) + + decryptedKeys[i] = plaintext + } + + sort.Strings(decryptedKeys) + return decryptedKeys, nil +} + +// Get implements the logical.Storage Get method.
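+// Because the wrapped policy must use convergent encryption, encryptPath is +// deterministic: a given plaintext path always encrypts to the same storage +// path, which is what makes point lookups like Get possible.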
+func (s *encryptedKeyStorage) Get(ctx context.Context, path string) (*logical.StorageEntry, error) { + encPath, err := s.encryptPath(path) + if err != nil { + return nil, err + } + + return s.s.Get(ctx, encPath) +} + +// Put implements the logical.Storage Put method. +func (s *encryptedKeyStorage) Put(ctx context.Context, entry *logical.StorageEntry) error { + encPath, err := s.encryptPath(entry.Key) + if err != nil { + return err + } + e := &logical.StorageEntry{} + *e = *entry + + e.Key = encPath + + return s.s.Put(ctx, e) +} + +// Delete implements the logical.Storage Delete method. +func (s *encryptedKeyStorage) Delete(ctx context.Context, path string) error { + encPath, err := s.encryptPath(path) + if err != nil { + return err + } + + return s.s.Delete(ctx, encPath) +} + +// encryptPath takes a plaintext path and encrypts each path section (separated +// by "/") with the object's key policy. The context for each encryption is the +// plaintext path prefix for the key. +func (s *encryptedKeyStorage) encryptPath(path string) (string, error) { + var encoder big.Int + + if path == "" || path == "/" { + return s.prefix, nil + } + + path = paths.Clean(path) + + // Trim the prefix if it starts with a "/" + path = strings.TrimPrefix(path, "/") + + parts := strings.Split(path, "/") + + encPath := s.prefix + context := strings.TrimSuffix(s.prefix, "/") + for _, p := range parts { + encoded := base64.StdEncoding.EncodeToString([]byte(p)) + ciphertext, err := s.policy.Encrypt(0, []byte(context), nil, encoded) + if err != nil { + return "", err + } + + encoder.SetBytes([]byte(ciphertext)) + encPath = paths.Join(encPath, encoder.Text(62)) + context = paths.Join(context, p) + } + + return encPath, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/lock_manager.go b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/lock_manager.go new file mode 100644 index 00000000..ae41ba1e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/lock_manager.go @@ -0,0 +1,479 @@ +package keysutil + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + shared = false + exclusive = true + currentConvergentVersion = 3 +) + +var ( + errNeedExclusiveLock = errors.New("an exclusive lock is needed for this operation") +) + +// PolicyRequest holds values used when requesting a policy. Most values are +// only used during an upsert. 
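+// +// An illustrative upsert request (lm, ctx, storage, and an entropy source such +// as crypto/rand's rand.Reader are assumed to be supplied by the caller): +// +// p, upserted, err := lm.GetPolicy(ctx, PolicyRequest{ +// Storage: storage, +// Name: "my-key", +// KeyType: KeyType_AES256_GCM96, +// Derived: true, +// Upsert: true, +// }, rand.Reader)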
+type PolicyRequest struct { + // The storage to use + Storage logical.Storage + + // The name of the policy + Name string + + // The key type + KeyType KeyType + + // Whether it should be derived + Derived bool + + // Whether to enable convergent encryption + Convergent bool + + // Whether to allow export + Exportable bool + + // Whether to upsert + Upsert bool + + // Whether to allow plaintext backup + AllowPlaintextBackup bool +} + +type LockManager struct { + useCache bool + cache Cache + keyLocks []*locksutil.LockEntry +} + +func NewLockManager(useCache bool, cacheSize int) (*LockManager, error) { + // determine the type of cache to create + var cache Cache + switch { + case !useCache: + case cacheSize < 0: + return nil, errors.New("cache size must be greater or equal to zero") + case cacheSize == 0: + cache = NewTransitSyncMap() + case cacheSize > 0: + newLRUCache, err := NewTransitLRU(cacheSize) + if err != nil { + return nil, errwrap.Wrapf("failed to create cache: {{err}}", err) + } + cache = newLRUCache + } + + lm := &LockManager{ + useCache: useCache, + cache: cache, + keyLocks: locksutil.CreateLocks(), + } + + return lm, nil +} + +func (lm *LockManager) GetCacheSize() int { + if !lm.useCache { + return 0 + } + return lm.cache.Size() +} + +func (lm *LockManager) GetUseCache() bool { + return lm.useCache +} + +func (lm *LockManager) InvalidatePolicy(name string) { + if lm.useCache { + lm.cache.Delete(name) + } +} + +// RestorePolicy acquires an exclusive lock on the policy name and restores the +// given policy along with the archive. +func (lm *LockManager) RestorePolicy(ctx context.Context, storage logical.Storage, name, backup string, force bool) error { + backupBytes, err := base64.StdEncoding.DecodeString(backup) + if err != nil { + return err + } + + var keyData KeyData + err = jsonutil.DecodeJSON(backupBytes, &keyData) + if err != nil { + return err + } + + // Set a different name if desired + if name != "" { + keyData.Policy.Name = name + } + + name = keyData.Policy.Name + + // Grab the exclusive lock as we'll be modifying disk + lock := locksutil.LockForKey(lm.keyLocks, name) + lock.Lock() + defer lock.Unlock() + + var ok bool + var pRaw interface{} + + // If the policy is in cache and 'force' is not specified, error out. Anywhere + // that would put it in the cache will also be protected by the mutex above, + // so we don't need to re-check the cache later. + if lm.useCache { + pRaw, ok = lm.cache.Load(name) + if ok && !force { + return fmt.Errorf("key %q already exists", name) + } + } + + // Conditionally look up the policy from storage, depending on the use of + // 'force' and if the policy was found in cache. + // + // - If was not found in cache and we are not using 'force', look for it in + // storage. If found, error out. + // + // - If it was found in cache and we are using 'force', pRaw will not be nil + // and we do not look the policy up from storage + // + // - If it was found in cache and we are not using 'force', we should have + // returned above with error + var p *Policy + if pRaw == nil { + p, err = lm.getPolicyFromStorage(ctx, storage, name) + if err != nil { + return err + } + if p != nil && !force { + return fmt.Errorf("key %q already exists", name) + } + } + + // If both pRaw and p above are nil and 'force' is specified, we don't need to + // grab policy locks as we have ensured it doesn't already exist, so there + // will be no races as nothing else has this pointer. 
If 'force' was not used, + // an error would have been returned by now if the policy already existed + if pRaw != nil { + p = pRaw.(*Policy) + } + if p != nil { + p.l.Lock() + defer p.l.Unlock() + } + + // Restore the archived keys + if keyData.ArchivedKeys != nil { + err = keyData.Policy.storeArchive(ctx, storage, keyData.ArchivedKeys) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("failed to restore archived keys for key %q: {{err}}", name), err) + } + } + + // Mark that policy as a restored key + keyData.Policy.RestoreInfo = &RestoreInfo{ + Time: time.Now(), + Version: keyData.Policy.LatestVersion, + } + + // Restore the policy. This will also attempt to adjust the archive. + err = keyData.Policy.Persist(ctx, storage) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("failed to restore the policy %q: {{err}}", name), err) + } + + keyData.Policy.l = new(sync.RWMutex) + + // Update the cache to contain the restored policy + if lm.useCache { + lm.cache.Store(name, keyData.Policy) + } + return nil +} + +func (lm *LockManager) BackupPolicy(ctx context.Context, storage logical.Storage, name string) (string, error) { + var p *Policy + var err error + + // Backup writes information about when the backup took place, so we get an + // exclusive lock here + lock := locksutil.LockForKey(lm.keyLocks, name) + lock.Lock() + defer lock.Unlock() + + var ok bool + var pRaw interface{} + + if lm.useCache { + pRaw, ok = lm.cache.Load(name) + } + if ok { + p = pRaw.(*Policy) + p.l.Lock() + defer p.l.Unlock() + } else { + // If the policy doesn't exist in storage, error out + p, err = lm.getPolicyFromStorage(ctx, storage, name) + if err != nil { + return "", err + } + if p == nil { + return "", fmt.Errorf("key %q not found", name) + } + } + + if atomic.LoadUint32(&p.deleted) == 1 { + return "", fmt.Errorf("key %q not found", name) + } + + backup, err := p.Backup(ctx, storage) + if err != nil { + return "", err + } + + return backup, nil +} + +// When the function returns, if caching was disabled, the Policy's lock must +// be unlocked when the caller is done (and it should not be re-locked). +func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest, rand io.Reader) (retP *Policy, retUpserted bool, retErr error) { + var p *Policy + var err error + var ok bool + var pRaw interface{} + + // Check if it's in our cache. If so, return right away. + if lm.useCache { + pRaw, ok = lm.cache.Load(req.Name) + } + if ok { + p = pRaw.(*Policy) + if atomic.LoadUint32(&p.deleted) == 1 { + return nil, false, nil + } + return p, false, nil + } + + // We're not using the cache, or it wasn't found; get an exclusive lock. + // This ensures that any other process writing the actual storage will be + // finished before we load from storage. + lock := locksutil.LockForKey(lm.keyLocks, req.Name) + lock.Lock() + + // If we are using the cache, defer the lock unlock; otherwise we will + // return from here with the lock still held.
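+ // cleanup centralizes that rule: with the cache enabled the lock is always + // released here, while without the cache it is released only on the paths + // that do not hand a policy (and therefore its lock) back to the caller.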
+ cleanup := func() { + switch { + // If using the cache we always unlock, the caller locks the policy + // themselves + case lm.useCache: + lock.Unlock() + + // If not using the cache, if we aren't returning a policy the caller + // doesn't have a lock, so we must unlock + case retP == nil: + lock.Unlock() + } + } + + // Check the cache again + if lm.useCache { + pRaw, ok = lm.cache.Load(req.Name) + } + if ok { + p = pRaw.(*Policy) + if atomic.LoadUint32(&p.deleted) == 1 { + cleanup() + return nil, false, nil + } + retP = p + cleanup() + return + } + + // Load it from storage + p, err = lm.getPolicyFromStorage(ctx, req.Storage, req.Name) + if err != nil { + cleanup() + return nil, false, err + } + // We don't need to lock the policy as there would be no other holders of + // the pointer + + if p == nil { + // This is the only place we upsert a new policy, so if upsert is not + // specified, or the lock type is wrong, unlock before returning + if !req.Upsert { + cleanup() + return nil, false, nil + } + + // We create the policy here, then at the end we do a LoadOrStore. If + // it's been loaded since we last checked the cache, we return an error + // to the user to let them know that their request can't be satisfied + // because we don't know if the parameters match. + + switch req.KeyType { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: + if req.Convergent && !req.Derived { + cleanup() + return nil, false, fmt.Errorf("convergent encryption requires derivation to be enabled") + } + + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: + if req.Derived || req.Convergent { + cleanup() + return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) + } + + case KeyType_ED25519: + if req.Convergent { + cleanup() + return nil, false, fmt.Errorf("convergent encryption not supported for keys of type %v", req.KeyType) + } + + case KeyType_RSA2048, KeyType_RSA4096: + if req.Derived || req.Convergent { + cleanup() + return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) + } + + default: + cleanup() + return nil, false, fmt.Errorf("unsupported key type %v", req.KeyType) + } + + p = &Policy{ + l: new(sync.RWMutex), + Name: req.Name, + Type: req.KeyType, + Derived: req.Derived, + Exportable: req.Exportable, + AllowPlaintextBackup: req.AllowPlaintextBackup, + } + + if req.Derived { + p.KDF = Kdf_hkdf_sha256 + if req.Convergent { + p.ConvergentEncryption = true + // As of version 3 we store the version within each key, so we + // set to -1 to indicate that the value in the policy has no + // meaning. 
We still, for backwards compatibility, fall back to + // this value if the key doesn't have one, which means it will + // only be -1 in the case where every key version is >= 3 + p.ConvergentVersion = -1 + } + } + + // Performs the actual persist and does setup + err = p.Rotate(ctx, req.Storage, rand) + if err != nil { + cleanup() + return nil, false, err + } + + if lm.useCache { + lm.cache.Store(req.Name, p) + } else { + p.l = &lock.RWMutex + p.writeLocked = true + } + + // We don't need to worry about upgrading since it will be a new policy + retP = p + retUpserted = true + cleanup() + return + } + + if p.NeedsUpgrade() { + if err := p.Upgrade(ctx, req.Storage, rand); err != nil { + cleanup() + return nil, false, err + } + } + + if lm.useCache { + lm.cache.Store(req.Name, p) + } else { + p.l = &lock.RWMutex + p.writeLocked = true + } + + retP = p + cleanup() + return +} + +func (lm *LockManager) DeletePolicy(ctx context.Context, storage logical.Storage, name string) error { + var p *Policy + var err error + var ok bool + var pRaw interface{} + + // We may be writing to disk, so grab an exclusive lock. This prevents bad + // behavior when the cache is turned off. We also lock the shared policy + // object to make sure no requests are in flight. + lock := locksutil.LockForKey(lm.keyLocks, name) + lock.Lock() + defer lock.Unlock() + + if lm.useCache { + pRaw, ok = lm.cache.Load(name) + } + if ok { + p = pRaw.(*Policy) + p.l.Lock() + defer p.l.Unlock() + } + + if p == nil { + p, err = lm.getPolicyFromStorage(ctx, storage, name) + if err != nil { + return err + } + if p == nil { + return fmt.Errorf("could not delete key; not found") + } + } + + if !p.DeletionAllowed { + return fmt.Errorf("deletion is not allowed for this key") + } + + atomic.StoreUint32(&p.deleted, 1) + + if lm.useCache { + lm.cache.Delete(name) + } + + err = storage.Delete(ctx, "policy/"+name) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("error deleting key %q: {{err}}", name), err) + } + + err = storage.Delete(ctx, "archive/"+name) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("error deleting key %q archive: {{err}}", name), err) + } + + return nil +} + +func (lm *LockManager) getPolicyFromStorage(ctx context.Context, storage logical.Storage, name string) (*Policy, error) { + return LoadPolicy(ctx, storage, "policy/"+name) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/policy.go b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/policy.go new file mode 100644 index 00000000..b46b6844 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/policy.go @@ -0,0 +1,1592 @@ +package keysutil + +import ( + "bytes" + "context" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "path" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/crypto/chacha20poly1305" + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/hkdf" + + "github.com/hashicorp/errwrap" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/kdf" + "github.com/hashicorp/vault/sdk/logical" +) + +// Careful with iota; don't put anything before it in this const block because +// we need the default of zero to be the old-style KDF +const ( + 
Kdf_hmac_sha256_counter = iota // built-in helper
+	Kdf_hkdf_sha256                // golang.org/x/crypto/hkdf
+)
+
+// Or this one...we need the default of zero to be the original AES256-GCM96
+const (
+	KeyType_AES256_GCM96 = iota
+	KeyType_ECDSA_P256
+	KeyType_ED25519
+	KeyType_RSA2048
+	KeyType_RSA4096
+	KeyType_ChaCha20_Poly1305
+	KeyType_ECDSA_P384
+	KeyType_ECDSA_P521
+	KeyType_AES128_GCM96
+)
+
+const (
+	// ErrTooOld is returned when the ciphertext or signature's key version is
+	// too old.
+	ErrTooOld = "ciphertext or signature version is disallowed by policy (too old)"
+
+	// DefaultVersionTemplate is used when no version template is provided.
+	DefaultVersionTemplate = "vault:v{{version}}:"
+)
+
+type RestoreInfo struct {
+	Time    time.Time `json:"time"`
+	Version int       `json:"version"`
+}
+
+type BackupInfo struct {
+	Time    time.Time `json:"time"`
+	Version int       `json:"version"`
+}
+
+type SigningResult struct {
+	Signature string
+	PublicKey []byte
+}
+
+type ecdsaSignature struct {
+	R, S *big.Int
+}
+
+type KeyType int
+
+func (kt KeyType) EncryptionSupported() bool {
+	switch kt {
+	case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA4096:
+		return true
+	}
+	return false
+}
+
+func (kt KeyType) DecryptionSupported() bool {
+	switch kt {
+	case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA4096:
+		return true
+	}
+	return false
+}
+
+func (kt KeyType) SigningSupported() bool {
+	switch kt {
+	case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_ED25519, KeyType_RSA2048, KeyType_RSA4096:
+		return true
+	}
+	return false
+}
+
+func (kt KeyType) HashSignatureInput() bool {
+	switch kt {
+	case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_RSA2048, KeyType_RSA4096:
+		return true
+	}
+	return false
+}
+
+func (kt KeyType) DerivationSupported() bool {
+	switch kt {
+	case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_ED25519:
+		return true
+	}
+	return false
+}
+
+func (kt KeyType) String() string {
+	switch kt {
+	case KeyType_AES128_GCM96:
+		return "aes128-gcm96"
+	case KeyType_AES256_GCM96:
+		return "aes256-gcm96"
+	case KeyType_ChaCha20_Poly1305:
+		return "chacha20-poly1305"
+	case KeyType_ECDSA_P256:
+		return "ecdsa-p256"
+	case KeyType_ECDSA_P384:
+		return "ecdsa-p384"
+	case KeyType_ECDSA_P521:
+		return "ecdsa-p521"
+	case KeyType_ED25519:
+		return "ed25519"
+	case KeyType_RSA2048:
+		return "rsa-2048"
+	case KeyType_RSA4096:
+		return "rsa-4096"
+	}
+
+	return "[unknown]"
+}
+
+type KeyData struct {
+	Policy       *Policy       `json:"policy"`
+	ArchivedKeys *archivedKeys `json:"archived_keys"`
+}
+
+// KeyEntry stores the key and metadata
+type KeyEntry struct {
+	// AES or some other kind that is a pure byte slice like ED25519
+	Key []byte `json:"key"`
+
+	// Key used for HMAC functions
+	HMACKey []byte `json:"hmac_key"`
+
+	// Time of creation
+	CreationTime time.Time `json:"time"`
+
+	EC_X *big.Int `json:"ec_x"`
+	EC_Y *big.Int `json:"ec_y"`
+	EC_D *big.Int `json:"ec_d"`
+
+	RSAKey *rsa.PrivateKey `json:"rsa_key"`
+
+	// The public key in an appropriate format for the type of key
+	FormattedPublicKey string `json:"public_key"`
+
+	// If convergent is enabled, the version (falling back to what's in the
+	// policy)
+	ConvergentVersion int `json:"convergent_version"`
+
+	// This is deprecated (but still filled) in favor of the value above which
+	// is more precise
+	DeprecatedCreationTime int64 `json:"creation_time"`
+}
+
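+// As an illustrative sketch (not part of the upstream file), callers are
+// expected to consult the KeyType capability helpers above before touching
+// key material, just as Sign and Encrypt below do internally; kt here is a
+// hypothetical variable of type KeyType:
+//
+//	if !kt.SigningSupported() {
+//		return fmt.Errorf("key type %v does not support signing", kt)
+//	}
+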
+// deprecatedKeyEntryMap is used to allow JSON marshal/unmarshal
+type deprecatedKeyEntryMap map[int]KeyEntry
+
+// MarshalJSON implements JSON marshaling
+func (kem deprecatedKeyEntryMap) MarshalJSON() ([]byte, error) {
+	intermediate := map[string]KeyEntry{}
+	for k, v := range kem {
+		intermediate[strconv.Itoa(k)] = v
+	}
+	return json.Marshal(&intermediate)
+}
+
+// UnmarshalJSON implements JSON unmarshaling
+func (kem deprecatedKeyEntryMap) UnmarshalJSON(data []byte) error {
+	intermediate := map[string]KeyEntry{}
+	if err := jsonutil.DecodeJSON(data, &intermediate); err != nil {
+		return err
+	}
+	for k, v := range intermediate {
+		keyval, err := strconv.Atoi(k)
+		if err != nil {
+			return err
+		}
+		kem[keyval] = v
+	}
+
+	return nil
+}
+
+// keyEntryMap is used to allow JSON marshal/unmarshal
+type keyEntryMap map[string]KeyEntry
+
+// PolicyConfig is used to create a new policy
+type PolicyConfig struct {
+	// The name of the policy
+	Name string `json:"name"`
+
+	// The type of key
+	Type KeyType
+
+	// Derived keys MUST provide a context and the master underlying key is
+	// never used.
+	Derived              bool
+	KDF                  int
+	ConvergentEncryption bool
+
+	// Whether the key is exportable
+	Exportable bool
+
+	// Whether the key is allowed to be deleted
+	DeletionAllowed bool
+
+	// AllowPlaintextBackup allows taking backup of the policy in plaintext
+	AllowPlaintextBackup bool
+
+	// VersionTemplate is used to prefix the ciphertext with information about
+	// the key version. It must include {{version}} and a delimiter between the
+	// version prefix and the ciphertext.
+	VersionTemplate string
+
+	// StoragePrefix is used to add a prefix when storing and retrieving the
+	// policy object.
+	StoragePrefix string
+}
+
+// NewPolicy takes a policy config and returns a Policy with those settings.
+func NewPolicy(config PolicyConfig) *Policy {
+	return &Policy{
+		l:                    new(sync.RWMutex),
+		Name:                 config.Name,
+		Type:                 config.Type,
+		Derived:              config.Derived,
+		KDF:                  config.KDF,
+		ConvergentEncryption: config.ConvergentEncryption,
+		ConvergentVersion:    -1,
+		Exportable:           config.Exportable,
+		DeletionAllowed:      config.DeletionAllowed,
+		AllowPlaintextBackup: config.AllowPlaintextBackup,
+		VersionTemplate:      config.VersionTemplate,
+		StoragePrefix:        config.StoragePrefix,
+	}
+}
+
+// LoadPolicy will load a policy from the provided storage path and set the
+// necessary un-exported variables. It is particularly useful when accessing a
+// policy without the lock manager.
+func LoadPolicy(ctx context.Context, s logical.Storage, path string) (*Policy, error) {
+	raw, err := s.Get(ctx, path)
+	if err != nil {
+		return nil, err
+	}
+	if raw == nil {
+		return nil, nil
+	}
+
+	var policy Policy
+	err = jsonutil.DecodeJSON(raw.Value, &policy)
+	if err != nil {
+		return nil, err
+	}
+
+	policy.l = new(sync.RWMutex)
+
+	return &policy, nil
+}
+
+// Policy is the struct used to store metadata
+type Policy struct {
+	// This is a pointer on purpose: if we are running with cache disabled we
+	// need to actually swap in the lock manager's lock for this policy with
+	// the local lock.
+	l *sync.RWMutex
+	// writeLocked allows us to implement Lock() and Unlock()
+	writeLocked bool
+	// Stores whether it's been deleted. This acts as a guard for operations
+	// that may write data, e.g. if one request rotates and that request is
+	// served after a delete.
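+	// It is read and written atomically via sync/atomic (see DeletePolicy
+	// and Persist), so it can be checked without holding the policy lock.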
+	deleted uint32
+
+	Name string      `json:"name"`
+	Key  []byte      `json:"key,omitempty"` // DEPRECATED
+	Keys keyEntryMap `json:"keys"`
+
+	// Derived keys MUST provide a context and the master underlying key is
+	// never used. If convergent encryption is true, the context will be used
+	// as the nonce as well.
+	Derived              bool `json:"derived"`
+	KDF                  int  `json:"kdf"`
+	ConvergentEncryption bool `json:"convergent_encryption"`
+
+	// Whether the key is exportable
+	Exportable bool `json:"exportable"`
+
+	// The minimum version of the key allowed to be used for decryption
+	MinDecryptionVersion int `json:"min_decryption_version"`
+
+	// The minimum version of the key allowed to be used for encryption
+	MinEncryptionVersion int `json:"min_encryption_version"`
+
+	// The latest key version in this policy
+	LatestVersion int `json:"latest_version"`
+
+	// The latest key version in the archive. We never delete these, so this is
+	// a max.
+	ArchiveVersion int `json:"archive_version"`
+
+	// ArchiveMinVersion is the minimum version of the key in the archive.
+	ArchiveMinVersion int `json:"archive_min_version"`
+
+	// MinAvailableVersion is the minimum version of the key present. All key
+	// versions before this would have been deleted.
+	MinAvailableVersion int `json:"min_available_version"`
+
+	// Whether the key is allowed to be deleted
+	DeletionAllowed bool `json:"deletion_allowed"`
+
+	// The version of the convergent nonce to use
+	ConvergentVersion int `json:"convergent_version"`
+
+	// The type of key
+	Type KeyType `json:"type"`
+
+	// BackupInfo indicates the information about the backup action taken on
+	// this policy
+	BackupInfo *BackupInfo `json:"backup_info"`
+
+	// RestoreInfo indicates the information about the restore action taken on
+	// this policy
+	RestoreInfo *RestoreInfo `json:"restore_info"`
+
+	// AllowPlaintextBackup allows taking backup of the policy in plaintext
+	AllowPlaintextBackup bool `json:"allow_plaintext_backup"`
+
+	// VersionTemplate is used to prefix the ciphertext with information about
+	// the key version. It must include {{version}} and a delimiter between the
+	// version prefix and the ciphertext.
+	VersionTemplate string `json:"version_template"`
+
+	// StoragePrefix is used to add a prefix when storing and retrieving the
+	// policy object.
+	StoragePrefix string `json:"storage_prefix"`
+
+	// versionPrefixCache stores caches of version prefix strings and the split
+	// version template.
+	versionPrefixCache sync.Map
+}
+
+func (p *Policy) Lock(exclusive bool) {
+	if exclusive {
+		p.l.Lock()
+		p.writeLocked = true
+	} else {
+		p.l.RLock()
+	}
+}
+
+func (p *Policy) Unlock() {
+	if p.writeLocked {
+		p.writeLocked = false
+		p.l.Unlock()
+	} else {
+		p.l.RUnlock()
+	}
+}
+
+// ArchivedKeys stores old keys. This is used to keep the key loading time sane
+// when there are huge numbers of rotations.
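+// Within the archive, the entry for key version v lives at index
+// v - MinAvailableVersion (see handleArchiving below), which keeps the slice
+// dense even after old versions have been trimmed away.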
+type archivedKeys struct { + Keys []KeyEntry `json:"keys"` +} + +func (p *Policy) LoadArchive(ctx context.Context, storage logical.Storage) (*archivedKeys, error) { + archive := &archivedKeys{} + + raw, err := storage.Get(ctx, path.Join(p.StoragePrefix, "archive", p.Name)) + if err != nil { + return nil, err + } + if raw == nil { + archive.Keys = make([]KeyEntry, 0) + return archive, nil + } + + if err := jsonutil.DecodeJSON(raw.Value, archive); err != nil { + return nil, err + } + + return archive, nil +} + +func (p *Policy) storeArchive(ctx context.Context, storage logical.Storage, archive *archivedKeys) error { + // Encode the policy + buf, err := json.Marshal(archive) + if err != nil { + return err + } + + // Write the policy into storage + err = storage.Put(ctx, &logical.StorageEntry{ + Key: path.Join(p.StoragePrefix, "archive", p.Name), + Value: buf, + }) + if err != nil { + return err + } + + return nil +} + +// handleArchiving manages the movement of keys to and from the policy archive. +// This should *ONLY* be called from Persist() since it assumes that the policy +// will be persisted afterwards. +func (p *Policy) handleArchiving(ctx context.Context, storage logical.Storage) error { + // We need to move keys that are no longer accessible to archivedKeys, and keys + // that now need to be accessible back here. + // + // For safety, because there isn't really a good reason to, we never delete + // keys from the archive even when we move them back. + + // Check if we have the latest minimum version in the current set of keys + _, keysContainsMinimum := p.Keys[strconv.Itoa(p.MinDecryptionVersion)] + + // Sanity checks + switch { + case p.MinDecryptionVersion < 1: + return fmt.Errorf("minimum decryption version of %d is less than 1", p.MinDecryptionVersion) + case p.LatestVersion < 1: + return fmt.Errorf("latest version of %d is less than 1", p.LatestVersion) + case !keysContainsMinimum && p.ArchiveVersion != p.LatestVersion: + return fmt.Errorf("need to move keys from archive but archive version not up-to-date") + case p.ArchiveVersion > p.LatestVersion: + return fmt.Errorf("archive version of %d is greater than the latest version %d", + p.ArchiveVersion, p.LatestVersion) + case p.MinEncryptionVersion > 0 && p.MinEncryptionVersion < p.MinDecryptionVersion: + return fmt.Errorf("minimum decryption version of %d is greater than minimum encryption version %d", + p.MinDecryptionVersion, p.MinEncryptionVersion) + case p.MinDecryptionVersion > p.LatestVersion: + return fmt.Errorf("minimum decryption version of %d is greater than the latest version %d", + p.MinDecryptionVersion, p.LatestVersion) + } + + archive, err := p.LoadArchive(ctx, storage) + if err != nil { + return err + } + + if !keysContainsMinimum { + // Need to move keys *from* archive + for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ { + p.Keys[strconv.Itoa(i)] = archive.Keys[i-p.MinAvailableVersion] + } + + return nil + } + + // Need to move keys *to* archive + + // We need a size that is equivalent to the latest version (number of keys) + // but adding one since slice numbering starts at 0 and we're indexing by + // key version + if len(archive.Keys)+p.MinAvailableVersion < p.LatestVersion+1 { + // Increase the size of the archive slice + newKeys := make([]KeyEntry, p.LatestVersion-p.MinAvailableVersion+1) + copy(newKeys, archive.Keys) + archive.Keys = newKeys + } + + // We are storing all keys in the archive, so we ensure that it is up to + // date up to p.LatestVersion + for i := p.ArchiveVersion + 1; i <= 
p.LatestVersion; i++ { + archive.Keys[i-p.MinAvailableVersion] = p.Keys[strconv.Itoa(i)] + p.ArchiveVersion = i + } + + // Trim the keys if required + if p.ArchiveMinVersion < p.MinAvailableVersion { + archive.Keys = archive.Keys[p.MinAvailableVersion-p.ArchiveMinVersion:] + p.ArchiveMinVersion = p.MinAvailableVersion + } + + err = p.storeArchive(ctx, storage, archive) + if err != nil { + return err + } + + // Perform deletion afterwards so that if there is an error saving we + // haven't messed with the current policy + for i := p.LatestVersion - len(p.Keys) + 1; i < p.MinDecryptionVersion; i++ { + delete(p.Keys, strconv.Itoa(i)) + } + + return nil +} + +func (p *Policy) Persist(ctx context.Context, storage logical.Storage) (retErr error) { + if atomic.LoadUint32(&p.deleted) == 1 { + return errors.New("key has been deleted, not persisting") + } + + // Other functions will take care of restoring other values; this is just + // responsible for archiving and keys since the archive function can modify + // keys. At the moment one of the other functions calling persist will also + // roll back keys, but better safe than sorry and this doesn't happen + // enough to worry about the speed tradeoff. + priorArchiveVersion := p.ArchiveVersion + var priorKeys keyEntryMap + + if p.Keys != nil { + priorKeys = keyEntryMap{} + for k, v := range p.Keys { + priorKeys[k] = v + } + } + + defer func() { + if retErr != nil { + p.ArchiveVersion = priorArchiveVersion + p.Keys = priorKeys + } + }() + + err := p.handleArchiving(ctx, storage) + if err != nil { + return err + } + + // Encode the policy + buf, err := p.Serialize() + if err != nil { + return err + } + + // Write the policy into storage + err = storage.Put(ctx, &logical.StorageEntry{ + Key: path.Join(p.StoragePrefix, "policy", p.Name), + Value: buf, + }) + if err != nil { + return err + } + + return nil +} + +func (p *Policy) Serialize() ([]byte, error) { + return json.Marshal(p) +} + +func (p *Policy) NeedsUpgrade() bool { + // Ensure we've moved from Key -> Keys + if p.Key != nil && len(p.Key) > 0 { + return true + } + + // With archiving, past assumptions about the length of the keys map are no + // longer valid + if p.LatestVersion == 0 && len(p.Keys) != 0 { + return true + } + + // We disallow setting the version to 0, since they start at 1 since moving + // to rotate-able keys, so update if it's set to 0 + if p.MinDecryptionVersion == 0 { + return true + } + + // On first load after an upgrade, copy keys to the archive + if p.ArchiveVersion == 0 { + return true + } + + // Need to write the version if zero; for version 3 on we set this to -1 to + // ignore it since we store this information in each key entry + if p.ConvergentEncryption && p.ConvergentVersion == 0 { + return true + } + + if p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey == nil || len(p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey) == 0 { + return true + } + + return false +} + +func (p *Policy) Upgrade(ctx context.Context, storage logical.Storage, randReader io.Reader) (retErr error) { + priorKey := p.Key + priorLatestVersion := p.LatestVersion + priorMinDecryptionVersion := p.MinDecryptionVersion + priorConvergentVersion := p.ConvergentVersion + var priorKeys keyEntryMap + + if p.Keys != nil { + priorKeys = keyEntryMap{} + for k, v := range p.Keys { + priorKeys[k] = v + } + } + + defer func() { + if retErr != nil { + p.Key = priorKey + p.LatestVersion = priorLatestVersion + p.MinDecryptionVersion = priorMinDecryptionVersion + p.ConvergentVersion = priorConvergentVersion + p.Keys = 
priorKeys + } + }() + + persistNeeded := false + // Ensure we've moved from Key -> Keys + if p.Key != nil && len(p.Key) > 0 { + p.MigrateKeyToKeysMap() + persistNeeded = true + } + + // With archiving, past assumptions about the length of the keys map are no + // longer valid + if p.LatestVersion == 0 && len(p.Keys) != 0 { + p.LatestVersion = len(p.Keys) + persistNeeded = true + } + + // We disallow setting the version to 0, since they start at 1 since moving + // to rotate-able keys, so update if it's set to 0 + if p.MinDecryptionVersion == 0 { + p.MinDecryptionVersion = 1 + persistNeeded = true + } + + // On first load after an upgrade, copy keys to the archive + if p.ArchiveVersion == 0 { + persistNeeded = true + } + + if p.ConvergentEncryption && p.ConvergentVersion == 0 { + p.ConvergentVersion = 1 + persistNeeded = true + } + + if p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey == nil || len(p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey) == 0 { + entry := p.Keys[strconv.Itoa(p.LatestVersion)] + hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) + if err != nil { + return err + } + entry.HMACKey = hmacKey + p.Keys[strconv.Itoa(p.LatestVersion)] = entry + persistNeeded = true + } + + if persistNeeded { + err := p.Persist(ctx, storage) + if err != nil { + return err + } + } + + return nil +} + +// DeriveKey is used to derive the encryption key that should be used depending +// on the policy. If derivation is disabled the raw key is used and no context +// is required, otherwise the KDF mode is used with the context to derive the +// proper key. +func (p *Policy) DeriveKey(context []byte, ver, numBytes int) ([]byte, error) { + // Fast-path non-derived keys + if !p.Derived { + return p.Keys[strconv.Itoa(ver)].Key, nil + } + + if !p.Type.DerivationSupported() { + return nil, errutil.UserError{Err: fmt.Sprintf("derivation not supported for key type %v", p.Type)} + } + + if p.Keys == nil || p.LatestVersion == 0 { + return nil, errutil.InternalError{Err: "unable to access the key; no key versions found"} + } + + if ver <= 0 || ver > p.LatestVersion { + return nil, errutil.UserError{Err: "invalid key version"} + } + + // Ensure a context is provided + if len(context) == 0 { + return nil, errutil.UserError{Err: "missing 'context' for key derivation; the key was created using a derived key, which means additional, per-request information must be included in order to perform operations with the key"} + } + + switch p.KDF { + case Kdf_hmac_sha256_counter: + prf := kdf.HMACSHA256PRF + prfLen := kdf.HMACSHA256PRFLen + return kdf.CounterMode(prf, prfLen, p.Keys[strconv.Itoa(ver)].Key, context, 256) + + case Kdf_hkdf_sha256: + reader := hkdf.New(sha256.New, p.Keys[strconv.Itoa(ver)].Key, nil, context) + derBytes := bytes.NewBuffer(nil) + derBytes.Grow(numBytes) + limReader := &io.LimitedReader{ + R: reader, + N: int64(numBytes), + } + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: + n, err := derBytes.ReadFrom(limReader) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error reading returned derived bytes: %v", err)} + } + if n != int64(numBytes) { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to read enough derived bytes, needed %d, got %d", numBytes, n)} + } + return derBytes.Bytes(), nil + + case KeyType_ED25519: + // We use the limited reader containing the derived bytes as the + // "random" input to the generation function + _, pri, err := ed25519.GenerateKey(limReader) + if err != nil { + return 
nil, errutil.InternalError{Err: fmt.Sprintf("error generating derived key: %v", err)}
+			}
+			return pri, nil
+
+		default:
+			return nil, errutil.InternalError{Err: "unsupported key type for derivation"}
+		}
+
+	default:
+		return nil, errutil.InternalError{Err: "unsupported key derivation mode"}
+	}
+}
+
+func (p *Policy) convergentVersion(ver int) int {
+	if !p.ConvergentEncryption {
+		return 0
+	}
+
+	convergentVersion := p.ConvergentVersion
+	if convergentVersion == 0 {
+		// Not upgraded yet for some reason; fall back to the original version
+		convergentVersion = 1
+	}
+	currKey := p.Keys[strconv.Itoa(ver)]
+	if currKey.ConvergentVersion != 0 {
+		convergentVersion = currKey.ConvergentVersion
+	}
+
+	return convergentVersion
+}
+
+func (p *Policy) Encrypt(ver int, context, nonce []byte, value string) (string, error) {
+	if !p.Type.EncryptionSupported() {
+		return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)}
+	}
+
+	// Decode the plaintext value
+	plaintext, err := base64.StdEncoding.DecodeString(value)
+	if err != nil {
+		return "", errutil.UserError{Err: err.Error()}
+	}
+
+	switch {
+	case ver == 0:
+		ver = p.LatestVersion
+	case ver < 0:
+		return "", errutil.UserError{Err: "requested version for encryption is negative"}
+	case ver > p.LatestVersion:
+		return "", errutil.UserError{Err: "requested version for encryption is higher than the latest key version"}
+	case ver < p.MinEncryptionVersion:
+		return "", errutil.UserError{Err: "requested version for encryption is less than the minimum encryption key version"}
+	}
+
+	var ciphertext []byte
+
+	switch p.Type {
+	case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305:
+		hmacKey := context
+
+		var aead cipher.AEAD
+		var encKey []byte
+		var deriveHMAC bool
+
+		encBytes := 32
+		hmacBytes := 0
+		if p.convergentVersion(ver) > 2 {
+			deriveHMAC = true
+			hmacBytes = 32
+		}
+		if p.Type == KeyType_AES128_GCM96 {
+			encBytes = 16
+		}
+
+		key, err := p.DeriveKey(context, ver, encBytes+hmacBytes)
+		if err != nil {
+			return "", err
+		}
+
+		if len(key) < encBytes+hmacBytes {
+			return "", errutil.InternalError{Err: "could not derive key, length too small"}
+		}
+
+		encKey = key[:encBytes]
+		if len(encKey) != encBytes {
+			return "", errutil.InternalError{Err: "could not derive enc key, length not correct"}
+		}
+		if deriveHMAC {
+			hmacKey = key[encBytes:]
+			if len(hmacKey) != hmacBytes {
+				return "", errutil.InternalError{Err: "could not derive hmac key, length not correct"}
+			}
+		}
+
+		switch p.Type {
+		case KeyType_AES128_GCM96, KeyType_AES256_GCM96:
+			// Setup the cipher
+			aesCipher, err := aes.NewCipher(encKey)
+			if err != nil {
+				return "", errutil.InternalError{Err: err.Error()}
+			}
+
+			// Setup the GCM AEAD
+			gcm, err := cipher.NewGCM(aesCipher)
+			if err != nil {
+				return "", errutil.InternalError{Err: err.Error()}
+			}
+
+			aead = gcm
+
+		case KeyType_ChaCha20_Poly1305:
+			cha, err := chacha20poly1305.New(encKey)
+			if err != nil {
+				return "", errutil.InternalError{Err: err.Error()}
+			}
+
+			aead = cha
+		}
+
+		if p.ConvergentEncryption {
+			convergentVersion := p.convergentVersion(ver)
+			switch convergentVersion {
+			case 1:
+				if len(nonce) != aead.NonceSize() {
+					return "", errutil.UserError{Err: fmt.Sprintf("base64-decoded nonce must be %d bytes long when using convergent encryption with this key", aead.NonceSize())}
+				}
+			case 2, 3:
+				if len(hmacKey) == 0 {
+					return "", errutil.InternalError{Err: "invalid hmac key length of zero"}
+				}
+				nonceHmac := hmac.New(sha256.New, hmacKey)
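+				// The nonce is derived deterministically: HMAC-SHA256 over
+				// the plaintext, truncated below to the AEAD nonce size.
+				// Identical plaintexts (under the same derived hmacKey)
+				// therefore yield identical ciphertexts, which is what makes
+				// the encryption convergent.
+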
nonceHmac.Write(plaintext) + nonceSum := nonceHmac.Sum(nil) + nonce = nonceSum[:aead.NonceSize()] + default: + return "", errutil.InternalError{Err: fmt.Sprintf("unhandled convergent version %d", convergentVersion)} + } + } else { + // Compute random nonce + nonce, err = uuid.GenerateRandomBytes(aead.NonceSize()) + if err != nil { + return "", errutil.InternalError{Err: err.Error()} + } + } + + // Encrypt and tag with AEAD + ciphertext = aead.Seal(nil, nonce, plaintext, nil) + + // Place the encrypted data after the nonce + if !p.ConvergentEncryption || p.convergentVersion(ver) > 1 { + ciphertext = append(nonce, ciphertext...) + } + + case KeyType_RSA2048, KeyType_RSA4096: + key := p.Keys[strconv.Itoa(ver)].RSAKey + ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, &key.PublicKey, plaintext, nil) + if err != nil { + return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA encrypt the plaintext: %v", err)} + } + + default: + return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} + } + + // Convert to base64 + encoded := base64.StdEncoding.EncodeToString(ciphertext) + + // Prepend some information + encoded = p.getVersionPrefix(ver) + encoded + + return encoded, nil +} + +func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) { + if !p.Type.DecryptionSupported() { + return "", errutil.UserError{Err: fmt.Sprintf("message decryption not supported for key type %v", p.Type)} + } + + tplParts, err := p.getTemplateParts() + if err != nil { + return "", err + } + + // Verify the prefix + if !strings.HasPrefix(value, tplParts[0]) { + return "", errutil.UserError{Err: "invalid ciphertext: no prefix"} + } + + splitVerCiphertext := strings.SplitN(strings.TrimPrefix(value, tplParts[0]), tplParts[1], 2) + if len(splitVerCiphertext) != 2 { + return "", errutil.UserError{Err: "invalid ciphertext: wrong number of fields"} + } + + ver, err := strconv.Atoi(splitVerCiphertext[0]) + if err != nil { + return "", errutil.UserError{Err: "invalid ciphertext: version number could not be decoded"} + } + + if ver == 0 { + // Compatibility mode with initial implementation, where keys start at + // zero + ver = 1 + } + + if ver > p.LatestVersion { + return "", errutil.UserError{Err: "invalid ciphertext: version is too new"} + } + + if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion { + return "", errutil.UserError{Err: ErrTooOld} + } + + convergentVersion := p.convergentVersion(ver) + if convergentVersion == 1 && (nonce == nil || len(nonce) == 0) { + return "", errutil.UserError{Err: "invalid convergent nonce supplied"} + } + + // Decode the base64 + decoded, err := base64.StdEncoding.DecodeString(splitVerCiphertext[1]) + if err != nil { + return "", errutil.UserError{Err: "invalid ciphertext: could not decode base64"} + } + + var plain []byte + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: + var aead cipher.AEAD + + numBytes := 32 + if p.Type == KeyType_AES128_GCM96 { + numBytes = 16 + } + + encKey, err := p.DeriveKey(context, ver, numBytes) + if err != nil { + return "", err + } + + if len(encKey) != numBytes { + return "", errutil.InternalError{Err: "could not derive enc key, length not correct"} + } + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96: + // Setup the cipher + aesCipher, err := aes.NewCipher(encKey) + if err != nil { + return "", errutil.InternalError{Err: err.Error()} + } + + // Setup the GCM AEAD + gcm, err := cipher.NewGCM(aesCipher) + if err != nil 
{ + return "", errutil.InternalError{Err: err.Error()} + } + + aead = gcm + + case KeyType_ChaCha20_Poly1305: + cha, err := chacha20poly1305.New(encKey) + if err != nil { + return "", errutil.InternalError{Err: err.Error()} + } + + aead = cha + } + + if len(decoded) < aead.NonceSize() { + return "", errutil.UserError{Err: "invalid ciphertext length"} + } + + // Extract the nonce and ciphertext + var ciphertext []byte + if p.ConvergentEncryption && convergentVersion == 1 { + ciphertext = decoded + } else { + nonce = decoded[:aead.NonceSize()] + ciphertext = decoded[aead.NonceSize():] + } + + // Verify and Decrypt + plain, err = aead.Open(nil, nonce, ciphertext, nil) + if err != nil { + return "", errutil.UserError{Err: "invalid ciphertext: unable to decrypt"} + } + + case KeyType_RSA2048, KeyType_RSA4096: + key := p.Keys[strconv.Itoa(ver)].RSAKey + plain, err = rsa.DecryptOAEP(sha256.New(), rand.Reader, key, decoded, nil) + if err != nil { + return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA decrypt the ciphertext: %v", err)} + } + + default: + return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} + } + + return base64.StdEncoding.EncodeToString(plain), nil +} + +func (p *Policy) HMACKey(version int) ([]byte, error) { + switch { + case version < 0: + return nil, fmt.Errorf("key version does not exist (cannot be negative)") + case version > p.LatestVersion: + return nil, fmt.Errorf("key version does not exist; latest key version is %d", p.LatestVersion) + } + + if p.Keys[strconv.Itoa(version)].HMACKey == nil { + return nil, fmt.Errorf("no HMAC key exists for that key version") + } + + return p.Keys[strconv.Itoa(version)].HMACKey, nil +} + +func (p *Policy) Sign(ver int, context, input []byte, hashAlgorithm HashType, sigAlgorithm string, marshaling MarshalingType) (*SigningResult, error) { + if !p.Type.SigningSupported() { + return nil, fmt.Errorf("message signing not supported for key type %v", p.Type) + } + + switch { + case ver == 0: + ver = p.LatestVersion + case ver < 0: + return nil, errutil.UserError{Err: "requested version for signing is negative"} + case ver > p.LatestVersion: + return nil, errutil.UserError{Err: "requested version for signing is higher than the latest key version"} + case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: + return nil, errutil.UserError{Err: "requested version for signing is less than the minimum encryption key version"} + } + + var sig []byte + var pubKey []byte + var err error + switch p.Type { + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: + var curveBits int + var curve elliptic.Curve + switch p.Type { + case KeyType_ECDSA_P384: + curveBits = 384 + curve = elliptic.P384() + case KeyType_ECDSA_P521: + curveBits = 521 + curve = elliptic.P521() + default: + curveBits = 256 + curve = elliptic.P256() + } + + keyParams := p.Keys[strconv.Itoa(ver)] + key := &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: curve, + X: keyParams.EC_X, + Y: keyParams.EC_Y, + }, + D: keyParams.EC_D, + } + + r, s, err := ecdsa.Sign(rand.Reader, key, input) + if err != nil { + return nil, err + } + + switch marshaling { + case MarshalingTypeASN1: + // This is used by openssl and X.509 + sig, err = asn1.Marshal(ecdsaSignature{ + R: r, + S: s, + }) + if err != nil { + return nil, err + } + + case MarshalingTypeJWS: + // This is used by JWS + + // First we have to get the length of the curve in bytes. 
This is done in a
+			// curve-agnostic way so the same marshaling works for P-256,
+			// P-384, and P-521. P-521's bit length is not a whole number of
+			// bytes (521/8 = 65.125), so round up where needed.
+			keyLen := curveBits / 8
+			if curveBits%8 > 0 {
+				keyLen++
+			}
+
+			// Now create the output array
+			sig = make([]byte, keyLen*2)
+			rb := r.Bytes()
+			sb := s.Bytes()
+			copy(sig[keyLen-len(rb):], rb)
+			copy(sig[2*keyLen-len(sb):], sb)
+
+		default:
+			return nil, errutil.UserError{Err: "requested marshaling type is invalid"}
+		}
+
+	case KeyType_ED25519:
+		var key ed25519.PrivateKey
+
+		if p.Derived {
+			// Derive the key that should be used
+			var err error
+			key, err = p.DeriveKey(context, ver, 32)
+			if err != nil {
+				return nil, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)}
+			}
+			pubKey = key.Public().(ed25519.PublicKey)
+		} else {
+			key = ed25519.PrivateKey(p.Keys[strconv.Itoa(ver)].Key)
+		}
+
+		// Per docs, do not pre-hash ed25519; it does two passes and performs
+		// its own hashing
+		sig, err = key.Sign(rand.Reader, input, crypto.Hash(0))
+		if err != nil {
+			return nil, err
+		}
+
+	case KeyType_RSA2048, KeyType_RSA4096:
+		key := p.Keys[strconv.Itoa(ver)].RSAKey
+
+		var algo crypto.Hash
+		switch hashAlgorithm {
+		case HashTypeSHA1:
+			algo = crypto.SHA1
+		case HashTypeSHA2224:
+			algo = crypto.SHA224
+		case HashTypeSHA2256:
+			algo = crypto.SHA256
+		case HashTypeSHA2384:
+			algo = crypto.SHA384
+		case HashTypeSHA2512:
+			algo = crypto.SHA512
+		default:
+			return nil, errutil.InternalError{Err: "unsupported hash algorithm"}
+		}
+
+		if sigAlgorithm == "" {
+			sigAlgorithm = "pss"
+		}
+
+		switch sigAlgorithm {
+		case "pss":
+			sig, err = rsa.SignPSS(rand.Reader, key, algo, input, nil)
+			if err != nil {
+				return nil, err
+			}
+		case "pkcs1v15":
+			sig, err = rsa.SignPKCS1v15(rand.Reader, key, algo, input)
+			if err != nil {
+				return nil, err
+			}
+		default:
+			return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported rsa signature algorithm %s", sigAlgorithm)}
+		}
+
+	default:
+		return nil, fmt.Errorf("unsupported key type %v", p.Type)
+	}
+
+	// Convert to base64
+	var encoded string
+	switch marshaling {
+	case MarshalingTypeASN1:
+		encoded = base64.StdEncoding.EncodeToString(sig)
+	case MarshalingTypeJWS:
+		encoded = base64.RawURLEncoding.EncodeToString(sig)
+	}
+	res := &SigningResult{
+		Signature: p.getVersionPrefix(ver) + encoded,
+		PublicKey: pubKey,
+	}
+
+	return res, nil
+}
+
+func (p *Policy) VerifySignature(context, input []byte, hashAlgorithm HashType, sigAlgorithm string, marshaling MarshalingType, sig string) (bool, error) {
+	if !p.Type.SigningSupported() {
+		return false, errutil.UserError{Err: fmt.Sprintf("message verification not supported for key type %v", p.Type)}
+	}
+
+	tplParts, err := p.getTemplateParts()
+	if err != nil {
+		return false, err
+	}
+
+	// Verify the prefix
+	if !strings.HasPrefix(sig, tplParts[0]) {
+		return false, errutil.UserError{Err: "invalid signature: no prefix"}
+	}
+
+	splitVerSig := strings.SplitN(strings.TrimPrefix(sig, tplParts[0]), tplParts[1], 2)
+	if len(splitVerSig) != 2 {
+		return false, errutil.UserError{Err: "invalid signature: wrong number of fields"}
+	}
+
+	ver, err := strconv.Atoi(splitVerSig[0])
+	if err != nil {
+		return false, errutil.UserError{Err: "invalid signature: version number could not be decoded"}
+	}
+
+	if ver > p.LatestVersion {
+		return false, errutil.UserError{Err: "invalid signature: version is too new"}
+	}
+
+	if p.MinDecryptionVersion > 0 && ver <
p.MinDecryptionVersion { + return false, errutil.UserError{Err: ErrTooOld} + } + + var sigBytes []byte + switch marshaling { + case MarshalingTypeASN1: + sigBytes, err = base64.StdEncoding.DecodeString(splitVerSig[1]) + case MarshalingTypeJWS: + sigBytes, err = base64.RawURLEncoding.DecodeString(splitVerSig[1]) + default: + return false, errutil.UserError{Err: "requested marshaling type is invalid"} + } + if err != nil { + return false, errutil.UserError{Err: "invalid base64 signature value"} + } + + switch p.Type { + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: + var curve elliptic.Curve + switch p.Type { + case KeyType_ECDSA_P384: + curve = elliptic.P384() + case KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + + var ecdsaSig ecdsaSignature + + switch marshaling { + case MarshalingTypeASN1: + rest, err := asn1.Unmarshal(sigBytes, &ecdsaSig) + if err != nil { + return false, errutil.UserError{Err: "supplied signature is invalid"} + } + if rest != nil && len(rest) != 0 { + return false, errutil.UserError{Err: "supplied signature contains extra data"} + } + + case MarshalingTypeJWS: + paramLen := len(sigBytes) / 2 + rb := sigBytes[:paramLen] + sb := sigBytes[paramLen:] + ecdsaSig.R = new(big.Int) + ecdsaSig.R.SetBytes(rb) + ecdsaSig.S = new(big.Int) + ecdsaSig.S.SetBytes(sb) + } + + keyParams := p.Keys[strconv.Itoa(ver)] + key := &ecdsa.PublicKey{ + Curve: curve, + X: keyParams.EC_X, + Y: keyParams.EC_Y, + } + + return ecdsa.Verify(key, input, ecdsaSig.R, ecdsaSig.S), nil + + case KeyType_ED25519: + var key ed25519.PrivateKey + + if p.Derived { + // Derive the key that should be used + var err error + key, err = p.DeriveKey(context, ver, 32) + if err != nil { + return false, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)} + } + } else { + key = ed25519.PrivateKey(p.Keys[strconv.Itoa(ver)].Key) + } + + return ed25519.Verify(key.Public().(ed25519.PublicKey), input, sigBytes), nil + + case KeyType_RSA2048, KeyType_RSA4096: + key := p.Keys[strconv.Itoa(ver)].RSAKey + + var algo crypto.Hash + switch hashAlgorithm { + case HashTypeSHA1: + algo = crypto.SHA1 + case HashTypeSHA2224: + algo = crypto.SHA224 + case HashTypeSHA2256: + algo = crypto.SHA256 + case HashTypeSHA2384: + algo = crypto.SHA384 + case HashTypeSHA2512: + algo = crypto.SHA512 + default: + return false, errutil.InternalError{Err: "unsupported hash algorithm"} + } + + if sigAlgorithm == "" { + sigAlgorithm = "pss" + } + + switch sigAlgorithm { + case "pss": + err = rsa.VerifyPSS(&key.PublicKey, algo, input, sigBytes, nil) + case "pkcs1v15": + err = rsa.VerifyPKCS1v15(&key.PublicKey, algo, input, sigBytes) + default: + return false, errutil.InternalError{Err: fmt.Sprintf("unsupported rsa signature algorithm %s", sigAlgorithm)} + } + + return err == nil, nil + + default: + return false, errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} + } +} + +func (p *Policy) Rotate(ctx context.Context, storage logical.Storage, randReader io.Reader) (retErr error) { + priorLatestVersion := p.LatestVersion + priorMinDecryptionVersion := p.MinDecryptionVersion + var priorKeys keyEntryMap + + if p.Keys != nil { + priorKeys = keyEntryMap{} + for k, v := range p.Keys { + priorKeys[k] = v + } + } + + defer func() { + if retErr != nil { + p.LatestVersion = priorLatestVersion + p.MinDecryptionVersion = priorMinDecryptionVersion + p.Keys = priorKeys + } + }() + + if p.Keys == nil { + // This is an initial key rotation when generating a new policy. 
We + // don't need to call migrate here because if we've called getPolicy to + // get the policy in the first place it will have been run. + p.Keys = keyEntryMap{} + } + + p.LatestVersion += 1 + now := time.Now() + entry := KeyEntry{ + CreationTime: now, + DeprecatedCreationTime: now.Unix(), + } + + hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) + if err != nil { + return err + } + entry.HMACKey = hmacKey + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: + // Default to 256 bit key + numBytes := 32 + if p.Type == KeyType_AES128_GCM96 { + numBytes = 16 + } + newKey, err := uuid.GenerateRandomBytesWithReader(numBytes, randReader) + if err != nil { + return err + } + entry.Key = newKey + + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: + var curve elliptic.Curve + switch p.Type { + case KeyType_ECDSA_P384: + curve = elliptic.P384() + case KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + + privKey, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return err + } + entry.EC_D = privKey.D + entry.EC_X = privKey.X + entry.EC_Y = privKey.Y + derBytes, err := x509.MarshalPKIXPublicKey(privKey.Public()) + if err != nil { + return errwrap.Wrapf("error marshaling public key: {{err}}", err) + } + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + if pemBytes == nil || len(pemBytes) == 0 { + return fmt.Errorf("error PEM-encoding public key") + } + entry.FormattedPublicKey = string(pemBytes) + + case KeyType_ED25519: + pub, pri, err := ed25519.GenerateKey(randReader) + if err != nil { + return err + } + entry.Key = pri + entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(pub) + + case KeyType_RSA2048, KeyType_RSA4096: + bitSize := 2048 + if p.Type == KeyType_RSA4096 { + bitSize = 4096 + } + + entry.RSAKey, err = rsa.GenerateKey(randReader, bitSize) + if err != nil { + return err + } + } + + if p.ConvergentEncryption { + if p.ConvergentVersion == -1 || p.ConvergentVersion > 1 { + entry.ConvergentVersion = currentConvergentVersion + } + } + + p.Keys[strconv.Itoa(p.LatestVersion)] = entry + + // This ensures that with new key creations min decryption version is set + // to 1 rather than the int default of 0, since keys start at 1 (either + // fresh or after migration to the key map) + if p.MinDecryptionVersion == 0 { + p.MinDecryptionVersion = 1 + } + + return p.Persist(ctx, storage) +} + +func (p *Policy) MigrateKeyToKeysMap() { + now := time.Now() + p.Keys = keyEntryMap{ + "1": KeyEntry{ + Key: p.Key, + CreationTime: now, + DeprecatedCreationTime: now.Unix(), + }, + } + p.Key = nil +} + +// Backup should be called with an exclusive lock held on the policy +func (p *Policy) Backup(ctx context.Context, storage logical.Storage) (out string, retErr error) { + if !p.Exportable { + return "", fmt.Errorf("exporting is disallowed on the policy") + } + + if !p.AllowPlaintextBackup { + return "", fmt.Errorf("plaintext backup is disallowed on the policy") + } + + priorBackupInfo := p.BackupInfo + + defer func() { + if retErr != nil { + p.BackupInfo = priorBackupInfo + } + }() + + // Create a record of this backup operation in the policy + p.BackupInfo = &BackupInfo{ + Time: time.Now(), + Version: p.LatestVersion, + } + err := p.Persist(ctx, storage) + if err != nil { + return "", errwrap.Wrapf("failed to persist policy with backup info: {{err}}", err) + } + + // Load the archive only after persisting the 
policy as the archive can get + // adjusted while persisting the policy + archivedKeys, err := p.LoadArchive(ctx, storage) + if err != nil { + return "", err + } + + keyData := &KeyData{ + Policy: p, + ArchivedKeys: archivedKeys, + } + + encodedBackup, err := jsonutil.EncodeJSON(keyData) + if err != nil { + return "", err + } + + return base64.StdEncoding.EncodeToString(encodedBackup), nil +} + +func (p *Policy) getTemplateParts() ([]string, error) { + partsRaw, ok := p.versionPrefixCache.Load("template-parts") + if ok { + return partsRaw.([]string), nil + } + + template := p.VersionTemplate + if template == "" { + template = DefaultVersionTemplate + } + + tplParts := strings.Split(template, "{{version}}") + if len(tplParts) != 2 { + return nil, errutil.InternalError{Err: "error parsing version template"} + } + + p.versionPrefixCache.Store("template-parts", tplParts) + return tplParts, nil +} + +func (p *Policy) getVersionPrefix(ver int) string { + prefixRaw, ok := p.versionPrefixCache.Load(ver) + if ok { + return prefixRaw.(string) + } + + template := p.VersionTemplate + if template == "" { + template = DefaultVersionTemplate + } + + prefix := strings.Replace(template, "{{version}}", strconv.Itoa(ver), -1) + p.versionPrefixCache.Store(ver, prefix) + + return prefix +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/transit_lru.go b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/transit_lru.go new file mode 100644 index 00000000..cd1f6daf --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/transit_lru.go @@ -0,0 +1,29 @@ +package keysutil + +import lru "github.com/hashicorp/golang-lru" + +type TransitLRU struct { + size int + lru *lru.TwoQueueCache +} + +func NewTransitLRU(size int) (*TransitLRU, error) { + lru, err := lru.New2Q(size) + return &TransitLRU{lru: lru, size: size}, err +} + +func (c *TransitLRU) Delete(key interface{}) { + c.lru.Remove(key) +} + +func (c *TransitLRU) Load(key interface{}) (value interface{}, ok bool) { + return c.lru.Get(key) +} + +func (c *TransitLRU) Store(key, value interface{}) { + c.lru.Add(key, value) +} + +func (c *TransitLRU) Size() int { + return c.size +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/transit_syncmap.go b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/transit_syncmap.go new file mode 100644 index 00000000..ce907138 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/transit_syncmap.go @@ -0,0 +1,29 @@ +package keysutil + +import ( + "sync" +) + +type TransitSyncMap struct { + syncmap sync.Map +} + +func NewTransitSyncMap() *TransitSyncMap { + return &TransitSyncMap{syncmap: sync.Map{}} +} + +func (c *TransitSyncMap) Delete(key interface{}) { + c.syncmap.Delete(key) +} + +func (c *TransitSyncMap) Load(key interface{}) (value interface{}, ok bool) { + return c.syncmap.Load(key) +} + +func (c *TransitSyncMap) Store(key, value interface{}) { + c.syncmap.Store(key, value) +} + +func (c *TransitSyncMap) Size() int { + return 0 +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go b/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go new file mode 100644 index 00000000..c7c000a5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go @@ -0,0 +1,10 @@ +package license + +// Features is a bitmask of feature flags +type Features uint + +const FeatureNone Features = 0 + +func (f Features) HasFeature(flag Features) bool { + return false +} diff --git 
a/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go b/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go new file mode 100644 index 00000000..1c854024 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go @@ -0,0 +1,59 @@ +package locksutil + +import ( + "sync" + + "github.com/hashicorp/vault/sdk/helper/cryptoutil" +) + +const ( + LockCount = 256 +) + +type LockEntry struct { + sync.RWMutex +} + +// CreateLocks returns an array so that the locks can be iterated over in +// order. +// +// This is only threadsafe if a process is using a single lock, or iterating +// over the entire lock slice in order. Using a consistent order avoids +// deadlocks because you can never have the following: +// +// Lock A, Lock B +// Lock B, Lock A +// +// Where process 1 is now deadlocked trying to lock B, and process 2 deadlocked trying to lock A +// +func CreateLocks() []*LockEntry { + ret := make([]*LockEntry, LockCount) + for i := range ret { + ret[i] = new(LockEntry) + } + return ret +} + +func LockIndexForKey(key string) uint8 { + return uint8(cryptoutil.Blake2b256Hash(key)[0]) +} + +func LockForKey(locks []*LockEntry, key string) *LockEntry { + return locks[LockIndexForKey(key)] +} + +func LocksForKeys(locks []*LockEntry, keys []string) []*LockEntry { + lockIndexes := make(map[uint8]struct{}, len(keys)) + for _, k := range keys { + lockIndexes[LockIndexForKey(k)] = struct{}{} + } + + locksToReturn := make([]*LockEntry, 0, len(keys)) + for i, l := range locks { + if _, ok := lockIndexes[uint8(i)]; ok { + locksToReturn = append(locksToReturn, l) + } + } + + return locksToReturn +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go b/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go new file mode 100644 index 00000000..27578e3f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go @@ -0,0 +1,81 @@ +package logging + +import ( + "fmt" + "io" + "os" + "strings" + + log "github.com/hashicorp/go-hclog" +) + +type LogFormat int + +const ( + UnspecifiedFormat LogFormat = iota + StandardFormat + JSONFormat +) + +// Stringer implementation +func (l LogFormat) String() string { + switch l { + case UnspecifiedFormat: + return "unspecified" + case StandardFormat: + return "standard" + case JSONFormat: + return "json" + } + + // unreachable + return "unknown" +} + +// NewVaultLogger creates a new logger with the specified level and a Vault +// formatter +func NewVaultLogger(level log.Level) log.Logger { + return NewVaultLoggerWithWriter(log.DefaultOutput, level) +} + +// NewVaultLoggerWithWriter creates a new logger with the specified level and +// writer and a Vault formatter +func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger { + opts := &log.LoggerOptions{ + Level: level, + Output: w, + JSONFormat: ParseEnvLogFormat() == JSONFormat, + } + return log.New(opts) +} + +// ParseLogFormat parses the log format from the provided string. +func ParseLogFormat(format string) (LogFormat, error) { + + switch strings.ToLower(strings.TrimSpace(format)) { + case "": + return UnspecifiedFormat, nil + case "standard": + return StandardFormat, nil + case "json": + return JSONFormat, nil + default: + return UnspecifiedFormat, fmt.Errorf("Unknown log format: %s", format) + } +} + +// ParseEnvLogFormat parses the log format from an environment variable. 
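+// VAULT_LOG_FORMAT takes precedence; LOGXI_FORMAT is honored only as a
+// legacy fallback.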
+func ParseEnvLogFormat() LogFormat { + logFormat := os.Getenv("VAULT_LOG_FORMAT") + if logFormat == "" { + logFormat = os.Getenv("LOGXI_FORMAT") + } + switch strings.ToLower(logFormat) { + case "json", "vault_json", "vault-json", "vaultjson": + return JSONFormat + case "standard": + return StandardFormat + default: + return UnspecifiedFormat + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock.go b/vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock.go new file mode 100644 index 00000000..1675633d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock.go @@ -0,0 +1,15 @@ +package mlock + +// This should be set by the OS-specific packages to tell whether LockMemory +// is supported or not. +var supported bool + +// Supported returns true if LockMemory is functional on this system. +func Supported() bool { + return supported +} + +// LockMemory prevents any memory from being swapped to disk. +func LockMemory() error { + return lockMemory() +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock_unavail.go b/vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock_unavail.go new file mode 100644 index 00000000..8084963f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock_unavail.go @@ -0,0 +1,13 @@ +// +build android darwin nacl netbsd plan9 windows + +package mlock + +func init() { + supported = false +} + +func lockMemory() error { + // XXX: No good way to do this on Windows. There is the VirtualLock + // method, but it requires a specific address and offset. + return nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock_unix.go b/vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock_unix.go new file mode 100644 index 00000000..af0a69d4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock_unix.go @@ -0,0 +1,18 @@ +// +build dragonfly freebsd linux openbsd solaris + +package mlock + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +func init() { + supported = true +} + +func lockMemory() error { + // Mlockall prevents all current and future pages from being swapped out. 
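+	// MCL_CURRENT locks pages that are currently mapped; MCL_FUTURE also
+	// locks any pages that become mapped in the future.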
+	return unix.Mlockall(syscall.MCL_CURRENT | syscall.MCL_FUTURE)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go b/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go
new file mode 100644
index 00000000..e0e39445
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go
@@ -0,0 +1,136 @@
+package pathmanager
+
+import (
+	"strings"
+	"sync"
+
+	iradix "github.com/hashicorp/go-immutable-radix"
+)
+
+// PathManager is a prefix-searchable index of paths
+type PathManager struct {
+	l     sync.RWMutex
+	paths *iradix.Tree
+}
+
+// New creates a new path manager
+func New() *PathManager {
+	return &PathManager{
+		paths: iradix.New(),
+	}
+}
+
+// AddPaths adds paths to the paths list
+func (p *PathManager) AddPaths(paths []string) {
+	p.l.Lock()
+	defer p.l.Unlock()
+
+	txn := p.paths.Txn()
+	for _, prefix := range paths {
+		if len(prefix) == 0 {
+			continue
+		}
+
+		var exception bool
+		if strings.HasPrefix(prefix, "!") {
+			prefix = strings.TrimPrefix(prefix, "!")
+			exception = true
+		}
+
+		// We trim any trailing *, but we don't touch whether it is a trailing
+		// slash or not since we want to be able to ignore prefixes that fully
+		// specify a file
+		txn.Insert([]byte(strings.TrimSuffix(prefix, "*")), exception)
+	}
+	p.paths = txn.Commit()
+}
+
+// RemovePaths removes paths from the paths list
+func (p *PathManager) RemovePaths(paths []string) {
+	p.l.Lock()
+	defer p.l.Unlock()
+
+	txn := p.paths.Txn()
+	for _, prefix := range paths {
+		if len(prefix) == 0 {
+			continue
+		}
+
+		// Exceptions aren't stored with the leading ! so strip it
+		if strings.HasPrefix(prefix, "!") {
+			prefix = strings.TrimPrefix(prefix, "!")
+		}
+
+		// We trim any trailing *, but we don't touch whether it is a trailing
+		// slash or not since we want to be able to ignore prefixes that fully
+		// specify a file
+		txn.Delete([]byte(strings.TrimSuffix(prefix, "*")))
+	}
+	p.paths = txn.Commit()
+}
+
+// RemovePathPrefix removes all paths with the given prefix
+func (p *PathManager) RemovePathPrefix(prefix string) {
+	p.l.Lock()
+	defer p.l.Unlock()
+
+	// We trim any trailing *, but we don't touch whether it is a trailing
+	// slash or not since we want to be able to ignore prefixes that fully
+	// specify a file
+	p.paths, _ = p.paths.DeletePrefix([]byte(strings.TrimSuffix(prefix, "*")))
+}
+
+// Len returns the number of paths
+func (p *PathManager) Len() int {
+	return p.paths.Len()
+}
+
+// Paths returns the path list
+func (p *PathManager) Paths() []string {
+	p.l.RLock()
+	defer p.l.RUnlock()
+
+	paths := make([]string, 0, p.paths.Len())
+	walkFn := func(k []byte, v interface{}) bool {
+		paths = append(paths, string(k))
+		return false
+	}
+	p.paths.Root().Walk(walkFn)
+	return paths
+}
+
+// HasPath returns whether a prefix of the path exists, regardless of whether
+// it is a full path (ending with /) or a prefix for a leaf node
+func (p *PathManager) HasPath(path string) bool {
+	p.l.RLock()
+	defer p.l.RUnlock()
+
+	if _, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok {
+		var exception bool
+		if exceptionRaw != nil {
+			exception = exceptionRaw.(bool)
+		}
+		return !exception
+	}
+	return false
+}
+
+// HasExactPath returns whether the longest match is an exact match for the
+// full path
+func (p *PathManager) HasExactPath(path string) bool {
+	p.l.RLock()
+	defer p.l.RUnlock()
+
+	if val, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok {
+		var exception bool
+		if exceptionRaw != nil {
+			exception =
exceptionRaw.(bool) + } + + strVal := string(val) + if strings.HasSuffix(strVal, "/") || strVal == path { + return !exception + } + } + return false +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go new file mode 100644 index 00000000..e5e2a8e0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go @@ -0,0 +1,69 @@ +package pluginutil + +import ( + "os" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/vault/sdk/helper/mlock" +) + +var ( + // PluginMlockEnabled is the ENV name used to pass the configuration for + // enabling mlock + PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED" + + // PluginVaultVersionEnv is the ENV name used to pass the version of the + // vault server to the plugin + PluginVaultVersionEnv = "VAULT_VERSION" + + // PluginMetadataModeEnv is an ENV name used to disable TLS communication + // to bootstrap mounting plugins. + PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" + + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. + PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" + + // PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded + // string. Used for testing. + PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM" +) + +// OptionallyEnableMlock determines if mlock should be called, and if so enables +// mlock. +func OptionallyEnableMlock() error { + if os.Getenv(PluginMlockEnabled) == "true" { + return mlock.LockMemory() + } + + return nil +} + +// GRPCSupport defaults to returning true, unless VAULT_VERSION is missing or +// it fails to meet the version constraint. +func GRPCSupport() bool { + verString := os.Getenv(PluginVaultVersionEnv) + // If the env var is empty, we fall back to netrpc for backward compatibility. + if verString == "" { + return false + } + if verString != "unknown" { + ver, err := version.NewVersion(verString) + if err != nil { + return true + } + // Due to some regressions on 0.9.2 & 0.9.3 we now require version 0.9.4 + // to allow the plugin framework to default to gRPC. + constraint, err := version.NewConstraint(">= 0.9.4") + if err != nil { + return true + } + return constraint.Check(ver) + } + return true +} + +// InMetadataMode returns true if the plugin calling this function is running in metadata mode. +func InMetadataMode() bool { + return os.Getenv(PluginMetadataModeEnv) == "true" +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go new file mode 100644 index 00000000..a5723630 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go @@ -0,0 +1,148 @@ +package pluginutil + +import ( + "context" + "crypto/sha256" + "crypto/tls" + "fmt" + "os/exec" + "time" + + log "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/version" +) + +// Looker defines the plugin Lookup function that looks into the plugin catalog +// for available plugins and returns a PluginRunner +type Looker interface { + LookupPlugin(context.Context, string, consts.PluginType) (*PluginRunner, error) +} + +// RunnerUtil interface defines the functions needed by the runner to wrap the +// metadata needed to run a plugin process. 
This includes looking up Mlock +// configuration and wrapping data in a response wrapped token. +// logical.SystemView implementations satisfy this interface. +type RunnerUtil interface { + ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) + MlockEnabled() bool +} + +// LookRunnerUtil defines the functions for both Looker and Wrapper +type LookRunnerUtil interface { + Looker + RunnerUtil +} + +// PluginRunner defines the metadata needed to run a plugin securely with +// go-plugin. +type PluginRunner struct { + Name string `json:"name" structs:"name"` + Type consts.PluginType `json:"type" structs:"type"` + Command string `json:"command" structs:"command"` + Args []string `json:"args" structs:"args"` + Env []string `json:"env" structs:"env"` + Sha256 []byte `json:"sha256" structs:"sha256"` + Builtin bool `json:"builtin" structs:"builtin"` + BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"` +} + +// Run takes a wrapper RunnerUtil instance along with the go-plugin parameters and +// returns a configured plugin.Client with TLS Configured and a wrapping token set +// on PluginUnwrapTokenEnv for plugin process consumption. +func (r *PluginRunner) Run(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) { + return r.runCommon(ctx, wrapper, pluginSets, hs, env, logger, false) +} + +// RunMetadataMode returns a configured plugin.Client that will dispense a plugin +// in metadata mode. The PluginMetadataModeEnv is passed in as part of the Cmd to +// plugin.Client, and consumed by the plugin process on api.VaultPluginTLSProvider. +func (r *PluginRunner) RunMetadataMode(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) { + return r.runCommon(ctx, wrapper, pluginSets, hs, env, logger, true) + +} + +func (r *PluginRunner) runCommon(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger, isMetadataMode bool) (*plugin.Client, error) { + cmd := exec.Command(r.Command, r.Args...) + + // `env` should always go last to avoid overwriting internal values that might + // have been provided externally. + cmd.Env = append(cmd.Env, r.Env...) + cmd.Env = append(cmd.Env, env...) + + // Add the mlock setting to the ENV of the plugin + if wrapper != nil && wrapper.MlockEnabled() { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version)) + + var clientTLSConfig *tls.Config + if !isMetadataMode { + // Add the metadata mode ENV and set it to false + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMetadataModeEnv, "false")) + + // Get a CA TLS Certificate + certBytes, key, err := generateCert() + if err != nil { + return nil, err + } + + // Use CA to sign a client cert and return a configured TLS config + clientTLSConfig, err = createClientTLSConfig(certBytes, key) + if err != nil { + return nil, err + } + + // Use CA to sign a server cert and wrap the values in a response wrapped + // token. 
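+		// The plugin consumes this single-use wrapping token (via
+		// PluginUnwrapTokenEnv) to recover the server certificate and key
+		// for its side of the TLS session.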
+ wrapToken, err := wrapServerConfig(ctx, wrapper, certBytes, key) + if err != nil { + return nil, err + } + + // Add the response wrap token to the ENV of the plugin + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) + } else { + logger = logger.With("metadata", "true") + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMetadataModeEnv, "true")) + } + + secureConfig := &plugin.SecureConfig{ + Checksum: r.Sha256, + Hash: sha256.New(), + } + + clientConfig := &plugin.ClientConfig{ + HandshakeConfig: hs, + VersionedPlugins: pluginSets, + Cmd: cmd, + SecureConfig: secureConfig, + TLSConfig: clientTLSConfig, + Logger: logger, + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + } + + client := plugin.NewClient(clientConfig) + + return client, nil +} + +// CtxCancelIfCanceled takes a context cancel func and a context. If the context is +// shutdown the cancelfunc is called. This is useful for merging two cancel +// functions. +func CtxCancelIfCanceled(f context.CancelFunc, ctxCanceler context.Context) chan struct{} { + quitCh := make(chan struct{}) + go func() { + select { + case <-quitCh: + case <-ctxCanceler.Done(): + f() + } + }() + return quitCh +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go new file mode 100644 index 00000000..f78f0401 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go @@ -0,0 +1,108 @@ +package pluginutil + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +// generateCert is used internally to create certificates for the plugin +// client and server. +func generateCert() ([]byte, *ecdsa.PrivateKey, error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + host, err := uuid.GenerateUUID() + if err != nil { + return nil, nil, err + } + + sn, err := certutil.GenerateSerialNumber() + if err != nil { + return nil, nil, err + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, errwrap.Wrapf("unable to generate client certificate: {{err}}", err) + } + + return certBytes, key, nil +} + +// createClientTLSConfig creates a signed certificate and returns a configured +// TLS config. 
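+// The self-signed CA certificate generated above doubles as the leaf
+// certificate and the root of trust, so client and server verify each other
+// against the same cert.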
+func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config, error) { + clientCert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, errwrap.Wrapf("error parsing generated plugin certificate: {{err}}", err) + } + + cert := tls.Certificate{ + Certificate: [][]byte{certBytes}, + PrivateKey: key, + Leaf: clientCert, + } + + clientCertPool := x509.NewCertPool() + clientCertPool.AddCert(clientCert) + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: clientCertPool, + ClientCAs: clientCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + ServerName: clientCert.Subject.CommonName, + MinVersion: tls.VersionTLS12, + } + + tlsConfig.BuildNameToCertificate() + + return tlsConfig, nil +} + +// wrapServerConfig is used to create a server certificate and private key, then +// wrap them in an unwrap token for later retrieval by the plugin. +func wrapServerConfig(ctx context.Context, sys RunnerUtil, certBytes []byte, key *ecdsa.PrivateKey) (string, error) { + rawKey, err := x509.MarshalECPrivateKey(key) + if err != nil { + return "", err + } + + wrapInfo, err := sys.ResponseWrapData(ctx, map[string]interface{}{ + "ServerCert": certBytes, + "ServerKey": rawKey, + }, time.Second*60, true) + if err != nil { + return "", err + } + + return wrapInfo.Token, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/policyutil/policyutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/policyutil/policyutil.go new file mode 100644 index 00000000..85beaf21 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/policyutil/policyutil.go @@ -0,0 +1,131 @@ +package policyutil + +import ( + "sort" + "strings" + + "github.com/hashicorp/vault/sdk/helper/strutil" +) + +const ( + AddDefaultPolicy = true + DoNotAddDefaultPolicy = false +) + +// ParsePolicies parses a comma-delimited list of policies. +// The resulting collection will have no duplicate elements. +// If 'root' policy was present in the list of policies, then +// all other policies will be ignored, the result will contain +// just the 'root'. In cases where 'root' is not present, if +// 'default' policy is not already present, it will be added. +func ParsePolicies(policiesRaw interface{}) []string { + if policiesRaw == nil { + return []string{"default"} + } + + var policies []string + switch policiesRaw.(type) { + case string: + if policiesRaw.(string) == "" { + return []string{} + } + policies = strings.Split(policiesRaw.(string), ",") + case []string: + policies = policiesRaw.([]string) + } + + return SanitizePolicies(policies, false) +} + +// SanitizePolicies performs the common input validation tasks +// which are performed on the list of policies across Vault. +// The resulting collection will have no duplicate elements. +// If 'root' policy was present in the list of policies, then +// all other policies will be ignored, the result will contain +// just the 'root'. In cases where 'root' is not present, if +// 'default' policy is not already present, it will be added +// if addDefault is set to true. +func SanitizePolicies(policies []string, addDefault bool) []string { + defaultFound := false + for i, p := range policies { + policies[i] = strings.ToLower(strings.TrimSpace(p)) + // Eliminate unnamed policies. + if policies[i] == "" { + continue + } + + // If 'root' policy is present, ignore all other policies. 
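+		// e.g. []string{"dev", " ROOT "} collapses to just []string{"root"}.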
+		if policies[i] == "root" {
+			policies = []string{"root"}
+			defaultFound = true
+			break
+		}
+		if policies[i] == "default" {
+			defaultFound = true
+		}
+	}
+
+	// Always add 'default' unless the policies contain 'root'.
+	if addDefault && (len(policies) == 0 || !defaultFound) {
+		policies = append(policies, "default")
+	}
+
+	return strutil.RemoveDuplicates(policies, true)
+}
+
+// EquivalentPolicies checks whether the given policy sets are equivalent, as in,
+// they contain the same values. The benefit of this method is that it leaves
+// the "default" policy out of its comparisons as it may be added later by core
+// after a set of policies has been saved by a backend.
+func EquivalentPolicies(a, b []string) bool {
+	switch {
+	case a == nil && b == nil:
+		return true
+	case a == nil && len(b) == 1 && b[0] == "default":
+		return true
+	case b == nil && len(a) == 1 && a[0] == "default":
+		return true
+	case a == nil || b == nil:
+		return false
+	}
+
+	// First we'll build maps to ensure unique values and filter default
+	mapA := map[string]bool{}
+	mapB := map[string]bool{}
+	for _, keyA := range a {
+		if keyA == "default" {
+			continue
+		}
+		mapA[keyA] = true
+	}
+	for _, keyB := range b {
+		if keyB == "default" {
+			continue
+		}
+		mapB[keyB] = true
+	}
+
+	// Now we'll build our checking slices
+	var sortedA, sortedB []string
+	for keyA := range mapA {
+		sortedA = append(sortedA, keyA)
+	}
+	for keyB := range mapB {
+		sortedB = append(sortedB, keyB)
+	}
+	sort.Strings(sortedA)
+	sort.Strings(sortedB)
+
+	// Finally, compare
+	if len(sortedA) != len(sortedB) {
+		return false
+	}
+
+	for i := range sortedA {
+		if sortedA[i] != sortedB[i] {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/salt/salt.go b/vendor/github.com/hashicorp/vault/sdk/helper/salt/salt.go
new file mode 100644
index 00000000..e9b7b6e9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/salt/salt.go
@@ -0,0 +1,178 @@
+package salt
+
+import (
+	"context"
+	"crypto/hmac"
+	"crypto/sha1"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"hash"
+
+	"github.com/hashicorp/errwrap"
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	// DefaultLocation is the path in the view we store our key salt
+	// if no other path is provided.
+	DefaultLocation = "salt"
+)
+
+// Salt is used to manage a persistent salt key which is used to
+// hash values. This allows keys to be generated and recovered
+// using the global salt. Primarily, this allows paths in the storage
+// backend to be obfuscated if they may contain sensitive information.
+type Salt struct {
+	config    *Config
+	salt      string
+	generated bool
+}
+
+type HashFunc func([]byte) []byte
+
+// Config is used to parameterize the Salt
+type Config struct {
+	// Location is the path in the storage backend for the
+	// salt. Uses DefaultLocation if not specified.
+	Location string
+
+	// HashFunc is the hashing function to use for salting.
+	// Defaults to SHA1 if not provided.
+	HashFunc HashFunc
+
+	// HMAC allows specification of a hash function to use for
+	// the HMAC helpers
+	HMAC func() hash.Hash
+
+	// String prepended to HMAC strings for identification.
+ // Required if using HMAC + HMACType string +} + +// NewSalt creates a new salt based on the configuration +func NewSalt(ctx context.Context, view logical.Storage, config *Config) (*Salt, error) { + // Setup the configuration + if config == nil { + config = &Config{} + } + if config.Location == "" { + config.Location = DefaultLocation + } + if config.HashFunc == nil { + config.HashFunc = SHA256Hash + } + if config.HMAC == nil { + config.HMAC = sha256.New + config.HMACType = "hmac-sha256" + } + + // Create the salt + s := &Salt{ + config: config, + } + + // Look for the salt + var raw *logical.StorageEntry + var err error + if view != nil { + raw, err = view.Get(ctx, config.Location) + if err != nil { + return nil, errwrap.Wrapf("failed to read salt: {{err}}", err) + } + } + + // Restore the salt if it exists + if raw != nil { + s.salt = string(raw.Value) + } + + // Generate a new salt if necessary + if s.salt == "" { + s.salt, err = uuid.GenerateUUID() + if err != nil { + return nil, errwrap.Wrapf("failed to generate uuid: {{err}}", err) + } + s.generated = true + if view != nil { + raw := &logical.StorageEntry{ + Key: config.Location, + Value: []byte(s.salt), + } + if err := view.Put(ctx, raw); err != nil { + return nil, errwrap.Wrapf("failed to persist salt: {{err}}", err) + } + } + } + + if config.HMAC != nil { + if len(config.HMACType) == 0 { + return nil, fmt.Errorf("HMACType must be defined") + } + } + + return s, nil +} + +// SaltID is used to apply a salt and hash function to an ID to make sure +// it is not reversible +func (s *Salt) SaltID(id string) string { + return SaltID(s.salt, id, s.config.HashFunc) +} + +// GetHMAC is used to apply a salt and hash function to data to make sure it is +// not reversible, with an additional HMAC +func (s *Salt) GetHMAC(data string) string { + hm := hmac.New(s.config.HMAC, []byte(s.salt)) + hm.Write([]byte(data)) + return hex.EncodeToString(hm.Sum(nil)) +} + +// GetIdentifiedHMAC is used to apply a salt and hash function to data to make +// sure it is not reversible, with an additional HMAC, and ID prepended +func (s *Salt) GetIdentifiedHMAC(data string) string { + return s.config.HMACType + ":" + s.GetHMAC(data) +} + +// DidGenerate returns true if the underlying salt value was generated +// on initialization. +func (s *Salt) DidGenerate() bool { + return s.generated +} + +// SaltIDHashFunc uses the supplied hash function instead of the configured +// hash func in the salt. 
+func (s *Salt) SaltIDHashFunc(id string, hashFunc HashFunc) string {
+	return SaltID(s.salt, id, hashFunc)
+}
+
+// SaltID is used to apply a salt and hash function to an ID to make sure
+// it is not reversible
+func SaltID(salt, id string, hash HashFunc) string {
+	comb := salt + id
+	hashVal := hash([]byte(comb))
+	return hex.EncodeToString(hashVal)
+}
+
+func HMACValue(salt, val string, hashFunc func() hash.Hash) string {
+	hm := hmac.New(hashFunc, []byte(salt))
+	hm.Write([]byte(val))
+	return hex.EncodeToString(hm.Sum(nil))
+}
+
+func HMACIdentifiedValue(salt, val, hmacType string, hashFunc func() hash.Hash) string {
+	return hmacType + ":" + HMACValue(salt, val, hashFunc)
+}
+
+// SHA1Hash returns the SHA1 of the input
+func SHA1Hash(inp []byte) []byte {
+	hashed := sha1.Sum(inp)
+	return hashed[:]
+}
+
+// SHA256Hash returns the SHA256 of the input
+func SHA256Hash(inp []byte) []byte {
+	hashed := sha256.Sum256(inp)
+	return hashed[:]
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/tlsutil/tlsutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/tlsutil/tlsutil.go
new file mode 100644
index 00000000..236d32ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/tlsutil/tlsutil.go
@@ -0,0 +1,109 @@
+package tlsutil
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+
+	"github.com/hashicorp/vault/sdk/helper/strutil"
+)
+
+var ErrInvalidCertParams = errors.New("invalid certificate parameters")
+
+// TLSLookup maps the tls_min_version configuration to the internal value
+var TLSLookup = map[string]uint16{
+	"tls10": tls.VersionTLS10,
+	"tls11": tls.VersionTLS11,
+	"tls12": tls.VersionTLS12,
+}
+
+// cipherMap maps the cipher suite names to the internal cipher suite code.
+var cipherMap = map[string]uint16{
+	"TLS_RSA_WITH_RC4_128_SHA":                tls.TLS_RSA_WITH_RC4_128_SHA,
+	"TLS_RSA_WITH_3DES_EDE_CBC_SHA":           tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+	"TLS_RSA_WITH_AES_128_CBC_SHA":            tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+	"TLS_RSA_WITH_AES_256_CBC_SHA":            tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+	"TLS_RSA_WITH_AES_128_CBC_SHA256":         tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
+	"TLS_RSA_WITH_AES_128_GCM_SHA256":         tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
+	"TLS_RSA_WITH_AES_256_GCM_SHA384":         tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+	"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA":        tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+	"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA":    tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+	"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA":    tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+	"TLS_ECDHE_RSA_WITH_RC4_128_SHA":          tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+	"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA":     tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+	"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA":      tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+	"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA":      tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+	"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256":   tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":   tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+	"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384":   tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+	"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+	"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305":    tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+	"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305":  tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+}
+
+// ParseCiphers parses cipher suites from a comma-separated string into a
+// slice of recognized suite codes
+func ParseCiphers(cipherStr string) ([]uint16, error) {
+	suites := []uint16{}
+	ciphers := strutil.ParseStringSlice(cipherStr, ",")
+	for _, cipher := range ciphers {
+		if v, ok := cipherMap[cipher]; ok {
+			suites = append(suites, v)
+		} else {
+			return suites, fmt.Errorf("unsupported cipher %q", cipher)
+		}
+	}
+
+	return suites, nil
+}
+
+// GetCipherName returns the name of a given cipher suite code or an error if the
+// given cipher is unsupported.
+func GetCipherName(cipher uint16) (string, error) {
+	for cipherStr, cipherCode := range cipherMap {
+		if cipherCode == cipher {
+			return cipherStr, nil
+		}
+	}
+	return "", fmt.Errorf("unsupported cipher %d", cipher)
+}
+
+func ClientTLSConfig(caCert []byte, clientCert []byte, clientKey []byte) (*tls.Config, error) {
+	var tlsConfig *tls.Config
+	var pool *x509.CertPool
+
+	switch {
+	case len(caCert) != 0:
+		// Valid
+	case len(clientCert) != 0 && len(clientKey) != 0:
+		// Valid
+	default:
+		return nil, ErrInvalidCertParams
+	}
+
+	if len(caCert) != 0 {
+		pool = x509.NewCertPool()
+		pool.AppendCertsFromPEM(caCert)
+	}
+
+	tlsConfig = &tls.Config{
+		RootCAs:    pool,
+		ClientAuth: tls.RequireAndVerifyClientCert,
+		MinVersion: tls.VersionTLS12,
+	}
+
+	var cert tls.Certificate
+	var err error
+	if len(clientCert) != 0 && len(clientKey) != 0 {
+		cert, err = tls.X509KeyPair(clientCert, clientKey)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.Certificates = []tls.Certificate{cert}
+	}
+	tlsConfig.BuildNameToCertificate()
+
+	return tlsConfig, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go
new file mode 100644
index 00000000..bc4dd1d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go
@@ -0,0 +1,417 @@
+package tokenutil
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	sockaddr "github.com/hashicorp/go-sockaddr"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/parseutil"
+	"github.com/hashicorp/vault/sdk/helper/policyutil"
+	"github.com/hashicorp/vault/sdk/helper/strutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// TokenParams contains a set of common parameters that auth plugins can use
+// for setting token behavior
+type TokenParams struct {
+	// The set of CIDRs that tokens generated using this role will be bound to
+	TokenBoundCIDRs []*sockaddr.SockAddrMarshaler `json:"token_bound_cidrs"`
+
+	// If set, the token entry will have an explicit maximum TTL set, rather
+	// than deferring to role/mount values
+	TokenExplicitMaxTTL time.Duration `json:"token_explicit_max_ttl" mapstructure:"token_explicit_max_ttl"`
+
+	// The max TTL to use for the token
+	TokenMaxTTL time.Duration `json:"token_max_ttl" mapstructure:"token_max_ttl"`
+
+	// If set, core will not automatically add default to the policy list
+	TokenNoDefaultPolicy bool `json:"token_no_default_policy" mapstructure:"token_no_default_policy"`
+
+	// The maximum number of times a token issued from this role may be used.
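+	// A value of zero means unlimited uses.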
+	TokenNumUses int `json:"token_num_uses" mapstructure:"token_num_uses"`
+
+	// If non-zero, tokens created using this role will be able to be renewed
+	// forever, but will have a fixed renewal period of this value
+	TokenPeriod time.Duration `json:"token_period" mapstructure:"token_period"`
+
+	// The policies to set
+	TokenPolicies []string `json:"token_policies" mapstructure:"token_policies"`
+
+	// The type of token this role should issue
+	TokenType logical.TokenType `json:"token_type" mapstructure:"token_type"`
+
+	// The TTL to use for the token
+	TokenTTL time.Duration `json:"token_ttl" mapstructure:"token_ttl"`
+}
+
+// AddTokenFields adds fields to an existing role. It panics if it would
+// overwrite an existing field.
+func AddTokenFields(m map[string]*framework.FieldSchema) {
+	AddTokenFieldsWithAllowList(m, nil)
+}
+
+// AddTokenFieldsWithAllowList adds fields to an existing role. It panics if
+// it would overwrite an existing field. Allowed can be used to restrict the
+// set, e.g. if there would be conflicts.
+func AddTokenFieldsWithAllowList(m map[string]*framework.FieldSchema, allowed []string) {
+	r := TokenFields()
+	for k, v := range r {
+		if len(allowed) > 0 && !strutil.StrListContains(allowed, k) {
+			continue
+		}
+		if _, has := m[k]; has {
+			panic(fmt.Sprintf("adding role field %s would overwrite existing field", k))
+		}
+		m[k] = v
+	}
+}
+
+// TokenFields provides a set of field schemas for the parameters
+func TokenFields() map[string]*framework.FieldSchema {
+	return map[string]*framework.FieldSchema{
+		"token_bound_cidrs": &framework.FieldSchema{
+			Type:        framework.TypeCommaStringSlice,
+			Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Generated Token's Bound CIDRs",
+				Group: "Tokens",
+			},
+		},
+
+		"token_explicit_max_ttl": &framework.FieldSchema{
+			Type:        framework.TypeDurationSecond,
+			Description: tokenExplicitMaxTTLHelp,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Generated Token's Explicit Maximum TTL",
+				Group: "Tokens",
+			},
+		},
+
+		"token_max_ttl": &framework.FieldSchema{
+			Type:        framework.TypeDurationSecond,
+			Description: "The maximum lifetime of the generated token",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Generated Token's Maximum TTL",
+				Group: "Tokens",
+			},
+		},
+
+		"token_no_default_policy": &framework.FieldSchema{
+			Type:        framework.TypeBool,
+			Description: "If true, the 'default' policy will not automatically be added to generated tokens",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Do Not Attach 'default' Policy To Generated Tokens",
+				Group: "Tokens",
+			},
+		},
+
+		"token_period": &framework.FieldSchema{
+			Type:        framework.TypeDurationSecond,
+			Description: tokenPeriodHelp,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Generated Token's Period",
+				Group: "Tokens",
+			},
+		},
+
+		"token_policies": &framework.FieldSchema{
+			Type:        framework.TypeCommaStringSlice,
+			Description: "Comma-separated list of policies",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Generated Token's Policies",
+				Group: "Tokens",
+			},
+		},
+
+		"token_type": &framework.FieldSchema{
+			Type:        framework.TypeString,
+			Default:     "default-service",
+			Description: "The type of token to generate, service or batch",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Generated Token's Type",
+				Group: "Tokens",
+			},
+		},
+
+		"token_ttl": &framework.FieldSchema{
+			Type:        framework.TypeDurationSecond,
+			Description: "The initial ttl of the token to generate",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Generated Token's Initial TTL",
+				Group: "Tokens",
+			},
+		},
+
+		"token_num_uses": &framework.FieldSchema{
+			Type:        framework.TypeInt,
+			Description: "The maximum number of times a token may be used, a value of zero means unlimited",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Maximum Uses of Generated Tokens",
+				Group: "Tokens",
+			},
+		},
+	}
+}
+
+// ParseTokenFields provides common field parsing functionality into a
+// TokenParams struct
+func (t *TokenParams) ParseTokenFields(req *logical.Request, d *framework.FieldData) error {
+	if boundCIDRsRaw, ok := d.GetOk("token_bound_cidrs"); ok {
+		boundCIDRs, err := parseutil.ParseAddrs(boundCIDRsRaw.([]string))
+		if err != nil {
+			return err
+		}
+		t.TokenBoundCIDRs = boundCIDRs
+	}
+
+	if explicitMaxTTLRaw, ok := d.GetOk("token_explicit_max_ttl"); ok {
+		t.TokenExplicitMaxTTL = time.Duration(explicitMaxTTLRaw.(int)) * time.Second
+	}
+
+	if maxTTLRaw, ok := d.GetOk("token_max_ttl"); ok {
+		t.TokenMaxTTL = time.Duration(maxTTLRaw.(int)) * time.Second
+	}
+	if t.TokenMaxTTL < 0 {
+		return errors.New("'token_max_ttl' cannot be negative")
+	}
+
+	if noDefaultRaw, ok := d.GetOk("token_no_default_policy"); ok {
+		t.TokenNoDefaultPolicy = noDefaultRaw.(bool)
+	}
+
+	if periodRaw, ok := d.GetOk("token_period"); ok {
+		t.TokenPeriod = time.Duration(periodRaw.(int)) * time.Second
+	}
+	if t.TokenPeriod < 0 {
+		return errors.New("'token_period' cannot be negative")
+	}
+
+	if policiesRaw, ok := d.GetOk("token_policies"); ok {
+		t.TokenPolicies = policiesRaw.([]string)
+	}
+
+	if tokenTypeRaw, ok := d.GetOk("token_type"); ok {
+		var tokenType logical.TokenType
+		tokenTypeStr := tokenTypeRaw.(string)
+		switch tokenTypeStr {
+		case "", "default":
+			tokenType = logical.TokenTypeDefault
+		case "service":
+			tokenType = logical.TokenTypeService
+		case "batch":
+			tokenType = logical.TokenTypeBatch
+		default:
+			return fmt.Errorf("invalid 'token_type' value %q", tokenTypeStr)
+		}
+		t.TokenType = tokenType
+	}
+
+	if t.TokenType == logical.TokenTypeBatch || t.TokenType == logical.TokenTypeDefaultBatch {
+		if t.TokenPeriod != 0 {
+			return errors.New("'token_type' cannot be 'batch' or 'default_batch' when set to generate periodic tokens")
+		}
+		if t.TokenNumUses != 0 {
+			return errors.New("'token_type' cannot be 'batch' or 'default_batch' when set to generate tokens with limited use count")
+		}
+	}
+
+	if ttlRaw, ok := d.GetOk("token_ttl"); ok {
+		t.TokenTTL = time.Duration(ttlRaw.(int)) * time.Second
+	}
+	if t.TokenTTL < 0 {
+		return errors.New("'token_ttl' cannot be negative")
+	}
+	if t.TokenTTL > 0 && t.TokenMaxTTL > 0 && t.TokenTTL > t.TokenMaxTTL {
+		return errors.New("'token_ttl' cannot be greater than 'token_max_ttl'")
+	}
+
+	if tokenNumUses, ok := d.GetOk("token_num_uses"); ok {
+		t.TokenNumUses = tokenNumUses.(int)
+	}
+	if t.TokenNumUses < 0 {
+		return errors.New("'token_num_uses' cannot be negative")
+	}
+
+	return nil
+}
+
+// PopulateTokenData adds information from TokenParams into the map
+func (t *TokenParams) PopulateTokenData(m map[string]interface{}) {
+	m["token_bound_cidrs"] = t.TokenBoundCIDRs
+	m["token_explicit_max_ttl"] = int64(t.TokenExplicitMaxTTL.Seconds())
+	m["token_max_ttl"] = int64(t.TokenMaxTTL.Seconds())
+	m["token_no_default_policy"] = t.TokenNoDefaultPolicy
+	m["token_period"] = int64(t.TokenPeriod.Seconds())
+	m["token_policies"] = t.TokenPolicies
+	m["token_type"] = t.TokenType.String()
+	m["token_ttl"] = int64(t.TokenTTL.Seconds())
+	m["token_num_uses"] = t.TokenNumUses
+
+	if len(t.TokenPolicies) == 0 {
+		m["token_policies"] = []string{}
+	}
+
+	if len(t.TokenBoundCIDRs) == 0 {
+		m["token_bound_cidrs"] = []string{}
+	}
+}
+
+// PopulateTokenAuth populates Auth with parameters
+func (t *TokenParams) PopulateTokenAuth(auth *logical.Auth) {
+	auth.BoundCIDRs = t.TokenBoundCIDRs
+	auth.ExplicitMaxTTL = t.TokenExplicitMaxTTL
+	auth.MaxTTL = t.TokenMaxTTL
+	auth.NoDefaultPolicy = t.TokenNoDefaultPolicy
+	auth.Period = t.TokenPeriod
+	auth.Policies = t.TokenPolicies
+	auth.Renewable = true
+	auth.TokenType = t.TokenType
+	auth.TTL = t.TokenTTL
+	auth.NumUses = t.TokenNumUses
+}
+
+func DeprecationText(param string) string {
+	return fmt.Sprintf("Use %q instead. If this and %q are both specified, only %q will be used.", param, param, param)
+}
+
+func upgradeDurationValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *time.Duration) error {
+	_, ok := d.GetOk(newKey)
+	if !ok {
+		raw, ok := d.GetOk(oldKey)
+		if ok {
+			*oldVal = time.Duration(raw.(int)) * time.Second
+			*newVal = *oldVal
+		}
+	} else {
+		_, ok = d.GetOk(oldKey)
+		if ok {
+			*oldVal = *newVal
+		} else {
+			*oldVal = 0
+		}
+	}
+
+	return nil
+}
+
+func upgradeIntValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *int) error {
+	_, ok := d.GetOk(newKey)
+	if !ok {
+		raw, ok := d.GetOk(oldKey)
+		if ok {
+			*oldVal = raw.(int)
+			*newVal = *oldVal
+		}
+	} else {
+		_, ok = d.GetOk(oldKey)
+		if ok {
+			*oldVal = *newVal
+		} else {
+			*oldVal = 0
+		}
+	}
+
+	return nil
+}
+
+func upgradeStringSliceValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *[]string) error {
+	_, ok := d.GetOk(newKey)
+	if !ok {
+		raw, ok := d.GetOk(oldKey)
+		if ok {
+			// Special case: if we're looking at "token_policies" parse the policies
+			if newKey == "token_policies" {
+				*oldVal = policyutil.ParsePolicies(raw)
+			} else {
+				*oldVal = raw.([]string)
+			}
+			*newVal = *oldVal
+		}
+	} else {
+		_, ok = d.GetOk(oldKey)
+		if ok {
+			*oldVal = *newVal
+		} else {
+			*oldVal = nil
+		}
+	}
+
+	return nil
+}
+
+func upgradeSockAddrSliceValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *[]*sockaddr.SockAddrMarshaler) error {
+	_, ok := d.GetOk(newKey)
+	if !ok {
+		raw, ok := d.GetOk(oldKey)
+		if ok {
+			boundCIDRs, err := parseutil.ParseAddrs(raw)
+			if err != nil {
+				return err
+			}
+			*oldVal = boundCIDRs
+			*newVal = *oldVal
+		}
+	} else {
+		_, ok = d.GetOk(oldKey)
+		if ok {
+			*oldVal = *newVal
+		} else {
+			*oldVal = nil
+		}
+	}
+
+	return nil
+}
+
+// UpgradeValue takes in old/new data keys and old/new values and calls out to
+// a helper function to perform upgrades in a standardized way. It requires
+// pointers in all cases so that we can set directly into the target struct.
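+// For example, a backend migrating a legacy "policies" field to
+// "token_policies" might call (illustrative sketch only; role is a
+// hypothetical struct with []string fields):
+//
+//	err := tokenutil.UpgradeValue(d, "policies", "token_policies",
+//		&role.Policies, &role.TokenPolicies)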
+func UpgradeValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal interface{}) error { + switch typedOldVal := oldVal.(type) { + case *time.Duration: + typedNewVal, ok := newVal.(*time.Duration) + if !ok { + return errors.New("mismatch in value types in tokenutil.UpgradeValue") + } + return upgradeDurationValue(d, oldKey, newKey, typedOldVal, typedNewVal) + + case *int: + typedNewVal, ok := newVal.(*int) + if !ok { + return errors.New("mismatch in value types in tokenutil.UpgradeValue") + } + return upgradeIntValue(d, oldKey, newKey, typedOldVal, typedNewVal) + + case *[]string: + typedNewVal, ok := newVal.(*[]string) + if !ok { + return errors.New("mismatch in value types in tokenutil.UpgradeValue") + } + return upgradeStringSliceValue(d, oldKey, newKey, typedOldVal, typedNewVal) + + case *[]*sockaddr.SockAddrMarshaler: + typedNewVal, ok := newVal.(*[]*sockaddr.SockAddrMarshaler) + if !ok { + return errors.New("mismatch in value types in tokenutil.UpgradeValue") + } + return upgradeSockAddrSliceValue(d, oldKey, newKey, typedOldVal, typedNewVal) + + default: + return errors.New("unhandled type in tokenutil.UpgradeValue") + } +} + +const ( + tokenPeriodHelp = `If set, tokens created via this role +will have no max lifetime; instead, their +renewal period will be fixed to this value. +This takes an integer number of seconds, +or a string duration (e.g. "24h").` + tokenExplicitMaxTTLHelp = `If set, tokens created via this role +carry an explicit maximum TTL. During renewal, +the current maximum TTL values of the role +and the mount are not checked for changes, +and any updates to these values will have +no effect on the token being renewed.` +) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go b/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go new file mode 100644 index 00000000..9c84a1d4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go @@ -0,0 +1,37 @@ +package wrapping + +import "time" + +type ResponseWrapInfo struct { + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""` + + // The token containing the wrapped response + Token string `json:"token" structs:"token" mapstructure:"token" sentinel:""` + + // The token accessor for the wrapped response token + Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"` + + // The creation time. This can be used with the TTL to figure out an + // expected expiration. + CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time" sentinel:""` + + // If the contained response is the output of a token creation call, the + // created token's accessor will be accessible here + WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor" sentinel:""` + + // WrappedEntityID is the entity identifier of the caller who initiated the + // wrapping request + WrappedEntityID string `json:"wrapped_entity_id" structs:"wrapped_entity_id" mapstructure:"wrapped_entity_id" sentinel:""` + + // The format to use. This doesn't get returned, it's only internal. + Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""` + + // CreationPath is the original request path that was used to create + // the wrapped response. 
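+	// Clients can compare this against the request path they expect in order
+	// to detect a wrapping token that was swapped for a different one.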
+ CreationPath string `json:"creation_path" structs:"creation_path" mapstructure:"creation_path" sentinel:""` + + // Controls seal wrapping behavior downstream for specific use cases + SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""` +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/audit.go b/vendor/github.com/hashicorp/vault/sdk/logical/audit.go new file mode 100644 index 00000000..8ba70f37 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/audit.go @@ -0,0 +1,19 @@ +package logical + +type LogInput struct { + Type string + Auth *Auth + Request *Request + Response *Response + OuterErr error + NonHMACReqDataKeys []string + NonHMACRespDataKeys []string +} + +type MarshalOptions struct { + ValueHasher func(string) string +} + +type OptMarshaler interface { + MarshalJSONWithOptions(*MarshalOptions) ([]byte, error) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/auth.go b/vendor/github.com/hashicorp/vault/sdk/logical/auth.go new file mode 100644 index 00000000..2bfb6e00 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/auth.go @@ -0,0 +1,107 @@ +package logical + +import ( + "fmt" + "time" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +// Auth is the resulting authentication information that is part of +// Response for credential backends. +type Auth struct { + LeaseOptions + + // InternalData is JSON-encodable data that is stored with the auth struct. + // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + InternalData map[string]interface{} `json:"internal_data" mapstructure:"internal_data" structs:"internal_data"` + + // DisplayName is a non-security sensitive identifier that is + // applicable to this Auth. It is used for logging and prefixing + // of dynamic secrets. For example, DisplayName may be "armon" for + // the github credential backend. If the client token is used to + // generate a SQL credential, the user may be "github-armon-uuid". + // This is to help identify the source without using audit tables. + DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"` + + // Policies is the list of policies that the authenticated user + // is associated with. + Policies []string `json:"policies" mapstructure:"policies" structs:"policies"` + + // TokenPolicies and IdentityPolicies break down the list in Policies to + // help determine where a policy was sourced + TokenPolicies []string `json:"token_policies" mapstructure:"token_policies" structs:"token_policies"` + IdentityPolicies []string `json:"identity_policies" mapstructure:"identity_policies" structs:"identity_policies"` + + // ExternalNamespacePolicies represent the policies authorized from + // different namespaces indexed by respective namespace identifiers + ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies" mapstructure:"external_namespace_policies" structs:"external_namespace_policies"` + + // Indicates that the default policy should not be added by core when + // creating a token. The default policy will still be added if it's + // explicitly defined. + NoDefaultPolicy bool `json:"no_default_policy" mapstructure:"no_default_policy" structs:"no_default_policy"` + + // Metadata is used to attach arbitrary string-type metadata to + // an authenticated user. This metadata will be outputted into the + // audit log. 
+	Metadata map[string]string `json:"metadata" mapstructure:"metadata" structs:"metadata"`
+
+	// ClientToken is the token that is generated for the authentication.
+	// This will be filled in by Vault core when an auth structure is
+	// returned. Setting this manually will have no effect.
+	ClientToken string `json:"client_token" mapstructure:"client_token" structs:"client_token"`
+
+	// Accessor is the identifier for the ClientToken. This can be used
+	// to perform management functionalities (especially revocation) when
+	// ClientToken in the audit logs is obfuscated. Accessor can be used
+	// to revoke a ClientToken and to look up the capabilities of the ClientToken,
+	// both without actually knowing the ClientToken.
+	Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor"`
+
+	// Period indicates that the token generated using this Auth object
+	// should never expire. The token should be renewed within the duration
+	// specified by this period.
+	Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
+
+	// ExplicitMaxTTL is the max TTL that constrains periodic tokens. For normal
+	// tokens, this value is constrained by the configured max ttl.
+	ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"`
+
+	// Number of allowed uses of the issued token
+	NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
+
+	// EntityID is the identifier of the entity in the identity store to which
+	// the identity of the authenticating client belongs.
+	EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"`
+
+	// Alias is the information about the authenticated client returned by
+	// the auth backend
+	Alias *Alias `json:"alias" mapstructure:"alias" structs:"alias"`
+
+	// GroupAliases are the informational mappings of external groups which an
+	// authenticated user belongs to. This is used to check if there are
+	// mapping groups for the group aliases in the identity store. For all the
+	// matching groups, the entity ID of the user will be added.
+	GroupAliases []*Alias `json:"group_aliases" mapstructure:"group_aliases" structs:"group_aliases"`
+
+	// The set of CIDRs that this token can be used with
+	BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"`
+
+	// CreationPath is a path that the backend can return to use in the lease.
+	// This is currently only supported for the token store where roles may
+	// change the perceived path of the lease, even though they don't change
+	// the request path itself.
+	CreationPath string `json:"creation_path"`
+
+	// TokenType is the type of token being requested
+	TokenType TokenType `json:"token_type"`
+
+	// Orphan is set if the token does not have a parent
+	Orphan bool `json:"orphan"`
+}
+
+func (a *Auth) GoString() string {
+	return fmt.Sprintf("*%#v", *a)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/connection.go b/vendor/github.com/hashicorp/vault/sdk/logical/connection.go
new file mode 100644
index 00000000..a504b10c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/connection.go
@@ -0,0 +1,15 @@
+package logical
+
+import (
+	"crypto/tls"
+)
+
+// Connection represents the connection information for a request. This
+// is present on the Request structure for credential backends.
+type Connection struct {
+	// RemoteAddr is the network address that sent the request.
+	RemoteAddr string `json:"remote_addr"`
+
+	// ConnState is the TLS connection state if applicable.
+	ConnState *tls.ConnectionState `sentinel:""`
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go b/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go
new file mode 100644
index 00000000..2ed1b076
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go
@@ -0,0 +1,17 @@
+package logical
+
+import (
+	"time"
+)
+
+type ControlGroup struct {
+	Authorizations []*Authz  `json:"authorizations"`
+	RequestTime    time.Time `json:"request_time"`
+	Approved       bool      `json:"approved"`
+	NamespaceID    string    `json:"namespace_id"`
+}
+
+type Authz struct {
+	Token             string    `json:"token"`
+	AuthorizationTime time.Time `json:"authorization_time"`
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/error.go b/vendor/github.com/hashicorp/vault/sdk/logical/error.go
new file mode 100644
index 00000000..fd896a6c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/error.go
@@ -0,0 +1,94 @@
+package logical
+
+import "errors"
+
+var (
+	// ErrUnsupportedOperation is returned if the operation is not supported
+	// by the logical backend.
+	ErrUnsupportedOperation = errors.New("unsupported operation")
+
+	// ErrUnsupportedPath is returned if the path is not supported
+	// by the logical backend.
+	ErrUnsupportedPath = errors.New("unsupported path")
+
+	// ErrInvalidRequest is returned if the request is invalid
+	ErrInvalidRequest = errors.New("invalid request")
+
+	// ErrPermissionDenied is returned if the client is not authorized
+	ErrPermissionDenied = errors.New("permission denied")
+
+	// ErrMultiAuthzPending is returned if the request needs more
+	// authorizations
+	ErrMultiAuthzPending = errors.New("request needs further approval")
+
+	// ErrUpstreamRateLimited is returned when Vault receives a rate limited
+	// response from an upstream
+	ErrUpstreamRateLimited = errors.New("upstream rate limited")
+
+	// ErrPerfStandbyPleaseForward is returned when Vault is in a state such
+	// that a perf standby cannot satisfy a request
+	ErrPerfStandbyPleaseForward = errors.New("please forward to the active node")
+)
+
+type HTTPCodedError interface {
+	Error() string
+	Code() int
+}
+
+func CodedError(status int, msg string) HTTPCodedError {
+	return &codedError{
+		Status:  status,
+		Message: msg,
+	}
+}
+
+var _ HTTPCodedError = (*codedError)(nil)
+
+type codedError struct {
+	Status  int
+	Message string
+}
+
+func (e *codedError) Error() string {
+	return e.Message
+}
+
+func (e *codedError) Code() int {
+	return e.Status
+}
+
+// Struct to identify user input errors. This is helpful in returning the
+// appropriate status codes to clients from the HTTP endpoints.
+type StatusBadRequest struct {
+	Err string
+}
+
+// Implementing error interface
+func (s *StatusBadRequest) Error() string {
+	return s.Err
+}
+
+// This is a new type declared to not cause potential compatibility problems if
+// the logic around the CodedError changes; in particular for logical request
+// paths it is basically ignored, and changing that behavior might cause
+// unforeseen issues.
+type ReplicationCodedError struct {
+	Msg  string
+	Code int
+}
+
+func (r *ReplicationCodedError) Error() string {
+	return r.Msg
+}
+
+type KeyNotFoundError struct {
+	Err error
+}
+
+func (e *KeyNotFoundError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+
+func (e *KeyNotFoundError) Error() string {
+	return e.Err.Error()
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go
new file mode 100644
index 00000000..51fde70f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go
@@ -0,0 +1,198 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: sdk/logical/identity.proto
+
+package logical
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Entity struct {
+	// ID is the unique identifier for the entity
+	ID string `sentinel:"" protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// Name is the human-friendly unique identifier for the entity
+	Name string `sentinel:"" protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// Aliases contains the alias mappings for the given entity
+	Aliases []*Alias `sentinel:"" protobuf:"bytes,3,rep,name=aliases,proto3" json:"aliases,omitempty"`
+	// Metadata represents the custom data tied to this entity
+	Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Disabled is true if the entity is disabled.
+ Disabled bool `sentinel:"" protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { + return fileDescriptor_4a34d35719c603a1, []int{0} +} + +func (m *Entity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Entity.Unmarshal(m, b) +} +func (m *Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Entity.Marshal(b, m, deterministic) +} +func (m *Entity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Entity.Merge(m, src) +} +func (m *Entity) XXX_Size() int { + return xxx_messageInfo_Entity.Size(m) +} +func (m *Entity) XXX_DiscardUnknown() { + xxx_messageInfo_Entity.DiscardUnknown(m) +} + +var xxx_messageInfo_Entity proto.InternalMessageInfo + +func (m *Entity) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *Entity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Entity) GetAliases() []*Alias { + if m != nil { + return m.Aliases + } + return nil +} + +func (m *Entity) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Entity) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +type Alias struct { + // MountType is the backend mount's type to which this identity belongs + MountType string `sentinel:"" protobuf:"bytes,1,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` + // MountAccessor is the identifier of the mount entry to which this + // identity belongs + MountAccessor string `sentinel:"" protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` + // Name is the identifier of this identity in its authentication source + Name string `sentinel:"" protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Metadata represents the custom data tied to this alias + Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Alias) Reset() { *m = Alias{} } +func (m *Alias) String() string { return proto.CompactTextString(m) } +func (*Alias) ProtoMessage() {} +func (*Alias) Descriptor() ([]byte, []int) { + return fileDescriptor_4a34d35719c603a1, []int{1} +} + +func (m *Alias) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Alias.Unmarshal(m, b) +} +func (m *Alias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Alias.Marshal(b, m, deterministic) +} +func (m *Alias) XXX_Merge(src proto.Message) { + xxx_messageInfo_Alias.Merge(m, src) +} +func (m *Alias) XXX_Size() int { + return xxx_messageInfo_Alias.Size(m) +} +func (m *Alias) XXX_DiscardUnknown() { + xxx_messageInfo_Alias.DiscardUnknown(m) +} + +var xxx_messageInfo_Alias proto.InternalMessageInfo + +func (m *Alias) GetMountType() string { + if m != nil { + return m.MountType + } + return "" +} + +func (m *Alias) GetMountAccessor() string { + if m != nil { + return m.MountAccessor + } + return "" +} + +func (m *Alias) GetName() string { + if m != nil { 
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Alias) GetMetadata() map[string]string {
+	if m != nil {
+		return m.Metadata
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Entity)(nil), "logical.Entity")
+	proto.RegisterMapType((map[string]string)(nil), "logical.Entity.MetadataEntry")
+	proto.RegisterType((*Alias)(nil), "logical.Alias")
+	proto.RegisterMapType((map[string]string)(nil), "logical.Alias.MetadataEntry")
+}
+
+func init() { proto.RegisterFile("sdk/logical/identity.proto", fileDescriptor_4a34d35719c603a1) }
+
+var fileDescriptor_4a34d35719c603a1 = []byte{
+	// 310 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0xbf, 0x6a, 0xc3, 0x30,
+	0x10, 0xc6, 0x91, 0x9d, 0xbf, 0x57, 0x12, 0x8a, 0xe8, 0x60, 0x42, 0x03, 0x21, 0xd0, 0xe2, 0xc9,
+	0x86, 0x76, 0x49, 0xdb, 0x29, 0x25, 0x19, 0x32, 0x74, 0x31, 0x9d, 0xba, 0x94, 0x8b, 0x2d, 0x62,
+	0x11, 0xd9, 0x32, 0x96, 0x1c, 0xf0, 0x9b, 0xf6, 0x19, 0xfa, 0x14, 0x25, 0xb2, 0x62, 0x12, 0x4a,
+	0xa7, 0x6e, 0xba, 0xdf, 0x77, 0x77, 0xba, 0xfb, 0x38, 0x98, 0xa8, 0x64, 0x1f, 0x0a, 0xb9, 0xe3,
+	0x31, 0x8a, 0x90, 0x27, 0x2c, 0xd7, 0x5c, 0xd7, 0x41, 0x51, 0x4a, 0x2d, 0x69, 0xdf, 0xf2, 0xf9,
+	0x37, 0x81, 0xde, 0xda, 0x28, 0x74, 0x0c, 0xce, 0x66, 0xe5, 0x91, 0x19, 0xf1, 0x87, 0x91, 0xb3,
+	0x59, 0x51, 0x0a, 0x9d, 0x1c, 0x33, 0xe6, 0x39, 0x86, 0x98, 0x37, 0xf5, 0xa1, 0x8f, 0x82, 0xa3,
+	0x62, 0xca, 0x73, 0x67, 0xae, 0x7f, 0xf5, 0x30, 0x0e, 0x6c, 0xa7, 0x60, 0x79, 0xe4, 0xd1, 0x49,
+	0xa6, 0x4f, 0x30, 0xc8, 0x98, 0xc6, 0x04, 0x35, 0x7a, 0x1d, 0x93, 0x3a, 0x6d, 0x53, 0x9b, 0x0f,
+	0x83, 0x37, 0xab, 0xaf, 0x73, 0x5d, 0xd6, 0x51, 0x9b, 0x4e, 0x27, 0x30, 0x48, 0xb8, 0xc2, 0xad,
+	0x60, 0x89, 0xd7, 0x9d, 0x11, 0x7f, 0x10, 0xb5, 0xf1, 0xe4, 0x05, 0x46, 0x17, 0x65, 0xf4, 0x1a,
+	0xdc, 0x3d, 0xab, 0xed, 0xd8, 0xc7, 0x27, 0xbd, 0x81, 0xee, 0x01, 0x45, 0x75, 0x1a, 0xbc, 0x09,
+	0x9e, 0x9d, 0x05, 0x99, 0x7f, 0x11, 0xe8, 0x9a, 0x31, 0xe9, 0x14, 0x20, 0x93, 0x55, 0xae, 0x3f,
+	0x75, 0x5d, 0x30, 0x5b, 0x3c, 0x34, 0xe4, 0xbd, 0x2e, 0x18, 0xbd, 0x83, 0x71, 0x23, 0x63, 0x1c,
+	0x33, 0xa5, 0x64, 0x69, 0x7b, 0x8d, 0x0c, 0x5d, 0x5a, 0xd8, 0x3a, 0xe4, 0x9e, 0x39, 0xb4, 0xf8,
+	0xb5, 0xf7, 0xed, 0xa5, 0x45, 0x7f, 0xad, 0xfd, 0xaf, 0xd5, 0x5e, 0xfd, 0x8f, 0xfb, 0x1d, 0xd7,
+	0x69, 0xb5, 0x0d, 0x62, 0x99, 0x85, 0x29, 0xaa, 0x94, 0xc7, 0xb2, 0x2c, 0xc2, 0x03, 0x56, 0x42,
+	0x87, 0x67, 0x97, 0xb0, 0xed, 0x99, 0x0b, 0x78, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0xfa, 0xa9,
+	0x8a, 0x39, 0x1f, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto b/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto
new file mode 100644
index 00000000..65e27435
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+
+option go_package = "github.com/hashicorp/vault/sdk/logical";
+
+package logical;
+
+message Entity {
+    // ID is the unique identifier for the entity
+    string ID = 1;
+
+    // Name is the human-friendly unique identifier for the entity
+    string name = 2;
+
+    // Aliases contains the alias mappings for the given entity
+    repeated Alias aliases = 3;
+
+    // Metadata represents the custom data tied to this entity
+    map<string, string> metadata = 4;
+
+    // Disabled is true if the entity is disabled.
+    bool disabled = 5;
+}
+
+message Alias {
+    // MountType is the backend mount's type to which this identity belongs
+    string mount_type = 1;
+
+    // MountAccessor is the identifier of the mount entry to which this
+    // identity belongs
+    string mount_accessor = 2;
+
+    // Name is the identifier of this identity in its authentication source
+    string name = 3;
+
+    // Metadata represents the custom data tied to this alias
+    map<string, string> metadata = 4;
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/lease.go b/vendor/github.com/hashicorp/vault/sdk/logical/lease.go
new file mode 100644
index 00000000..97bbe4f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/lease.go
@@ -0,0 +1,53 @@
+package logical
+
+import (
+	"time"
+)
+
+// LeaseOptions is an embeddable struct to capture common lease
+// settings between a Secret and Auth
+type LeaseOptions struct {
+	// TTL is the duration that this secret is valid for. Vault
+	// will automatically revoke it after the duration.
+	TTL time.Duration `json:"lease"`
+
+	// MaxTTL is the maximum duration that this secret is valid for.
+	MaxTTL time.Duration `json:"max_ttl"`
+
+	// Renewable, if true, means that this secret can be renewed.
+	Renewable bool `json:"renewable"`
+
+	// Increment will be the lease increment that the user requested.
+	// This is only available on a Renew operation and has no effect
+	// when returning a response.
+	Increment time.Duration `json:"-"`
+
+	// IssueTime is the time of issue for the original lease. This is
+	// only available on Renew and Revoke operations and has no effect when returning
+	// a response. It can be used to enforce maximum lease periods by
+	// a logical backend.
+	IssueTime time.Time `json:"-"`
+}
+
+// LeaseEnabled checks if leasing is enabled
+func (l *LeaseOptions) LeaseEnabled() bool {
+	return l.TTL > 0
+}
+
+// LeaseTotal is the lease duration with a guard against a negative TTL
+func (l *LeaseOptions) LeaseTotal() time.Duration {
+	if l.TTL <= 0 {
+		return 0
+	}
+
+	return l.TTL
+}
+
+// ExpirationTime computes the time at which the lease expires.
+func (l *LeaseOptions) ExpirationTime() time.Time {
+	var expireTime time.Time
+	if l.LeaseEnabled() {
+		expireTime = time.Now().Add(l.LeaseTotal())
+	}
+	return expireTime
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/logical.go b/vendor/github.com/hashicorp/vault/sdk/logical/logical.go
new file mode 100644
index 00000000..db883153
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/logical.go
@@ -0,0 +1,135 @@
+package logical
+
+import (
+	"context"
+
+	log "github.com/hashicorp/go-hclog"
+)
+
+// BackendType is the type of backend that is being implemented
+type BackendType uint32
+
+// These are the types of backends that can be derived from
+// logical.Backend
+const (
+	TypeUnknown    BackendType = 0 // This is also the zero-value for BackendType
+	TypeLogical    BackendType = 1
+	TypeCredential BackendType = 2
+)
+
+// Stringer implementation
+func (b BackendType) String() string {
+	switch b {
+	case TypeLogical:
+		return "secret"
+	case TypeCredential:
+		return "auth"
+	}
+
+	return "unknown"
+}
+
+// Backend interface must be implemented to be "mountable" at
+// a given path. Requests flow through a router which has various mount
+// points that flow to a logical backend. The logic of each backend is flexible,
+// and this is what allows materialized keys to function.
There can be specialized +// logical backends for various upstreams (Consul, PostgreSQL, MySQL, etc) that can +// interact with remote APIs to generate keys dynamically. This interface also +// allows for a "procfs" like interaction, as internal state can be exposed by +// acting like a logical backend and being mounted. +type Backend interface { + + // Initialize is used to initialize a plugin after it has been mounted. + Initialize(context.Context, *InitializationRequest) error + + // HandleRequest is used to handle a request and generate a response. + // The backends must check the operation type and handle appropriately. + HandleRequest(context.Context, *Request) (*Response, error) + + // SpecialPaths is a list of paths that are special in some way. + // See PathType for the types of special paths. The key is the type + // of the special path, and the value is a list of paths for this type. + // This is not a regular expression but is an exact match. If the path + // ends in '*' then it is a prefix-based match. The '*' can only appear + // at the end. + SpecialPaths() *Paths + + // System provides an interface to access certain system configuration + // information, such as globally configured default and max lease TTLs. + System() SystemView + + // Logger provides an interface to access the underlying logger. This + // is useful when a struct embeds a Backend-implemented struct that + // contains a private instance of logger. + Logger() log.Logger + + // HandleExistenceCheck is used to handle a request and generate a response + // indicating whether the given path exists or not; this is used to + // understand whether the request must have a Create or Update capability + // ACL applied. The first bool indicates whether an existence check + // function was found for the backend; the second indicates whether, if an + // existence check function was found, the item exists or not. + HandleExistenceCheck(context.Context, *Request) (bool, bool, error) + + // Cleanup is invoked during an unmount of a backend to allow it to + // handle any cleanup like connection closing or releasing of file handles. + Cleanup(context.Context) + + // InvalidateKey may be invoked when an object is modified that belongs + // to the backend. The backend can use this to clear any caches or reset + // internal state as needed. + InvalidateKey(context.Context, string) + + // Setup is used to set up the backend based on the provided backend + // configuration. + Setup(context.Context, *BackendConfig) error + + // Type returns the BackendType for the particular backend + Type() BackendType +} + +// BackendConfig is provided to the factory to initialize the backend +type BackendConfig struct { + // View should not be stored, and should only be used for initialization + StorageView Storage + + // The backend should use this logger. The log should not contain any secrets. + Logger log.Logger + + // System provides a view into a subset of safe system information that + // is useful for backends, such as the default/max lease TTLs + System SystemView + + // BackendUUID is a unique identifier provided to this backend. It's useful + // when a backend needs a consistent and unique string without using storage. + BackendUUID string + + // Config is the opaque user configuration provided when mounting + Config map[string]string +} + +// Factory is the factory function to create a logical backend. 
+type Factory func(context.Context, *BackendConfig) (Backend, error) + +// Paths is the structure of special paths that is used for SpecialPaths. +type Paths struct { + // Root are the paths that require a root token to access + Root []string + + // Unauthenticated are the paths that can be accessed without any auth. + Unauthenticated []string + + // LocalStorage are paths (prefixes) that are local to this instance; this + // indicates that these paths should not be replicated + LocalStorage []string + + // SealWrapStorage are storage paths that, when using a capable seal, + // should be seal wrapped with extra encryption. It is exact matching + // unless it ends with '/' in which case it will be treated as a prefix. + SealWrapStorage []string +} + +type Auditor interface { + AuditRequest(ctx context.Context, input *LogInput) error + AuditResponse(ctx context.Context, input *LogInput) error +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go b/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go new file mode 100644 index 00000000..16b85cd7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go @@ -0,0 +1,52 @@ +package logical + +import ( + "context" + + "github.com/hashicorp/vault/sdk/physical" +) + +type LogicalStorage struct { + underlying physical.Backend +} + +func (s *LogicalStorage) Get(ctx context.Context, key string) (*StorageEntry, error) { + entry, err := s.underlying.Get(ctx, key) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + return &StorageEntry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }, nil +} + +func (s *LogicalStorage) Put(ctx context.Context, entry *StorageEntry) error { + return s.underlying.Put(ctx, &physical.Entry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }) +} + +func (s *LogicalStorage) Delete(ctx context.Context, key string) error { + return s.underlying.Delete(ctx, key) +} + +func (s *LogicalStorage) List(ctx context.Context, prefix string) ([]string, error) { + return s.underlying.List(ctx, prefix) +} + +func (s *LogicalStorage) Underlying() physical.Backend { + return s.underlying +} + +func NewLogicalStorage(underlying physical.Backend) *LogicalStorage { + return &LogicalStorage{ + underlying: underlying, + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go new file mode 100644 index 00000000..55b8025d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: sdk/logical/plugin.proto + +package logical + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type PluginEnvironment struct { + // VaultVersion is the version of the Vault server + VaultVersion string `protobuf:"bytes,1,opt,name=vault_version,json=vaultVersion,proto3" json:"vault_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginEnvironment) Reset() { *m = PluginEnvironment{} } +func (m *PluginEnvironment) String() string { return proto.CompactTextString(m) } +func (*PluginEnvironment) ProtoMessage() {} +func (*PluginEnvironment) Descriptor() ([]byte, []int) { + return fileDescriptor_23bde88b42ca47d4, []int{0} +} + +func (m *PluginEnvironment) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginEnvironment.Unmarshal(m, b) +} +func (m *PluginEnvironment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginEnvironment.Marshal(b, m, deterministic) +} +func (m *PluginEnvironment) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginEnvironment.Merge(m, src) +} +func (m *PluginEnvironment) XXX_Size() int { + return xxx_messageInfo_PluginEnvironment.Size(m) +} +func (m *PluginEnvironment) XXX_DiscardUnknown() { + xxx_messageInfo_PluginEnvironment.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginEnvironment proto.InternalMessageInfo + +func (m *PluginEnvironment) GetVaultVersion() string { + if m != nil { + return m.VaultVersion + } + return "" +} + +func init() { + proto.RegisterType((*PluginEnvironment)(nil), "logical.PluginEnvironment") +} + +func init() { proto.RegisterFile("sdk/logical/plugin.proto", fileDescriptor_23bde88b42ca47d4) } + +var fileDescriptor_23bde88b42ca47d4 = []byte{ + // 137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x28, 0x4e, 0xc9, 0xd6, + 0xcf, 0xc9, 0x4f, 0xcf, 0x4c, 0x4e, 0xcc, 0xd1, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0xd3, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x8a, 0x2a, 0x59, 0x70, 0x09, 0x06, 0x80, 0x25, 0x5c, + 0xf3, 0xca, 0x32, 0x8b, 0xf2, 0xf3, 0x72, 0x53, 0xf3, 0x4a, 0x84, 0x94, 0xb9, 0x78, 0xcb, 0x12, + 0x4b, 0x73, 0x4a, 0xe2, 0xcb, 0x52, 0x8b, 0x8a, 0x33, 0xf3, 0xf3, 0x24, 0x18, 0x15, 0x18, 0x35, + 0x38, 0x83, 0x78, 0xc0, 0x82, 0x61, 0x10, 0x31, 0x27, 0x8d, 0x28, 0xb5, 0xf4, 0xcc, 0x92, 0x8c, + 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, 0x8c, 0xc4, 0xe2, 0x8c, 0xcc, 0xe4, 0xfc, 0xa2, 0x02, + 0x7d, 0xb0, 0x22, 0x7d, 0x24, 0x9b, 0x93, 0xd8, 0xc0, 0x76, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, + 0xff, 0xe8, 0xb8, 0xfa, 0x4e, 0x8f, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto new file mode 100644 index 00000000..5992c213 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/sdk/logical"; + +package logical; + +message PluginEnvironment { + // VaultVersion is the version of the Vault server + string vault_version = 1; +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/request.go b/vendor/github.com/hashicorp/vault/sdk/logical/request.go new file mode 100644 index 00000000..7e7de207 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/request.go @@ -0,0 +1,313 @@ +package logical + +import ( + "fmt" + "net/http" + "strings" + "time" +) + +// RequestWrapInfo is a struct that stores information about desired 
response +// and seal wrapping behavior +type RequestWrapInfo struct { + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""` + + // The format to use for the wrapped response; if not specified it's a bare + // token + Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""` + + // A flag to conforming backends that data for a given request should be + // seal wrapped + SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""` +} + +func (r *RequestWrapInfo) SentinelGet(key string) (interface{}, error) { + if r == nil { + return nil, nil + } + switch key { + case "ttl": + return r.TTL, nil + case "ttl_seconds": + return int64(r.TTL.Seconds()), nil + } + + return nil, nil +} + +func (r *RequestWrapInfo) SentinelKeys() []string { + return []string{ + "ttl", + "ttl_seconds", + } +} + +type ClientTokenSource uint32 + +const ( + NoClientToken ClientTokenSource = iota + ClientTokenFromVaultHeader + ClientTokenFromAuthzHeader +) + +// Request is a struct that stores the parameters and context of a request +// being made to Vault. It is used to abstract the details of the higher level +// request protocol from the handlers. +// +// Note: Many of these have Sentinel disabled because they are values populated +// by the router after policy checks; the token namespace would be the right +// place to access them via Sentinel +type Request struct { + // Id is the uuid associated with each request + ID string `json:"id" structs:"id" mapstructure:"id" sentinel:""` + + // If set, the name given to the replication secondary where this request + // originated + ReplicationCluster string `json:"replication_cluster" structs:"replication_cluster" mapstructure:"replication_cluster" sentinel:""` + + // Operation is the requested operation type + Operation Operation `json:"operation" structs:"operation" mapstructure:"operation"` + + // Path is the part of the request path not consumed by the + // routing. As an example, if the original request path is "prod/aws/foo" + // and the AWS logical backend is mounted at "prod/aws/", then the + // final path is "foo" since the mount prefix is trimmed. + Path string `json:"path" structs:"path" mapstructure:"path" sentinel:""` + + // Request data is an opaque map that must have string keys. + Data map[string]interface{} `json:"map" structs:"data" mapstructure:"data"` + + // Storage can be used to durably store and retrieve state. + Storage Storage `json:"-" sentinel:""` + + // Secret will be non-nil only for Revoke and Renew operations + // to represent the secret that was returned prior. + Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret" sentinel:""` + + // Auth will be non-nil only for Renew operations + // to represent the auth that was returned prior. + Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth" sentinel:""` + + // Headers will contain the http headers from the request. This value will + // be used in the audit broker to ensure we are auditing only the allowed + // headers. + Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers" sentinel:""` + + // Connection will be non-nil only for credential providers to + // inspect the connection information and potentially use it for + // authentication/protection. 
+	Connection *Connection `json:"connection" structs:"connection" mapstructure:"connection"`
+
+	// ClientToken is provided to the core so that the identity
+	// can be verified and ACLs applied. This value is passed
+	// through to the logical backends but after being salted and
+	// hashed.
+	ClientToken string `json:"client_token" structs:"client_token" mapstructure:"client_token" sentinel:""`
+
+	// ClientTokenAccessor is provided to the core so that it can be
+	// logged as part of request audit logging.
+	ClientTokenAccessor string `json:"client_token_accessor" structs:"client_token_accessor" mapstructure:"client_token_accessor" sentinel:""`
+
+	// DisplayName is provided to the logical backend to help associate
+	// dynamic secrets with the source entity. This is not a sensitive
+	// name, but is useful for operators.
+	DisplayName string `json:"display_name" structs:"display_name" mapstructure:"display_name" sentinel:""`
+
+	// MountPoint is provided so that a logical backend can generate
+	// paths relative to itself. The `Path` is effectively the client
+	// request path with the MountPoint trimmed off.
+	MountPoint string `json:"mount_point" structs:"mount_point" mapstructure:"mount_point" sentinel:""`
+
+	// MountType is provided so that a logical backend can make decisions
+	// based on the specific mount type (e.g., if a mount type has different
+	// aliases, generating different defaults depending on the alias)
+	MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type" sentinel:""`
+
+	// MountAccessor is provided so that identities returned by the authentication
+	// backends can be tied to the mount they belong to.
+	MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor" sentinel:""`
+
+	// WrapInfo contains requested response wrapping parameters
+	WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info" sentinel:""`
+
+	// ClientTokenRemainingUses represents the allowed number of uses left on the
+	// token supplied
+	ClientTokenRemainingUses int `json:"client_token_remaining_uses" structs:"client_token_remaining_uses" mapstructure:"client_token_remaining_uses"`
+
+	// EntityID is the identity of the caller extracted out of the token used
+	// to make this request
+	EntityID string `json:"entity_id" structs:"entity_id" mapstructure:"entity_id" sentinel:""`
+
+	// PolicyOverride indicates that the requestor wishes to override
+	// soft-mandatory Sentinel policies
+	PolicyOverride bool `json:"policy_override" structs:"policy_override" mapstructure:"policy_override"`
+
+	// Whether the request is unauthenticated, as in, had no client token
+	// attached. Useful in some situations where the client token is not made
+	// accessible.
+	Unauthenticated bool `json:"unauthenticated" structs:"unauthenticated" mapstructure:"unauthenticated"`
+
+	// MFACreds holds the parsed MFA information supplied over the API as part of
+	// the X-Vault-MFA header
+	MFACreds MFACreds `json:"mfa_creds" structs:"mfa_creds" mapstructure:"mfa_creds" sentinel:""`
+
+	// Cached token entry. This avoids another lookup in request handling when
+	// we've already looked it up at http handling time. Note that this token
+	// has not been "used", as in it will not properly take into account use
+	// count limitations. As a result this field should only ever be used for
+	// transport to a function that would otherwise do a lookup and then
+	// properly use the token.
+	tokenEntry *TokenEntry
+
+	// For replication, contains the last WAL on the remote side after handling
+	// the request, used for best-effort avoidance of stale read-after-write
+	lastRemoteWAL uint64
+
+	// ControlGroup holds the authorizations that have happened on this
+	// request
+	ControlGroup *ControlGroup `json:"control_group" structs:"control_group" mapstructure:"control_group" sentinel:""`
+
+	// ClientTokenSource tells us where the client token was sourced from, so
+	// we can delete it before sending off to plugins
+	ClientTokenSource ClientTokenSource
+
+	// HTTPRequest, if set, can be used to access fields from the HTTP request
+	// that generated this logical.Request object, such as the request body.
+	HTTPRequest *http.Request `json:"-" sentinel:""`
+
+	// ResponseWriter, if set, can be used to stream a response value to the http
+	// request that generated this logical.Request object.
+	ResponseWriter *HTTPResponseWriter `json:"-" sentinel:""`
+}
+
+// Get returns a data field and guards for nil Data
+func (r *Request) Get(key string) interface{} {
+	if r.Data == nil {
+		return nil
+	}
+	return r.Data[key]
+}
+
+// GetString returns a data field as a string
+func (r *Request) GetString(key string) string {
+	raw := r.Get(key)
+	s, _ := raw.(string)
+	return s
+}
+
+func (r *Request) GoString() string {
+	return fmt.Sprintf("*%#v", *r)
+}
+
+func (r *Request) SentinelGet(key string) (interface{}, error) {
+	switch key {
+	case "path":
+		// Sanitize it here so that it's consistent in policies
+		return strings.TrimPrefix(r.Path, "/"), nil
+
+	case "wrapping", "wrap_info":
+		// If the pointer is nil accessing the wrap info is considered
+		// "undefined" so this allows us to instead discover a TTL of zero
+		if r.WrapInfo == nil {
+			return &RequestWrapInfo{}, nil
+		}
+		return r.WrapInfo, nil
+	}
+
+	return nil, nil
+}
+
+func (r *Request) SentinelKeys() []string {
+	return []string{
+		"path",
+		"wrapping",
+		"wrap_info",
+	}
+}
+
+func (r *Request) LastRemoteWAL() uint64 {
+	return r.lastRemoteWAL
+}
+
+func (r *Request) SetLastRemoteWAL(last uint64) {
+	r.lastRemoteWAL = last
+}
+
+func (r *Request) TokenEntry() *TokenEntry {
+	return r.tokenEntry
+}
+
+func (r *Request) SetTokenEntry(te *TokenEntry) {
+	r.tokenEntry = te
+}
+
+// RenewRequest creates the structure of the renew request.
+func RenewRequest(path string, secret *Secret, data map[string]interface{}) *Request {
+	return &Request{
+		Operation: RenewOperation,
+		Path:      path,
+		Data:      data,
+		Secret:    secret,
+	}
+}
+
+// RenewAuthRequest creates the structure of the renew request for an auth.
+func RenewAuthRequest(path string, auth *Auth, data map[string]interface{}) *Request {
+	return &Request{
+		Operation: RenewOperation,
+		Path:      path,
+		Data:      data,
+		Auth:      auth,
+	}
+}
+
+// RevokeRequest creates the structure of the revoke request.
+func RevokeRequest(path string, secret *Secret, data map[string]interface{}) *Request {
+	return &Request{
+		Operation: RevokeOperation,
+		Path:      path,
+		Data:      data,
+		Secret:    secret,
+	}
+}
+
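For illustration only (not part of the vendored code), a caller might exercise these constructors as in the sketch below; the path, lease ID, and TTL are hypothetical:

	package main

	import (
		"fmt"
		"time"

		"github.com/hashicorp/vault/sdk/logical"
	)

	func main() {
		// A secret as a backend might have returned it earlier; the lease
		// ID and TTL here are made up for the example.
		secret := &logical.Secret{LeaseID: "database/creds/readonly/example-id"}
		secret.TTL = time.Hour // TTL is promoted from the embedded LeaseOptions
		secret.Renewable = true

		// RenewRequest pairs the prior secret with RenewOperation so a
		// backend's renew handler can inspect it.
		req := logical.RenewRequest("database/creds/readonly", secret, nil)
		fmt.Println(req.Operation, req.Path) // renew database/creds/readonly
	}

+// RollbackRequest creates the structure of the rollback request.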
+func RollbackRequest(path string) *Request {
+	return &Request{
+		Operation: RollbackOperation,
+		Path:      path,
+		Data:      make(map[string]interface{}),
+	}
+}
+
+// Operation is an enum that is used to specify the type
+// of request being made
+type Operation string
+
+const (
+	// The operations below are called per path
+	CreateOperation         Operation = "create"
+	ReadOperation                     = "read"
+	UpdateOperation                   = "update"
+	DeleteOperation                   = "delete"
+	ListOperation                     = "list"
+	HelpOperation                     = "help"
+	AliasLookaheadOperation           = "alias-lookahead"
+
+	// The operations below are called globally, the path is less relevant.
+	RevokeOperation   Operation = "revoke"
+	RenewOperation              = "renew"
+	RollbackOperation           = "rollback"
+)
+
+type MFACreds map[string][]string
+
+// InitializationRequest stores the parameters and context of an Initialize()
+// call being made to a logical.Backend.
+type InitializationRequest struct {
+
+	// Storage can be used to durably store and retrieve state.
+	Storage Storage
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/response.go b/vendor/github.com/hashicorp/vault/sdk/logical/response.go
new file mode 100644
index 00000000..fb799ba4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/response.go
@@ -0,0 +1,213 @@
+package logical
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"sync/atomic"
+
+	"github.com/hashicorp/vault/sdk/helper/wrapping"
+)
+
+const (
+	// HTTPContentType can be specified in the Data field of a Response
+	// so that the HTTP front end can specify a custom Content-Type associated
+	// with the HTTPRawBody. This can only be used for non-secrets, and should
+	// be avoided unless absolutely necessary, such as implementing a specification.
+	// The value must be a string.
+	HTTPContentType = "http_content_type"
+
+	// HTTPRawBody is the raw content of the HTTP body that goes with the HTTPContentType.
+	// This can only be specified for non-secrets, and should be similarly
+	// avoided like the HTTPContentType. The value must be a byte slice.
+	HTTPRawBody = "http_raw_body"
+
+	// HTTPStatusCode is the response code of the HTTP body that goes with the HTTPContentType.
+	// This can only be specified for non-secrets, and should be similarly
+	// avoided like the HTTPContentType. The value must be an integer.
+	HTTPStatusCode = "http_status_code"
+
+	// For unwrapping we may need to know whether the value contained in the
+	// raw body is already JSON-unmarshaled. The presence of this key indicates
+	// that it has already been unmarshaled. That way we don't need to simply
+	// ignore errors.
+	HTTPRawBodyAlreadyJSONDecoded = "http_raw_body_already_json_decoded"
+
+	// If set, HTTPRawCacheControl will replace the default Cache-Control=no-store header
+	// set by the generic wrapping handler. The value must be a string.
+	HTTPRawCacheControl = "http_raw_cache_control"
+)
+
+// Response is a struct that stores the response of a request.
+// It is used to abstract the details of the higher level request protocol.
+type Response struct {
+	// Secret, if not nil, denotes that this response represents a secret.
+	Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"`
+
+	// Auth, if not nil, contains the authentication information for
+	// this response. This is only checked and means something for
+	// credential backends.
+	Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"`
+
+	// Response data is an opaque map that must have string keys. For
+	// secrets, this data is sent down to the user as-is. To store internal
+	// data that you don't want the user to see, store it in
+	// Secret.InternalData.
+	Data map[string]interface{} `json:"data" structs:"data" mapstructure:"data"`
+
+	// Redirect is an HTTP URL to redirect to for further authentication.
+	// This is only valid for credential backends. This will be blanked
+	// for any logical backend and ignored.
+	Redirect string `json:"redirect" structs:"redirect" mapstructure:"redirect"`
+
+	// Warnings allow operations or backends to return warnings in response
+	// to user actions without failing the action outright.
+	Warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"`
+
+	// Information for wrapping the response in a cubbyhole
+	WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
+
+	// Headers will contain the http headers from the plugin that it wishes to
+	// have as part of the output
+	Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers"`
+}
+
+// AddWarning adds a warning into the response's warning list
+func (r *Response) AddWarning(warning string) {
+	if r.Warnings == nil {
+		r.Warnings = make([]string, 0, 1)
+	}
+	r.Warnings = append(r.Warnings, warning)
+}
+
+// IsError returns true if this response seems to indicate an error.
+func (r *Response) IsError() bool {
+	return r != nil && r.Data != nil && len(r.Data) == 1 && r.Data["error"] != nil
+}
+
+func (r *Response) Error() error {
+	if !r.IsError() {
+		return nil
+	}
+	switch r.Data["error"].(type) {
+	case string:
+		return errors.New(r.Data["error"].(string))
+	case error:
+		return r.Data["error"].(error)
+	}
+	return nil
+}
+
+// HelpResponse is used to format a help response
+func HelpResponse(text string, seeAlso []string, oapiDoc interface{}) *Response {
+	return &Response{
+		Data: map[string]interface{}{
+			"help":     text,
+			"see_also": seeAlso,
+			"openapi":  oapiDoc,
+		},
+	}
+}
+
+// ErrorResponse is used to format an error response
+func ErrorResponse(text string, vargs ...interface{}) *Response {
+	if len(vargs) > 0 {
+		text = fmt.Sprintf(text, vargs...)
+	}
+	return &Response{
+		Data: map[string]interface{}{
+			"error": text,
+		},
+	}
+}
+
+// ListResponse is used to format a response to a list operation.
+func ListResponse(keys []string) *Response {
+	resp := &Response{
+		Data: map[string]interface{}{},
+	}
+	if len(keys) != 0 {
+		resp.Data["keys"] = keys
+	}
+	return resp
+}
+
+// ListResponseWithInfo is used to format a response to a list operation and
+// return the keys as well as a map with corresponding key info.
+func ListResponseWithInfo(keys []string, keyInfo map[string]interface{}) *Response {
+	resp := ListResponse(keys)
+
+	keyInfoData := make(map[string]interface{})
+	for _, key := range keys {
+		val, ok := keyInfo[key]
+		if ok {
+			keyInfoData[key] = val
+		}
+	}
+
+	if len(keyInfoData) > 0 {
+		resp.Data["key_info"] = keyInfoData
+	}
+
+	return resp
+}
+
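As a usage sketch (not part of the vendored code), the response helpers above compose like this; the error text is made up:

	package main

	import (
		"fmt"

		"github.com/hashicorp/vault/sdk/logical"
	)

	func main() {
		// ErrorResponse fills the single "error" data key, which is exactly
		// what IsError and Error look for.
		resp := logical.ErrorResponse("unsupported value %q", "foo")
		fmt.Println(resp.IsError(), resp.Error()) // true unsupported value "foo"

		// ListResponse only sets "keys" when at least one key is present;
		// an empty list therefore carries no data at all.
		empty := logical.ListResponse(nil)
		fmt.Println(len(empty.Data)) // 0
	}

+// RespondWithStatusCode takes a response and converts it to a raw response with
+// the provided status code.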
+func RespondWithStatusCode(resp *Response, req *Request, code int) (*Response, error) {
+	ret := &Response{
+		Data: map[string]interface{}{
+			HTTPContentType: "application/json",
+			HTTPStatusCode:  code,
+		},
+	}
+
+	if resp != nil {
+		httpResp := LogicalResponseToHTTPResponse(resp)
+
+		if req != nil {
+			httpResp.RequestID = req.ID
+		}
+
+		body, err := json.Marshal(httpResp)
+		if err != nil {
+			return nil, err
+		}
+
+		// We default to string here so that the value is HMAC'd via audit.
+		// Since this function is always marshaling to JSON, this is
+		// appropriate.
+		ret.Data[HTTPRawBody] = string(body)
+	}
+
+	return ret, nil
+}
+
+// HTTPResponseWriter is optionally added to a request object and can be used to
+// write directly to the HTTP response writer.
+type HTTPResponseWriter struct {
+	http.ResponseWriter
+	written *uint32
+}
+
+// NewHTTPResponseWriter creates a new HTTPResponseWriter object that wraps the
+// provided http.ResponseWriter.
+func NewHTTPResponseWriter(w http.ResponseWriter) *HTTPResponseWriter {
+	return &HTTPResponseWriter{
+		ResponseWriter: w,
+		written:        new(uint32),
+	}
+}
+
+// Write will write the bytes to the underlying ResponseWriter.
+func (rw *HTTPResponseWriter) Write(bytes []byte) (int, error) {
+	atomic.StoreUint32(rw.written, 1)
+
+	return rw.ResponseWriter.Write(bytes)
+}
+
+// Written tells us if the writer has been written to yet.
+func (rw *HTTPResponseWriter) Written() bool {
+	return atomic.LoadUint32(rw.written) == 1
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go b/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go
new file mode 100644
index 00000000..ee57f8e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go
@@ -0,0 +1,166 @@
+package logical
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/errwrap"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+)
+
+// RespondErrorCommon pulls most of the functionality from http's
+// respondErrorCommon and some of http's handleLogical and makes it available
+// to both the http package and elsewhere.
+func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
+	if err == nil && (resp == nil || !resp.IsError()) {
+		switch {
+		case req.Operation == ReadOperation:
+			if resp == nil {
+				return http.StatusNotFound, nil
+			}
+
+		// Basically: if we have empty "keys" or no keys at all, 404. This
+		// provides consistency with GET.
+		case req.Operation == ListOperation && (resp == nil || resp.WrapInfo == nil):
+			if resp == nil {
+				return http.StatusNotFound, nil
+			}
+			if len(resp.Data) == 0 {
+				if len(resp.Warnings) > 0 {
+					return 0, nil
+				}
+				return http.StatusNotFound, nil
+			}
+			keysRaw, ok := resp.Data["keys"]
+			if !ok || keysRaw == nil {
+				// If we don't have keys but have other data, return as-is
+				if len(resp.Data) > 0 || len(resp.Warnings) > 0 {
+					return 0, nil
+				}
+				return http.StatusNotFound, nil
+			}
+
+			var keys []string
+			switch keysRaw.(type) {
+			case []interface{}:
+				keys = make([]string, len(keysRaw.([]interface{})))
+				for i, el := range keysRaw.([]interface{}) {
+					s, ok := el.(string)
+					if !ok {
+						return http.StatusInternalServerError, nil
+					}
+					keys[i] = s
+				}
+
+			case []string:
+				keys = keysRaw.([]string)
+			default:
+				return http.StatusInternalServerError, nil
+			}
+
+			if len(keys) == 0 {
+				return http.StatusNotFound, nil
+			}
+		}
+
+		return 0, nil
+	}
+
+	if errwrap.ContainsType(err, new(ReplicationCodedError)) {
+		var allErrors error
+		var codedErr *ReplicationCodedError
+		errwrap.Walk(err, func(inErr error) {
+			newErr, ok := inErr.(*ReplicationCodedError)
+			if ok {
+				codedErr = newErr
+			} else {
+				allErrors = multierror.Append(allErrors, inErr)
+			}
+		})
+		if allErrors != nil {
+			return codedErr.Code, multierror.Append(fmt.Errorf("errors from both primary and secondary; primary error was %v; secondary errors follow", codedErr.Msg), allErrors)
+		}
+		return codedErr.Code, errors.New(codedErr.Msg)
+	}
+
+	// Start out with internal server error since in most of these cases there
+	// won't be a response so this won't be overridden
+	statusCode := http.StatusInternalServerError
+	// If we actually have a response, start out with bad request
+	if resp != nil {
+		statusCode = http.StatusBadRequest
+	}
+
+	// Now, check the error itself; if it has a specific logical error, set the
+	// appropriate code
+	if err != nil {
+		switch {
+		case errwrap.ContainsType(err, new(StatusBadRequest)):
+			statusCode = http.StatusBadRequest
+		case errwrap.Contains(err, ErrPermissionDenied.Error()):
+			statusCode = http.StatusForbidden
+		case errwrap.Contains(err, ErrUnsupportedOperation.Error()):
+			statusCode = http.StatusMethodNotAllowed
+		case errwrap.Contains(err, ErrUnsupportedPath.Error()):
+			statusCode = http.StatusNotFound
+		case errwrap.Contains(err, ErrInvalidRequest.Error()):
+			statusCode = http.StatusBadRequest
+		case errwrap.Contains(err, ErrUpstreamRateLimited.Error()):
+			statusCode = http.StatusBadGateway
+		}
+	}
+
+	if resp != nil && resp.IsError() {
+		err = fmt.Errorf("%s", resp.Data["error"].(string))
+	}
+
+	return statusCode, err
+}
+
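To make the mapping above concrete, here is a sketch (illustrative, not part of the vendored code) of how RespondErrorCommon translates logical outcomes into HTTP status codes; it assumes the package-level error variables such as ErrUnsupportedOperation that this file references:

	package main

	import (
		"fmt"

		"github.com/hashicorp/vault/sdk/logical"
	)

	func main() {
		req := &logical.Request{Operation: logical.ReadOperation, Path: "secret/missing"}

		// A nil response to a read maps to 404 Not Found.
		code, err := logical.RespondErrorCommon(req, nil, nil)
		fmt.Println(code, err) // 404 <nil>

		// A recognized logical error adjusts the code instead; an
		// unsupported operation maps to 405 Method Not Allowed.
		code, err = logical.RespondErrorCommon(req, nil, logical.ErrUnsupportedOperation)
		fmt.Println(code, err) // 405 unsupported operation
	}

+// AdjustErrorStatusCode adjusts the status that will be sent in error
+// conditions in a way that can be shared across http's respondError and other
+// locations.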
+func AdjustErrorStatusCode(status *int, err error) {
+	// Handle nested errors
+	if t, ok := err.(*multierror.Error); ok {
+		for _, e := range t.Errors {
+			AdjustErrorStatusCode(status, e)
+		}
+	}
+
+	// Adjust status code when sealed
+	if errwrap.Contains(err, consts.ErrSealed.Error()) {
+		*status = http.StatusServiceUnavailable
+	}
+
+	// Adjust status code on overly large request bodies
+	if errwrap.Contains(err, "http: request body too large") {
+		*status = http.StatusRequestEntityTooLarge
+	}
+
+	// Allow HTTPCoded error passthrough to specify a code
+	if t, ok := err.(HTTPCodedError); ok {
+		*status = t.Code()
+	}
+}
+
+func RespondError(w http.ResponseWriter, status int, err error) {
+	AdjustErrorStatusCode(&status, err)
+
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+
+	type ErrorResponse struct {
+		Errors []string `json:"errors"`
+	}
+	resp := &ErrorResponse{Errors: make([]string, 0, 1)}
+	if err != nil {
+		resp.Errors = append(resp.Errors, err.Error())
+	}
+
+	enc := json.NewEncoder(w)
+	enc.Encode(resp)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/secret.go b/vendor/github.com/hashicorp/vault/sdk/logical/secret.go
new file mode 100644
index 00000000..a2128d86
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/secret.go
@@ -0,0 +1,30 @@
+package logical
+
+import "fmt"
+
+// Secret represents the secret part of a response.
+type Secret struct {
+	LeaseOptions
+
+	// InternalData is JSON-encodable data that is stored with the secret.
+	// This will be sent back during a Renew/Revoke for storing internal data
+	// used for those operations.
+	InternalData map[string]interface{} `json:"internal_data" sentinel:""`
+
+	// LeaseID is the ID returned to the user to manage this secret.
+	// This is generated by Vault core. Any set value will be ignored.
+	// For requests, this will always be blank.
+	LeaseID string `sentinel:""`
+}
+
+func (s *Secret) Validate() error {
+	if s.TTL < 0 {
+		return fmt.Errorf("ttl duration must not be less than zero")
+	}
+
+	return nil
+}
+
+func (s *Secret) GoString() string {
+	return fmt.Sprintf("*%#v", *s)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage.go
new file mode 100644
index 00000000..0802ad01
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/storage.go
@@ -0,0 +1,158 @@
+package logical
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+)
+
+// ErrReadOnly is returned when a backend does not support
+// writing. This can be caused by a read-only replica or secondary
+// cluster operation.
+var ErrReadOnly = errors.New("cannot write to readonly storage")
+
+// ErrSetupReadOnly is returned when a write operation is attempted on a
+// storage while the backend is still being set up.
+var ErrSetupReadOnly = errors.New("cannot write to storage during setup")
+
+// Storage is the way that logical backends are able to read and write data.
+type Storage interface {
+	List(context.Context, string) ([]string, error)
+	Get(context.Context, string) (*StorageEntry, error)
+	Put(context.Context, *StorageEntry) error
+	Delete(context.Context, string) error
+}
+
+// StorageEntry is the entry for an item in a Storage implementation.
+type StorageEntry struct {
+	Key      string
+	Value    []byte
+	SealWrap bool
+}
+
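A minimal round-trip through this Storage interface might look like the following sketch (illustrative, not part of the vendored code; it uses the InmemStorage and StorageEntryJSON helpers defined later in this diff, and the key name and config type are hypothetical):

	package main

	import (
		"context"
		"fmt"

		"github.com/hashicorp/vault/sdk/logical"
	)

	type config struct {
		Region string `json:"region"`
	}

	func main() {
		var s logical.Storage = &logical.InmemStorage{}

		// StorageEntryJSON encodes the value and pairs it with the key.
		entry, err := logical.StorageEntryJSON("config/root", config{Region: "us-west-2"})
		if err != nil {
			panic(err)
		}
		if err := s.Put(context.Background(), entry); err != nil {
			panic(err)
		}

		// DecodeJSON reverses the encoding on the way back out.
		got, err := s.Get(context.Background(), "config/root")
		if err != nil || got == nil {
			panic("missing entry")
		}
		var c config
		if err := got.DecodeJSON(&c); err != nil {
			panic(err)
		}
		fmt.Println(c.Region) // us-west-2
	}

+// DecodeJSON decodes the 'Value' present in StorageEntry.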
+func (e *StorageEntry) DecodeJSON(out interface{}) error { + return jsonutil.DecodeJSON(e.Value, out) +} + +// StorageEntryJSON creates a StorageEntry with a JSON-encoded value. +func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) { + encodedBytes, err := jsonutil.EncodeJSON(v) + if err != nil { + return nil, errwrap.Wrapf("failed to encode storage entry: {{err}}", err) + } + + return &StorageEntry{ + Key: k, + Value: encodedBytes, + }, nil +} + +type ClearableView interface { + List(context.Context, string) ([]string, error) + Delete(context.Context, string) error +} + +// ScanView is used to scan all the keys in a view iteratively +func ScanView(ctx context.Context, view ClearableView, cb func(path string)) error { + frontier := []string{""} + for len(frontier) > 0 { + n := len(frontier) + current := frontier[n-1] + frontier = frontier[:n-1] + + // List the contents + contents, err := view.List(ctx, current) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err) + } + + // Handle the contents in the directory + for _, c := range contents { + // Exit if the context has been canceled + if ctx.Err() != nil { + return ctx.Err() + } + fullPath := current + c + if strings.HasSuffix(c, "/") { + frontier = append(frontier, fullPath) + } else { + cb(fullPath) + } + } + } + return nil +} + +// CollectKeys is used to collect all the keys in a view +func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) { + return CollectKeysWithPrefix(ctx, view, "") +} + +// CollectKeysWithPrefix is used to collect all the keys in a view with a given prefix string +func CollectKeysWithPrefix(ctx context.Context, view ClearableView, prefix string) ([]string, error) { + var keys []string + + cb := func(path string) { + if strings.HasPrefix(path, prefix) { + keys = append(keys, path) + } + } + + // Scan for all the keys + if err := ScanView(ctx, view, cb); err != nil { + return nil, err + } + return keys, nil +} + +// ClearView is used to delete all the keys in a view +func ClearView(ctx context.Context, view ClearableView) error { + return ClearViewWithLogging(ctx, view, nil) +} + +func ClearViewWithLogging(ctx context.Context, view ClearableView, logger hclog.Logger) error { + if view == nil { + return nil + } + + if logger == nil { + logger = hclog.NewNullLogger() + } + + // Collect all the keys + keys, err := CollectKeys(ctx, view) + if err != nil { + return err + } + + logger.Debug("clearing view", "total_keys", len(keys)) + + // Delete all the keys + var pctDone int + for idx, key := range keys { + // Rather than keep trying to do stuff with a canceled context, bail; + // storage will fail anyways + if ctx.Err() != nil { + return ctx.Err() + } + if err := view.Delete(ctx, key); err != nil { + return err + } + + newPctDone := idx * 100.0 / len(keys) + if int(newPctDone) > pctDone { + pctDone = int(newPctDone) + logger.Trace("view deletion progress", "percent", pctDone, "keys_deleted", idx) + } + } + + logger.Debug("view cleared") + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go new file mode 100644 index 00000000..4c7afa1b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go @@ -0,0 +1,67 @@ +package logical + +import ( + "context" + "sync" + + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/physical/inmem" +) + +// InmemStorage implements Storage and stores all 
data in memory. It is
+// basically a straight copy of physical.Inmem, but it prevents backends from
+// having to load all of physical's dependencies (which are legion) just to
+// have some testing storage.
+type InmemStorage struct {
+	underlying physical.Backend
+	once       sync.Once
+}
+
+func (s *InmemStorage) Get(ctx context.Context, key string) (*StorageEntry, error) {
+	s.once.Do(s.init)
+
+	entry, err := s.underlying.Get(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+	return &StorageEntry{
+		Key:      entry.Key,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	}, nil
+}
+
+func (s *InmemStorage) Put(ctx context.Context, entry *StorageEntry) error {
+	s.once.Do(s.init)
+
+	return s.underlying.Put(ctx, &physical.Entry{
+		Key:      entry.Key,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	})
+}
+
+func (s *InmemStorage) Delete(ctx context.Context, key string) error {
+	s.once.Do(s.init)
+
+	return s.underlying.Delete(ctx, key)
+}
+
+func (s *InmemStorage) List(ctx context.Context, prefix string) ([]string, error) {
+	s.once.Do(s.init)
+
+	return s.underlying.List(ctx, prefix)
+}
+
+func (s *InmemStorage) Underlying() *inmem.InmemBackend {
+	s.once.Do(s.init)
+
+	return s.underlying.(*inmem.InmemBackend)
+}
+
+func (s *InmemStorage) init() {
+	s.underlying, _ = inmem.NewInmem(nil, nil)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go
new file mode 100644
index 00000000..f0edc59f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go
@@ -0,0 +1,114 @@
+package logical
+
+import (
+	"context"
+	"errors"
+	"strings"
+)
+
+type StorageView struct {
+	storage Storage
+	prefix  string
+}
+
+var (
+	ErrRelativePath = errors.New("relative paths not supported")
+)
+
+func NewStorageView(storage Storage, prefix string) *StorageView {
+	return &StorageView{
+		storage: storage,
+		prefix:  prefix,
+	}
+}
+
+// logical.Storage impl.
+func (s *StorageView) List(ctx context.Context, prefix string) ([]string, error) {
+	if err := s.SanityCheck(prefix); err != nil {
+		return nil, err
+	}
+	return s.storage.List(ctx, s.ExpandKey(prefix))
+}
+
+// logical.Storage impl.
+func (s *StorageView) Get(ctx context.Context, key string) (*StorageEntry, error) {
+	if err := s.SanityCheck(key); err != nil {
+		return nil, err
+	}
+	entry, err := s.storage.Get(ctx, s.ExpandKey(key))
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+	entry.Key = s.TruncateKey(entry.Key)
+
+	return &StorageEntry{
+		Key:      entry.Key,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	}, nil
+}
+
+// logical.Storage impl.
+func (s *StorageView) Put(ctx context.Context, entry *StorageEntry) error {
+	if entry == nil {
+		return errors.New("cannot write nil entry")
+	}
+
+	if err := s.SanityCheck(entry.Key); err != nil {
+		return err
+	}
+
+	expandedKey := s.ExpandKey(entry.Key)
+
+	nested := &StorageEntry{
+		Key:      expandedKey,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	}
+
+	return s.storage.Put(ctx, nested)
+}
+
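A short sketch (illustrative, not part of the vendored code) of how a view scopes keys under its prefix, using the view helpers in this file; the prefix and key names are made up:

	package main

	import (
		"context"
		"fmt"

		"github.com/hashicorp/vault/sdk/logical"
	)

	func main() {
		base := &logical.InmemStorage{}

		// Keys written through the view are transparently prefixed.
		view := logical.NewStorageView(base, "roles/")
		_ = view.Put(context.Background(), &logical.StorageEntry{Key: "admin", Value: []byte("{}")})

		keys, _ := base.List(context.Background(), "roles/")
		fmt.Println(keys) // [admin]

		// SubView nests prefixes; this view reads and writes under "roles/admin/".
		sub := view.SubView("admin/")
		fmt.Println(sub.Prefix()) // roles/admin/
	}

+// logical.Storage impl.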
+func (s *StorageView) Delete(ctx context.Context, key string) error { + if err := s.SanityCheck(key); err != nil { + return err + } + + expandedKey := s.ExpandKey(key) + + return s.storage.Delete(ctx, expandedKey) +} + +func (s *StorageView) Prefix() string { + return s.prefix +} + +// SubView constructs a nested sub-view using the given prefix +func (s *StorageView) SubView(prefix string) *StorageView { + sub := s.ExpandKey(prefix) + return &StorageView{storage: s.storage, prefix: sub} +} + +// SanityCheck is used to perform a sanity check on a key +func (s *StorageView) SanityCheck(key string) error { + if strings.Contains(key, "..") { + return ErrRelativePath + } + return nil +} + +// ExpandKey is used to expand to the full key path with the prefix +func (s *StorageView) ExpandKey(suffix string) string { + return s.prefix + suffix +} + +// TruncateKey is used to remove the prefix of the key +func (s *StorageView) TruncateKey(full string) string { + return strings.TrimPrefix(full, s.prefix) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go b/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go new file mode 100644 index 00000000..ef88a432 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go @@ -0,0 +1,158 @@ +package logical + +import ( + "context" + "errors" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/license" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" +) + +// SystemView exposes system configuration information in a safe way +// for logical backends to consume +type SystemView interface { + // DefaultLeaseTTL returns the default lease TTL set in Vault configuration + DefaultLeaseTTL() time.Duration + + // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend + // authors should take care not to issue credentials that last longer than + // this value, as Vault will revoke them + MaxLeaseTTL() time.Duration + + // Returns true if the mount is tainted. A mount is tainted if it is in the + // process of being unmounted. This should only be used in special + // circumstances; a primary use-case is as a guard in revocation functions. + // If revocation of a backend's leases fails it can keep the unmounting + // process from being successful. If the reason for this failure is not + // relevant when the mount is tainted (for instance, saving a CRL to disk + // when the stored CRL will be removed during the unmounting process + // anyways), we can ignore the errors to allow unmounting to complete. + Tainted() bool + + // Returns true if caching is disabled. If true, no caches should be used, + // despite known slowdowns. + CachingDisabled() bool + + // When run from a system view attached to a request, indicates whether the + // request is affecting a local mount or not + LocalMount() bool + + // ReplicationState indicates the state of cluster replication + ReplicationState() consts.ReplicationState + + // HasFeature returns true if the feature is currently enabled + HasFeature(feature license.Features) bool + + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) + + // LookupPlugin looks into the plugin catalog for a plugin with the given + // name. Returns a PluginRunner or an error if a plugin can not be found. 
+ LookupPlugin(context.Context, string, consts.PluginType) (*pluginutil.PluginRunner, error) + + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. + MlockEnabled() bool + + // EntityInfo returns a subset of information related to the identity entity + // for the given entity id + EntityInfo(entityID string) (*Entity, error) + + // PluginEnv returns Vault environment information used by plugins + PluginEnv(context.Context) (*PluginEnvironment, error) +} + +type ExtendedSystemView interface { + Auditor() Auditor + ForwardGenericRequest(context.Context, *Request) (*Response, error) +} + +type StaticSystemView struct { + DefaultLeaseTTLVal time.Duration + MaxLeaseTTLVal time.Duration + SudoPrivilegeVal bool + TaintedVal bool + CachingDisabledVal bool + Primary bool + EnableMlock bool + LocalMountVal bool + ReplicationStateVal consts.ReplicationState + EntityVal *Entity + Features license.Features + VaultVersion string + PluginEnvironment *PluginEnvironment +} + +type noopAuditor struct{} + +func (a noopAuditor) AuditRequest(ctx context.Context, input *LogInput) error { + return nil +} + +func (a noopAuditor) AuditResponse(ctx context.Context, input *LogInput) error { + return nil +} + +func (d StaticSystemView) Auditor() Auditor { + return noopAuditor{} +} + +func (d StaticSystemView) ForwardGenericRequest(ctx context.Context, req *Request) (*Response, error) { + return nil, errors.New("ForwardGenericRequest is not implemented in StaticSystemView") +} + +func (d StaticSystemView) DefaultLeaseTTL() time.Duration { + return d.DefaultLeaseTTLVal +} + +func (d StaticSystemView) MaxLeaseTTL() time.Duration { + return d.MaxLeaseTTLVal +} + +func (d StaticSystemView) SudoPrivilege(_ context.Context, path string, token string) bool { + return d.SudoPrivilegeVal +} + +func (d StaticSystemView) Tainted() bool { + return d.TaintedVal +} + +func (d StaticSystemView) CachingDisabled() bool { + return d.CachingDisabledVal +} + +func (d StaticSystemView) LocalMount() bool { + return d.LocalMountVal +} + +func (d StaticSystemView) ReplicationState() consts.ReplicationState { + return d.ReplicationStateVal +} + +func (d StaticSystemView) ResponseWrapData(_ context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView") +} + +func (d StaticSystemView) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) { + return nil, errors.New("LookupPlugin is not implemented in StaticSystemView") +} + +func (d StaticSystemView) MlockEnabled() bool { + return d.EnableMlock +} + +func (d StaticSystemView) EntityInfo(entityID string) (*Entity, error) { + return d.EntityVal, nil +} + +func (d StaticSystemView) HasFeature(feature license.Features) bool { + return d.Features.HasFeature(feature) +} + +func (d StaticSystemView) PluginEnv(_ context.Context) (*PluginEnvironment, error) { + return d.PluginEnvironment, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/testing.go b/vendor/github.com/hashicorp/vault/sdk/logical/testing.go new file mode 100644 index 00000000..da435c90 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/testing.go @@ -0,0 +1,86 @@ +package logical + +import ( + "context" + "reflect" + "time" + + testing "github.com/mitchellh/go-testing-interface" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +// TestRequest is a helper 
to create a purely in-memory Request struct. +func TestRequest(t testing.T, op Operation, path string) *Request { + return &Request{ + Operation: op, + Path: path, + Data: make(map[string]interface{}), + Storage: new(InmemStorage), + Connection: &Connection{}, + } +} + +// TestStorage is a helper that can be used from unit tests to verify +// the behavior of a Storage impl. +func TestStorage(t testing.T, s Storage) { + keys, err := s.List(context.Background(), "") + if err != nil { + t.Fatalf("list error: %s", err) + } + if len(keys) > 0 { + t.Fatalf("should have no keys to start: %#v", keys) + } + + entry := &StorageEntry{Key: "foo", Value: []byte("bar")} + if err := s.Put(context.Background(), entry); err != nil { + t.Fatalf("put error: %s", err) + } + + actual, err := s.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("get error: %s", err) + } + if !reflect.DeepEqual(actual, entry) { + t.Fatalf("wrong value. Expected: %#v\nGot: %#v", entry, actual) + } + + keys, err = s.List(context.Background(), "") + if err != nil { + t.Fatalf("list error: %s", err) + } + if !reflect.DeepEqual(keys, []string{"foo"}) { + t.Fatalf("bad keys: %#v", keys) + } + + if err := s.Delete(context.Background(), "foo"); err != nil { + t.Fatalf("put error: %s", err) + } + + keys, err = s.List(context.Background(), "") + if err != nil { + t.Fatalf("list error: %s", err) + } + if len(keys) > 0 { + t.Fatalf("should have no keys to start: %#v", keys) + } +} + +func TestSystemView() *StaticSystemView { + defaultLeaseTTLVal := time.Hour * 24 + maxLeaseTTLVal := time.Hour * 24 * 2 + return &StaticSystemView{ + DefaultLeaseTTLVal: defaultLeaseTTLVal, + MaxLeaseTTLVal: maxLeaseTTLVal, + } +} + +func TestBackendConfig() *BackendConfig { + bc := &BackendConfig{ + Logger: logging.NewVaultLogger(log.Trace), + System: TestSystemView(), + } + + return bc +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/token.go b/vendor/github.com/hashicorp/vault/sdk/logical/token.go new file mode 100644 index 00000000..38185fb2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/token.go @@ -0,0 +1,225 @@ +package logical + +import ( + "fmt" + "time" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +type TokenType uint8 + +const ( + // TokenTypeDefault means "use the default, if any, that is currently set + // on the mount". If not set, results in a Service token. 
+	TokenTypeDefault TokenType = iota
+
+	// TokenTypeService is a "normal" Vault token for long-lived services
+	TokenTypeService
+
+	// TokenTypeBatch is a batch token
+	TokenTypeBatch
+
+	// TokenTypeDefaultService, configured on a mount, means that if
+	// TokenTypeDefault is sent back by the mount, create Service tokens
+	TokenTypeDefaultService
+
+	// TokenTypeDefaultBatch, configured on a mount, means that if
+	// TokenTypeDefault is sent back by the mount, create Batch tokens
+	TokenTypeDefaultBatch
+)
+
+func (t *TokenType) UnmarshalJSON(b []byte) error {
+	if len(b) == 1 {
+		*t = TokenType(b[0] - '0')
+		return nil
+	}
+
+	// Handle upgrade from pre-1.2 where we were serialized as string:
+	s := string(b)
+	switch s {
+	case `"default"`, `""`:
+		*t = TokenTypeDefault
+	case `"service"`:
+		*t = TokenTypeService
+	case `"batch"`:
+		*t = TokenTypeBatch
+	case `"default-service"`:
+		*t = TokenTypeDefaultService
+	case `"default-batch"`:
+		*t = TokenTypeDefaultBatch
+	default:
+		return fmt.Errorf("unknown token type %q", s)
+	}
+	return nil
+}
+
+func (t TokenType) String() string {
+	switch t {
+	case TokenTypeDefault:
+		return "default"
+	case TokenTypeService:
+		return "service"
+	case TokenTypeBatch:
+		return "batch"
+	case TokenTypeDefaultService:
+		return "default-service"
+	case TokenTypeDefaultBatch:
+		return "default-batch"
+	default:
+		panic("unreachable")
+	}
+}
+
+// TokenEntry is used to represent a given token
+type TokenEntry struct {
+	Type TokenType `json:"type" mapstructure:"type" structs:"type" sentinel:""`
+
+	// ID of this entry, generally a random UUID
+	ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""`
+
+	// Accessor for this token, a random UUID
+	Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""`
+
+	// Parent token, used for revocation trees
+	Parent string `json:"parent" mapstructure:"parent" structs:"parent" sentinel:""`
+
+	// Which named policies should be used
+	Policies []string `json:"policies" mapstructure:"policies" structs:"policies"`
+
+	// Used for audit trails, this is something like "auth/user/login"
+	Path string `json:"path" mapstructure:"path" structs:"path"`
+
+	// Used for auditing. This could include things like "source", "user", "ip"
+	Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta" sentinel:"meta"`
+
+	// Used for operators to be able to associate with the source
+	DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"`
+
+	// Used to restrict the number of uses (zero is unlimited). This is to
+	// support one-time-tokens (generalized). There are a few special values:
+	// if it's -1 it has run through its use counts and is executing its final
+	// use; if it's -2 it is tainted, which means revocation is currently
+	// running on it; and if it's -3 it's also tainted but revocation
+	// previously ran and failed, so this hints the tidy function to try it
+	// again.
+	NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
+
+	// Time of token creation
+	CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time" sentinel:""`
+
+	// Duration set when token was created
+	TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl" sentinel:""`
+
+	// Explicit maximum TTL on the token
+	ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl" sentinel:""`
+
+	// If set, the role that was used for parameters at creation time
+	Role string `json:"role" mapstructure:"role" structs:"role"`
+
+	// If set, the period of the token. This is only used when created directly
+	// through the create endpoint; periods managed by roles or other auth
+	// backends are subject to those renewal rules.
+	Period time.Duration `json:"period" mapstructure:"period" structs:"period" sentinel:""`
+
+	// These are the deprecated fields
+	DisplayNameDeprecated    string        `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName" sentinel:""`
+	NumUsesDeprecated        int           `json:"NumUses" mapstructure:"NumUses" structs:"NumUses" sentinel:""`
+	CreationTimeDeprecated   int64         `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime" sentinel:""`
+	ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL" sentinel:""`
+
+	EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"`
+
+	// The set of CIDRs that this token can be used with
+	BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs" sentinel:""`
+
+	// NamespaceID is the identifier of the namespace to which this token is
+	// confined. Do not return this value over the API when the token is
+	// being looked up.
+	NamespaceID string `json:"namespace_id" mapstructure:"namespace_id" structs:"namespace_id" sentinel:""`
+
+	// CubbyholeID is the identifier of the cubbyhole storage belonging to this
+	// token
+	CubbyholeID string `json:"cubbyhole_id" mapstructure:"cubbyhole_id" structs:"cubbyhole_id" sentinel:""`
+}
+
+func (te *TokenEntry) SentinelGet(key string) (interface{}, error) {
+	if te == nil {
+		return nil, nil
+	}
+	switch key {
+	case "policies":
+		return te.Policies, nil
+
+	case "path":
+		return te.Path, nil
+
+	case "display_name":
+		return te.DisplayName, nil
+
+	case "num_uses":
+		return te.NumUses, nil
+
+	case "role":
+		return te.Role, nil
+
+	case "entity_id":
+		return te.EntityID, nil
+
+	case "period":
+		return te.Period, nil
+
+	case "period_seconds":
+		return int64(te.Period.Seconds()), nil
+
+	case "explicit_max_ttl":
+		return te.ExplicitMaxTTL, nil
+
+	case "explicit_max_ttl_seconds":
+		return int64(te.ExplicitMaxTTL.Seconds()), nil
+
+	case "creation_ttl":
+		return te.TTL, nil
+
+	case "creation_ttl_seconds":
+		return int64(te.TTL.Seconds()), nil
+
+	case "creation_time":
+		return time.Unix(te.CreationTime, 0).Format(time.RFC3339Nano), nil
+
+	case "creation_time_unix":
+		return time.Unix(te.CreationTime, 0), nil
+
+	case "meta", "metadata":
+		return te.Meta, nil
+
+	case "type":
+		teType := te.Type
+		switch teType {
+		case TokenTypeBatch, TokenTypeService:
+		case TokenTypeDefault:
+			teType = TokenTypeService
+		default:
+			return "unknown", nil
+		}
+		return teType.String(), nil
+	}
+
+	return nil, nil
+}
+
+func (te *TokenEntry) SentinelKeys() []string {
+	return []string{
+		"period",
+		"period_seconds",
+		"explicit_max_ttl",
+		"explicit_max_ttl_seconds",
+		"creation_ttl",
+		"creation_ttl_seconds",
+		"creation_time",
+		"creation_time_unix",
+		"meta",
+		"metadata",
+		"type",
+	}
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go b/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go
new file mode 100644
index 00000000..6f0ff342
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go
@@ -0,0 +1,157 @@
+package logical
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// This logic was pulled from the http package so that it can be used for
+// encoding wrapped responses as well. It simply translates the logical
+// response to an http response, with the values we want and omitting the
+// values we don't.
+func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse {
+	httpResp := &HTTPResponse{
+		Data:     input.Data,
+		Warnings: input.Warnings,
+		Headers:  input.Headers,
+	}
+
+	if input.Secret != nil {
+		httpResp.LeaseID = input.Secret.LeaseID
+		httpResp.Renewable = input.Secret.Renewable
+		httpResp.LeaseDuration = int(input.Secret.TTL.Seconds())
+	}
+
+	// If we have authentication information, then
+	// set up the result structure.
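+	// Note that TTLs are flattened to integer seconds on the way out; the
+	// inverse conversion happens in HTTPResponseToLogicalResponse below.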
+	if input.Auth != nil {
+		httpResp.Auth = &HTTPAuth{
+			ClientToken:      input.Auth.ClientToken,
+			Accessor:         input.Auth.Accessor,
+			Policies:         input.Auth.Policies,
+			TokenPolicies:    input.Auth.TokenPolicies,
+			IdentityPolicies: input.Auth.IdentityPolicies,
+			Metadata:         input.Auth.Metadata,
+			LeaseDuration:    int(input.Auth.TTL.Seconds()),
+			Renewable:        input.Auth.Renewable,
+			EntityID:         input.Auth.EntityID,
+			TokenType:        input.Auth.TokenType.String(),
+			Orphan:           input.Auth.Orphan,
+		}
+	}
+
+	return httpResp
+}
+
+func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response {
+	logicalResp := &Response{
+		Data:     input.Data,
+		Warnings: input.Warnings,
+		Headers:  input.Headers,
+	}
+
+	if input.LeaseID != "" {
+		logicalResp.Secret = &Secret{
+			LeaseID: input.LeaseID,
+		}
+		logicalResp.Secret.Renewable = input.Renewable
+		logicalResp.Secret.TTL = time.Second * time.Duration(input.LeaseDuration)
+	}
+
+	if input.Auth != nil {
+		logicalResp.Auth = &Auth{
+			ClientToken:      input.Auth.ClientToken,
+			Accessor:         input.Auth.Accessor,
+			Policies:         input.Auth.Policies,
+			TokenPolicies:    input.Auth.TokenPolicies,
+			IdentityPolicies: input.Auth.IdentityPolicies,
+			Metadata:         input.Auth.Metadata,
+			EntityID:         input.Auth.EntityID,
+			Orphan:           input.Auth.Orphan,
+		}
+		logicalResp.Auth.Renewable = input.Auth.Renewable
+		logicalResp.Auth.TTL = time.Second * time.Duration(input.Auth.LeaseDuration)
+		switch input.Auth.TokenType {
+		case "service":
+			logicalResp.Auth.TokenType = TokenTypeService
+		case "batch":
+			logicalResp.Auth.TokenType = TokenTypeBatch
+		}
+	}
+
+	return logicalResp
+}
+
+type HTTPResponse struct {
+	RequestID     string                 `json:"request_id"`
+	LeaseID       string                 `json:"lease_id"`
+	Renewable     bool                   `json:"renewable"`
+	LeaseDuration int                    `json:"lease_duration"`
+	Data          map[string]interface{} `json:"data"`
+	WrapInfo      *HTTPWrapInfo          `json:"wrap_info"`
+	Warnings      []string               `json:"warnings"`
+	Headers       map[string][]string    `json:"-"`
+	Auth          *HTTPAuth              `json:"auth"`
+}
+
+type HTTPAuth struct {
+	ClientToken      string            `json:"client_token"`
+	Accessor         string            `json:"accessor"`
+	Policies         []string          `json:"policies"`
+	TokenPolicies    []string          `json:"token_policies,omitempty"`
+	IdentityPolicies []string          `json:"identity_policies,omitempty"`
+	Metadata         map[string]string `json:"metadata"`
+	LeaseDuration    int               `json:"lease_duration"`
+	Renewable        bool              `json:"renewable"`
+	EntityID         string            `json:"entity_id"`
+	TokenType        string            `json:"token_type"`
+	Orphan           bool              `json:"orphan"`
+}
+
+type HTTPWrapInfo struct {
+	Token           string `json:"token"`
+	Accessor        string `json:"accessor"`
+	TTL             int    `json:"ttl"`
+	CreationTime    string `json:"creation_time"`
+	CreationPath    string `json:"creation_path"`
+	WrappedAccessor string `json:"wrapped_accessor,omitempty"`
+}
+
+type HTTPSysInjector struct {
+	Response *HTTPResponse
+}
+
+func (h HTTPSysInjector) MarshalJSON() ([]byte, error) {
+	j, err := json.Marshal(h.Response)
+	if err != nil {
+		return nil, err
+	}
+	// Fast path no data or empty data
+	if h.Response.Data == nil || len(h.Response.Data) == 0 {
+		return j, nil
+	}
+	// Marshaling a response will always be a JSON object, meaning it will
+	// always start with '{', so we hijack this to prepend necessary values
+	// Make a guess at the capacity, and write the object opener
+	buf := bytes.NewBuffer(make([]byte, 0, len(j)*2))
+	buf.WriteRune('{')
+	for k, v := range h.Response.Data {
+		// Marshal each key/value individually
+		mk, err := json.Marshal(k)
+		if err != nil {
+			return nil, err
+		}
+		mv, err := json.Marshal(v)
+		if err != nil {
+			return nil, err
+		}
+		// Write into the final buffer. We'll never have a valid response
+		// without any fields so we can unconditionally add a comma after each.
+		buf.WriteString(fmt.Sprintf("%s: %s, ", mk, mv))
+	}
+	// Add the rest, without the first '{'
+	buf.Write(j[1:])
+	return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/cache.go b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go
new file mode 100644
index 00000000..15a77fbc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go
@@ -0,0 +1,250 @@
+package physical
+
+import (
+	"context"
+	"sync/atomic"
+
+	log "github.com/hashicorp/go-hclog"
+	lru "github.com/hashicorp/golang-lru"
+	"github.com/hashicorp/vault/sdk/helper/locksutil"
+	"github.com/hashicorp/vault/sdk/helper/pathmanager"
+)
+
+const (
+	// DefaultCacheSize is used if no cache size is specified for NewCache
+	DefaultCacheSize = 128 * 1024
+
+	// refreshCacheCtxKey is a ctx value that denotes the cache should be
+	// refreshed during a Get call.
+	refreshCacheCtxKey = "refresh_cache"
+)
+
+// These paths don't need to be cached by the LRU cache. This should
+// particularly help memory pressure when unsealing.
+var cacheExceptionsPaths = []string{
+	"wal/logs/",
+	"index/pages/",
+	"index-dr/pages/",
+	"sys/expire/",
+	"core/poison-pill",
+	"core/raft/tls",
+}
+
+// CacheRefreshContext returns a context with an added value denoting if the
+// cache should attempt a refresh.
+func CacheRefreshContext(ctx context.Context, r bool) context.Context {
+	return context.WithValue(ctx, refreshCacheCtxKey, r)
+}
+
+// cacheRefreshFromContext is a helper to look up if the provided context is
+// requesting a cache refresh.
+func cacheRefreshFromContext(ctx context.Context) bool {
+	r, ok := ctx.Value(refreshCacheCtxKey).(bool)
+	if !ok {
+		return false
+	}
+	return r
+}
+
+// Cache is used to wrap an underlying physical backend
+// and provide an LRU cache layer on top. Most of the reads done by
+// Vault are for policy objects so there is a large read reduction
+// by using a simple write-through cache.
+type Cache struct {
+	backend         Backend
+	lru             *lru.TwoQueueCache
+	locks           []*locksutil.LockEntry
+	logger          log.Logger
+	enabled         *uint32
+	cacheExceptions *pathmanager.PathManager
+}
+
+// TransactionalCache is a Cache that wraps the physical that is transactional
+type TransactionalCache struct {
+	*Cache
+	Transactional
+}
+
+// Verify Cache satisfies the correct interfaces
+var _ ToggleablePurgemonster = (*Cache)(nil)
+var _ ToggleablePurgemonster = (*TransactionalCache)(nil)
+var _ Backend = (*Cache)(nil)
+var _ Transactional = (*TransactionalCache)(nil)
+
+// NewCache returns a physical cache of the given size.
+// If no size is provided, the default size is used.
+func NewCache(b Backend, size int, logger log.Logger) *Cache {
+	if logger.IsDebug() {
+		logger.Debug("creating LRU cache", "size", size)
+	}
+	if size <= 0 {
+		size = DefaultCacheSize
+	}
+
+	pm := pathmanager.New()
+	pm.AddPaths(cacheExceptionsPaths)
+
+	cache, _ := lru.New2Q(size)
+	c := &Cache{
+		backend: b,
+		lru:     cache,
+		locks:   locksutil.CreateLocks(),
+		logger:  logger,
+		// This fails safe.
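+		// The zero value leaves the cache disabled; SetEnabled(true) must be
+		// called explicitly to activate it.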
+		enabled:         new(uint32),
+		cacheExceptions: pm,
+	}
+	return c
+}
+
+func NewTransactionalCache(b Backend, size int, logger log.Logger) *TransactionalCache {
+	c := &TransactionalCache{
+		Cache:         NewCache(b, size, logger),
+		Transactional: b.(Transactional),
+	}
+	return c
+}
+
+func (c *Cache) ShouldCache(key string) bool {
+	if atomic.LoadUint32(c.enabled) == 0 {
+		return false
+	}
+
+	return !c.cacheExceptions.HasPath(key)
+}
+
+// SetEnabled is used to toggle whether the cache is on or off. It must be
+// called with true to actually activate the cache after creation.
+func (c *Cache) SetEnabled(enabled bool) {
+	if enabled {
+		atomic.StoreUint32(c.enabled, 1)
+		return
+	}
+	atomic.StoreUint32(c.enabled, 0)
+}
+
+// Purge is used to clear the cache
+func (c *Cache) Purge(ctx context.Context) {
+	// Lock the world
+	for _, lock := range c.locks {
+		lock.Lock()
+		defer lock.Unlock()
+	}
+
+	c.lru.Purge()
+}
+
+func (c *Cache) Put(ctx context.Context, entry *Entry) error {
+	if entry != nil && !c.ShouldCache(entry.Key) {
+		return c.backend.Put(ctx, entry)
+	}
+
+	lock := locksutil.LockForKey(c.locks, entry.Key)
+	lock.Lock()
+	defer lock.Unlock()
+
+	err := c.backend.Put(ctx, entry)
+	if err == nil {
+		c.lru.Add(entry.Key, entry)
+	}
+	return err
+}
+
+func (c *Cache) Get(ctx context.Context, key string) (*Entry, error) {
+	if !c.ShouldCache(key) {
+		return c.backend.Get(ctx, key)
+	}
+
+	lock := locksutil.LockForKey(c.locks, key)
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Check the LRU first
+	if !cacheRefreshFromContext(ctx) {
+		if raw, ok := c.lru.Get(key); ok {
+			if raw == nil {
+				return nil, nil
+			}
+			return raw.(*Entry), nil
+		}
+	}
+
+	// Read from the underlying backend
+	ent, err := c.backend.Get(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+
+	// Cache the result
+	c.lru.Add(key, ent)
+
+	return ent, nil
+}
+
+func (c *Cache) Delete(ctx context.Context, key string) error {
+	if !c.ShouldCache(key) {
+		return c.backend.Delete(ctx, key)
+	}
+
+	lock := locksutil.LockForKey(c.locks, key)
+	lock.Lock()
+	defer lock.Unlock()
+
+	err := c.backend.Delete(ctx, key)
+	if err == nil {
+		c.lru.Remove(key)
+	}
+	return err
+}
+
+func (c *Cache) List(ctx context.Context, prefix string) ([]string, error) {
+	// Always pass-through as this would be difficult to cache. For the same
+	// reason we don't lock as we can't reasonably know which locks to readlock
+	// ahead of time.
+	return c.backend.List(ctx, prefix)
+}
+
+func (c *TransactionalCache) Locks() []*locksutil.LockEntry {
+	return c.locks
+}
+
+func (c *TransactionalCache) LRU() *lru.TwoQueueCache {
+	return c.lru
+}
+
+func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry) error {
+	// Bypass the locking below
+	if atomic.LoadUint32(c.enabled) == 0 {
+		return c.Transactional.Transaction(ctx, txns)
+	}
+
+	// Collect keys that need to be locked
+	var keys []string
+	for _, curr := range txns {
+		keys = append(keys, curr.Entry.Key)
+	}
+	// Lock the keys
+	for _, l := range locksutil.LocksForKeys(c.locks, keys) {
+		l.Lock()
+		defer l.Unlock()
+	}
+
+	if err := c.Transactional.Transaction(ctx, txns); err != nil {
+		return err
+	}
+
+	for _, txn := range txns {
+		if !c.ShouldCache(txn.Entry.Key) {
+			continue
+		}
+
+		switch txn.Operation {
+		case PutOperation:
+			c.lru.Add(txn.Entry.Key, txn.Entry)
+		case DeleteOperation:
+			c.lru.Remove(txn.Entry.Key)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go b/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go
new file mode 100644
index 00000000..d2f93478
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go
@@ -0,0 +1,104 @@
+package physical
+
+import (
+	"context"
+	"errors"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+var ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters")
+var ErrNonPrintable = errors.New("key contains non-printable characters")
+
+// StorageEncoding is used to verify the encoding of keys before passing
+// requests to the underlying physical backend
+type StorageEncoding struct {
+	Backend
+}
+
+// TransactionalStorageEncoding is the transactional version of
+// StorageEncoding
+type TransactionalStorageEncoding struct {
+	*StorageEncoding
+	Transactional
+}
+
+// Verify StorageEncoding satisfies the correct interfaces
+var _ Backend = (*StorageEncoding)(nil)
+var _ Transactional = (*TransactionalStorageEncoding)(nil)
+
+// NewStorageEncoding returns a wrapped physical backend that verifies the
+// key encoding
+func NewStorageEncoding(b Backend) Backend {
+	enc := &StorageEncoding{
+		Backend: b,
+	}
+
+	if bTxn, ok := b.(Transactional); ok {
+		return &TransactionalStorageEncoding{
+			StorageEncoding: enc,
+			Transactional:   bTxn,
+		}
+	}
+
+	return enc
+}
+
+func (e *StorageEncoding) containsNonPrintableChars(key string) bool {
+	idx := strings.IndexFunc(key, func(c rune) bool {
+		return !unicode.IsPrint(c)
+	})
+
+	return idx != -1
+}
+
+func (e *StorageEncoding) Put(ctx context.Context, entry *Entry) error {
+	if !utf8.ValidString(entry.Key) {
+		return ErrNonUTF8
+	}
+
+	if e.containsNonPrintableChars(entry.Key) {
+		return ErrNonPrintable
+	}
+
+	return e.Backend.Put(ctx, entry)
+}
+
+func (e *StorageEncoding) Delete(ctx context.Context, key string) error {
+	if !utf8.ValidString(key) {
+		return ErrNonUTF8
+	}
+
+	if e.containsNonPrintableChars(key) {
+		return ErrNonPrintable
+	}
+
+	return e.Backend.Delete(ctx, key)
+}
+
+func (e *TransactionalStorageEncoding) Transaction(ctx context.Context, txns []*TxnEntry) error {
+	for _, txn := range txns {
+		if !utf8.ValidString(txn.Entry.Key) {
+			return ErrNonUTF8
+		}
+
+		if e.containsNonPrintableChars(txn.Entry.Key) {
+			return ErrNonPrintable
+		}
+	}
+
+	return e.Transactional.Transaction(ctx, txns)
+}
+
+func (e *StorageEncoding) Purge(ctx context.Context) {
+	if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok {
+		purgeable.Purge(ctx)
+	}
+}
+
+func (e *StorageEncoding) SetEnabled(enabled bool) {
+	if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok {
+		purgeable.SetEnabled(enabled)
+	}
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/entry.go b/vendor/github.com/hashicorp/vault/sdk/physical/entry.go
new file mode 100644
index 00000000..a662dfdd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/entry.go
@@ -0,0 +1,17 @@
+package physical
+
+// Entry is used to represent data stored by the physical backend
+type Entry struct {
+	Key      string
+	Value    []byte
+	SealWrap bool `json:"seal_wrap,omitempty"`
+
+	// Only used in replication
+	ValueHash []byte
+
+	// The bool above is an easy control for whether it should be enabled; it
+	// is used to carry information about whether seal wrapping is *desired*
+	// regardless of whether it's currently available. The struct below stores
+	// needed information when it's actually performed.
+	SealWrapInfo *EncryptedBlobInfo `json:"seal_wrap_info,omitempty"`
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/error.go b/vendor/github.com/hashicorp/vault/sdk/physical/error.go
new file mode 100644
index 00000000..8091f178
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/error.go
@@ -0,0 +1,108 @@
+package physical
+
+import (
+	"context"
+	"errors"
+	"math/rand"
+	"sync"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+)
+
+const (
+	// DefaultErrorPercent is used to determine how often we error
+	DefaultErrorPercent = 20
+)
+
+// ErrorInjector is used to add errors into underlying physical requests
+type ErrorInjector struct {
+	backend      Backend
+	errorPercent int
+	randomLock   *sync.Mutex
+	random       *rand.Rand
+}
+
+// TransactionalErrorInjector is the transactional version of the error
+// injector
+type TransactionalErrorInjector struct {
+	*ErrorInjector
+	Transactional
+}
+
+// Verify ErrorInjector satisfies the correct interfaces
+var _ Backend = (*ErrorInjector)(nil)
+var _ Transactional = (*TransactionalErrorInjector)(nil)
+
+// NewErrorInjector returns a wrapped physical backend to inject errors
+func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInjector {
+	if errorPercent < 0 || errorPercent > 100 {
+		errorPercent = DefaultErrorPercent
+	}
+	logger.Info("creating error injector")
+
+	return &ErrorInjector{
+		backend:      b,
+		errorPercent: errorPercent,
+		randomLock:   new(sync.Mutex),
+		random:       rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+	}
+}
+
+// NewTransactionalErrorInjector creates a new transactional ErrorInjector
+func NewTransactionalErrorInjector(b Backend, errorPercent int, logger log.Logger) *TransactionalErrorInjector {
+	return &TransactionalErrorInjector{
+		ErrorInjector: NewErrorInjector(b, errorPercent, logger),
+		Transactional: b.(Transactional),
+	}
+}
+
+func (e *ErrorInjector) SetErrorPercentage(p int) {
+	e.errorPercent = p
+}
+
+func (e *ErrorInjector) addError() error {
+	e.randomLock.Lock()
+	roll := e.random.Intn(100)
+	e.randomLock.Unlock()
+	if roll < e.errorPercent {
+		return errors.New("random error")
+	}
+
+	return nil
+}
+
+func (e *ErrorInjector) Put(ctx context.Context, entry *Entry) error {
+	if err := e.addError(); err != nil {
+		return err
+	}
+	return e.backend.Put(ctx, entry)
+}
+
+func (e *ErrorInjector) Get(ctx context.Context, key string) (*Entry, error) {
+	if err := e.addError(); err != nil {
+		return nil, err
+	}
+	return e.backend.Get(ctx, key)
+}
+
+func (e *ErrorInjector) Delete(ctx context.Context, key string) error {
+	if err := e.addError(); err != nil {
+		return err
+	}
+	return e.backend.Delete(ctx, key)
+}
+
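+// A minimal usage sketch (illustrative only; assumes an existing Backend such
+// as the inmem backend, an hclog logger, and a context):
+//
+//	inm, _ := inmem.NewInmem(nil, logger)
+//	faulty := physical.NewErrorInjector(inm, 20, logger)
+//	if _, err := faulty.Get(ctx, "foo"); err != nil {
+//		// roughly 20% of calls fail with "random error"
+//	}
+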
+func (e *ErrorInjector) List(ctx context.Context, prefix string) ([]string, error) {
+	if err := e.addError(); err != nil {
+		return nil, err
+	}
+	return e.backend.List(ctx, prefix)
+}
+
+func (e *TransactionalErrorInjector) Transaction(ctx context.Context, txns []*TxnEntry) error {
+	if err := e.addError(); err != nil {
+		return err
+	}
+	return e.Transactional.Transaction(ctx, txns)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go
new file mode 100644
index 00000000..9739a758
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go
@@ -0,0 +1,290 @@
+package inmem
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/physical"
+
+	radix "github.com/armon/go-radix"
+)
+
+// Verify interfaces are satisfied
+var _ physical.Backend = (*InmemBackend)(nil)
+var _ physical.HABackend = (*InmemHABackend)(nil)
+var _ physical.HABackend = (*TransactionalInmemHABackend)(nil)
+var _ physical.Lock = (*InmemLock)(nil)
+var _ physical.Transactional = (*TransactionalInmemBackend)(nil)
+var _ physical.Transactional = (*TransactionalInmemHABackend)(nil)
+
+var (
+	PutDisabledError    = errors.New("put operations disabled in inmem backend")
+	GetDisabledError    = errors.New("get operations disabled in inmem backend")
+	DeleteDisabledError = errors.New("delete operations disabled in inmem backend")
+	ListDisabledError   = errors.New("list operations disabled in inmem backend")
+)
+
+// InmemBackend is an in-memory only physical backend. It is useful
+// for testing and development situations where the data is not
+// expected to be durable.
+type InmemBackend struct {
+	sync.RWMutex
+	root         *radix.Tree
+	permitPool   *physical.PermitPool
+	logger       log.Logger
+	failGet      *uint32
+	failPut      *uint32
+	failDelete   *uint32
+	failList     *uint32
+	logOps       bool
+	maxValueSize int
+}
+
+type TransactionalInmemBackend struct {
+	InmemBackend
+}
+
+// NewInmem constructs a new in-memory backend
+func NewInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+	maxValueSize := 0
+	maxValueSizeStr, ok := conf["max_value_size"]
+	if ok {
+		var err error
+		maxValueSize, err = strconv.Atoi(maxValueSizeStr)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &InmemBackend{
+		root:         radix.New(),
+		permitPool:   physical.NewPermitPool(physical.DefaultParallelOperations),
+		logger:       logger,
+		failGet:      new(uint32),
+		failPut:      new(uint32),
+		failDelete:   new(uint32),
+		failList:     new(uint32),
+		logOps:       os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
+		maxValueSize: maxValueSize,
+	}, nil
+}
+
+// Basically for now just creates a permit pool of size 1 so only one operation
+// can run at a time
+func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+	maxValueSize := 0
+	maxValueSizeStr, ok := conf["max_value_size"]
+	if ok {
+		var err error
+		maxValueSize, err = strconv.Atoi(maxValueSizeStr)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &TransactionalInmemBackend{
+		InmemBackend: InmemBackend{
+			root:         radix.New(),
+			permitPool:   physical.NewPermitPool(1),
+			logger:       logger,
+			failGet:      new(uint32),
+			failPut:      new(uint32),
+			failDelete:   new(uint32),
+			failList:     new(uint32),
+			logOps:       os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
+			maxValueSize: maxValueSize,
+		},
+	}, nil
+}
+
+// Put is used to insert or update an entry
+func (i *InmemBackend) Put(ctx context.Context, entry *physical.Entry) error {
+	i.permitPool.Acquire()
+	defer i.permitPool.Release()
+
+	i.Lock()
+	defer i.Unlock()
+
+	return i.PutInternal(ctx, entry)
+}
+
+func (i *InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) error {
+	if i.logOps {
+		i.logger.Trace("put", "key", entry.Key)
+	}
+	if atomic.LoadUint32(i.failPut) != 0 {
+		return PutDisabledError
+	}
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	if i.maxValueSize > 0 && len(entry.Value) > i.maxValueSize {
+		return fmt.Errorf("%s", physical.ErrValueTooLarge)
+	}
+
+	i.root.Insert(entry.Key, entry.Value)
+	return nil
+}
+
+func (i *InmemBackend) FailPut(fail bool) {
+	var val uint32
+	if fail {
+		val = 1
+	}
+	atomic.StoreUint32(i.failPut, val)
+}
+
+// Get is used to fetch an entry
+func (i *InmemBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
+	i.permitPool.Acquire()
+	defer i.permitPool.Release()
+
+	i.RLock()
+	defer i.RUnlock()
+
+	return i.GetInternal(ctx, key)
+}
+
+func (i *InmemBackend) GetInternal(ctx context.Context, key string) (*physical.Entry, error) {
+	if i.logOps {
+		i.logger.Trace("get", "key", key)
+	}
+	if atomic.LoadUint32(i.failGet) != 0 {
+		return nil, GetDisabledError
+	}
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	if raw, ok := i.root.Get(key); ok {
+		return &physical.Entry{
+			Key:   key,
+			Value: raw.([]byte),
+		}, nil
+	}
+	return nil, nil
+}
+
+func (i *InmemBackend) FailGet(fail bool) {
+	var val uint32
+	if fail {
+		val = 1
+	}
+	atomic.StoreUint32(i.failGet, val)
+}
+
+// Delete is used to permanently delete an entry
+func (i *InmemBackend) Delete(ctx context.Context, key string) error {
+	i.permitPool.Acquire()
+	defer i.permitPool.Release()
+
+	i.Lock()
+	defer i.Unlock()
+
+	return i.DeleteInternal(ctx, key)
+}
+
+func (i *InmemBackend) DeleteInternal(ctx context.Context, key string) error {
+	if i.logOps {
+		i.logger.Trace("delete", "key", key)
+	}
+	if atomic.LoadUint32(i.failDelete) != 0 {
+		return DeleteDisabledError
+	}
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	i.root.Delete(key)
+	return nil
+}
+
+func (i *InmemBackend) FailDelete(fail bool) {
+	var val uint32
+	if fail {
+		val = 1
+	}
+	atomic.StoreUint32(i.failDelete, val)
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (i *InmemBackend) List(ctx context.Context, prefix string) ([]string, error) {
+	i.permitPool.Acquire()
+	defer i.permitPool.Release()
+
+	i.RLock()
+	defer i.RUnlock()
+
+	return i.ListInternal(ctx, prefix)
+}
+
+func (i *InmemBackend) ListInternal(ctx context.Context, prefix string) ([]string, error) {
+	if i.logOps {
+		i.logger.Trace("list", "prefix", prefix)
+	}
+	if atomic.LoadUint32(i.failList) != 0 {
+		return nil, ListDisabledError
+	}
+
+	var out []string
+	seen := make(map[string]interface{})
+	walkFn := func(s string, v interface{}) bool {
+		trimmed := strings.TrimPrefix(s, prefix)
+		sep := strings.Index(trimmed, "/")
+		if sep == -1 {
+			out = append(out, trimmed)
+		} else {
+			trimmed = trimmed[:sep+1]
+			if _, ok := seen[trimmed]; !ok {
+				out = append(out, trimmed)
+				seen[trimmed] = struct{}{}
+			}
+		}
+		return false
+	}
+	i.root.WalkPrefix(prefix, walkFn)
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	return out, nil
+}
+
+func (i *InmemBackend) FailList(fail bool) {
+	var val uint32
+	if fail {
+		val = 1
+	}
+	atomic.StoreUint32(i.failList, val)
+}
+
+// Implements the transaction interface
+func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
+	t.permitPool.Acquire()
+	defer t.permitPool.Release()
+
+	t.Lock()
+	defer t.Unlock()
+
+	return physical.GenericTransactionHandler(ctx, t, txns)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go
new file mode 100644
index 00000000..64fcb3a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go
@@ -0,0 +1,167 @@
+package inmem
+
+import (
+	"fmt"
+	"sync"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/physical"
+)
+
+type InmemHABackend struct {
+	physical.Backend
+	locks  map[string]string
+	l      *sync.Mutex
+	cond   *sync.Cond
+	logger log.Logger
+}
+
+type TransactionalInmemHABackend struct {
+	physical.Transactional
+	InmemHABackend
+}
+
+// NewInmemHA constructs a new in-memory HA backend. This is only for testing.
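+//
+// A test-only sketch of acquiring the lock (assuming an hclog logger):
+//
+//	ha, _ := NewInmemHA(nil, logger)
+//	lock, _ := ha.(physical.HABackend).LockWith("leader", "node-1")
+//	leaderCh, _ := lock.Lock(nil)
+//	<-leaderCh // closed when leadership is lost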
+func NewInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) {
+	be, err := NewInmem(nil, logger)
+	if err != nil {
+		return nil, err
+	}
+
+	in := &InmemHABackend{
+		Backend: be,
+		locks:   make(map[string]string),
+		logger:  logger,
+		l:       new(sync.Mutex),
+	}
+	in.cond = sync.NewCond(in.l)
+	return in, nil
+}
+
+func NewTransactionalInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) {
+	transInmem, err := NewTransactionalInmem(nil, logger)
+	if err != nil {
+		return nil, err
+	}
+	inmemHA := InmemHABackend{
+		Backend: transInmem,
+		locks:   make(map[string]string),
+		logger:  logger,
+		l:       new(sync.Mutex),
+	}
+
+	in := &TransactionalInmemHABackend{
+		InmemHABackend: inmemHA,
+		Transactional:  transInmem.(physical.Transactional),
+	}
+	in.cond = sync.NewCond(in.l)
+	return in, nil
+}
+
+// LockWith is used for mutual exclusion based on the given key.
+func (i *InmemHABackend) LockWith(key, value string) (physical.Lock, error) {
+	l := &InmemLock{
+		in:    i,
+		key:   key,
+		value: value,
+	}
+	return l, nil
+}
+
+// LockMapSize is used in some tests to determine whether this backend has ever
+// been used for HA purposes rather than simply for storage
+func (i *InmemHABackend) LockMapSize() int {
+	return len(i.locks)
+}
+
+// HAEnabled indicates whether the HA functionality should be exposed.
+// Currently always returns true.
+func (i *InmemHABackend) HAEnabled() bool {
+	return true
+}
+
+// InmemLock is an in-memory Lock implementation for the HABackend
+type InmemLock struct {
+	in    *InmemHABackend
+	key   string
+	value string
+
+	held     bool
+	leaderCh chan struct{}
+	l        sync.Mutex
+}
+
+func (i *InmemLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	i.l.Lock()
+	defer i.l.Unlock()
+	if i.held {
+		return nil, fmt.Errorf("lock already held")
+	}
+
+	// Attempt an async acquisition
+	didLock := make(chan struct{})
+	releaseCh := make(chan bool, 1)
+	go func() {
+		// Wait to acquire the lock
+		i.in.l.Lock()
+		_, ok := i.in.locks[i.key]
+		for ok {
+			i.in.cond.Wait()
+			_, ok = i.in.locks[i.key]
+		}
+		i.in.locks[i.key] = i.value
+		i.in.l.Unlock()
+
+		// Signal that lock is held
+		close(didLock)
+
+		// Handle an early abort
+		release := <-releaseCh
+		if release {
+			i.in.l.Lock()
+			delete(i.in.locks, i.key)
+			i.in.l.Unlock()
+			i.in.cond.Broadcast()
+		}
+	}()
+
+	// Wait for lock acquisition or shutdown
+	select {
+	case <-didLock:
+		releaseCh <- false
+	case <-stopCh:
+		releaseCh <- true
+		return nil, nil
+	}
+
+	// Create the leader channel
+	i.held = true
+	i.leaderCh = make(chan struct{})
+	return i.leaderCh, nil
+}
+
+func (i *InmemLock) Unlock() error {
+	i.l.Lock()
+	defer i.l.Unlock()
+
+	if !i.held {
+		return nil
+	}
+
+	close(i.leaderCh)
+	i.leaderCh = nil
+	i.held = false
+
+	i.in.l.Lock()
+	delete(i.in.locks, i.key)
+	i.in.l.Unlock()
+	i.in.cond.Broadcast()
+	return nil
+}
+
+func (i *InmemLock) Value() (bool, string, error) {
+	i.in.l.Lock()
+	val, ok := i.in.locks[i.key]
+	i.in.l.Unlock()
+	return ok, val, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/latency.go b/vendor/github.com/hashicorp/vault/sdk/physical/latency.go
new file mode 100644
index 00000000..51bb560c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/latency.go
@@ -0,0 +1,110 @@
+package physical
+
+import (
+	"context"
+	"math/rand"
+	"sync"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+)
+
+const (
+	// DefaultJitterPercent is used if no jitter percent is specified for
+	// NewLatencyInjector
+	DefaultJitterPercent = 20
+)
+
+// LatencyInjector is used to add latency into underlying physical requests
+type LatencyInjector struct {
+	logger        log.Logger
+	backend       Backend
+	latency       time.Duration
+	jitterPercent int
+	randomLock    *sync.Mutex
+	random        *rand.Rand
+}
+
+// TransactionalLatencyInjector is the transactional version of the latency
+// injector
+type TransactionalLatencyInjector struct {
+	*LatencyInjector
+	Transactional
+}
+
+// Verify LatencyInjector satisfies the correct interfaces
+var _ Backend = (*LatencyInjector)(nil)
+var _ Transactional = (*TransactionalLatencyInjector)(nil)
+
+// NewLatencyInjector returns a wrapped physical backend to simulate latency
+func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector {
+	if jitter < 0 || jitter > 100 {
+		jitter = DefaultJitterPercent
+	}
+	logger.Info("creating latency injector")
+
+	return &LatencyInjector{
+		logger:        logger,
+		backend:       b,
+		latency:       latency,
+		jitterPercent: jitter,
+		randomLock:    new(sync.Mutex),
+		random:        rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+	}
+}
+
+// NewTransactionalLatencyInjector creates a new transactional LatencyInjector
+func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector {
+	return &TransactionalLatencyInjector{
+		LatencyInjector: NewLatencyInjector(b, latency, jitter, logger),
+		Transactional:   b.(Transactional),
+	}
+}
+
+func (l *LatencyInjector) SetLatency(latency time.Duration) {
+	l.logger.Info("Changing backend latency", "latency", latency)
+	l.latency = latency
+}
+
+func (l *LatencyInjector) addLatency() {
+	// Scale the configured latency by a percentage between
+	// (100 - jitterPercent) and (100 + jitterPercent)
+	percent := 100
+	if l.jitterPercent > 0 {
+		min := 100 - l.jitterPercent
+		max := 100 + l.jitterPercent
+		l.randomLock.Lock()
+		percent = l.random.Intn(max-min) + min
+		l.randomLock.Unlock()
+	}
+	latencyDuration := time.Duration(int(l.latency) * percent / 100)
+	time.Sleep(latencyDuration)
+}
+
+// Put is a latent put request
+func (l *LatencyInjector) Put(ctx context.Context, entry *Entry) error {
+	l.addLatency()
+	return l.backend.Put(ctx, entry)
+}
+
+// Get is a latent get request
+func (l *LatencyInjector) Get(ctx context.Context, key string) (*Entry, error) {
+	l.addLatency()
+	return l.backend.Get(ctx, key)
+}
+
+// Delete is a latent delete request
+func (l *LatencyInjector) Delete(ctx context.Context, key string) error {
+	l.addLatency()
+	return l.backend.Delete(ctx, key)
+}
+
+// List is a latent list request
+func (l *LatencyInjector) List(ctx context.Context, prefix string) ([]string, error) {
+	l.addLatency()
+	return l.backend.List(ctx, prefix)
+}
+
+// Transaction is a latent transaction request
+func (l *TransactionalLatencyInjector) Transaction(ctx context.Context, txns []*TxnEntry) error {
+	l.addLatency()
+	return l.Transactional.Transaction(ctx, txns)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical.go
new file mode 100644
index 00000000..ec98e905
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/physical.go
@@ -0,0 +1,166 @@
+package physical
+
+import (
+	"context"
+	"strings"
+	"sync"
+
+	log "github.com/hashicorp/go-hclog"
+)
+
+const DefaultParallelOperations = 128
+
+// The operation type
+type Operation string
+
+const (
+	DeleteOperation Operation = "delete"
+	GetOperation              = "get"
+	ListOperation             = "list"
+	PutOperation              = "put"
+)
+
+const (
+	ErrValueTooLarge = "put failed due to value being too large"
+)
+
+// ShutdownChannel is used to signal a shutdown.
+type ShutdownChannel chan struct{}
+
+// Backend is the interface required for a physical
+// backend. A physical backend is used to durably store
+// data outside of Vault. As such, it is completely untrusted,
+// and is only accessed via a security barrier. The backends
+// must represent keys in a hierarchical manner. All methods
+// are expected to be thread safe.
+type Backend interface {
+	// Put is used to insert or update an entry
+	Put(ctx context.Context, entry *Entry) error
+
+	// Get is used to fetch an entry
+	Get(ctx context.Context, key string) (*Entry, error)
+
+	// Delete is used to permanently delete an entry
+	Delete(ctx context.Context, key string) error
+
+	// List is used to list all the keys under a given
+	// prefix, up to the next prefix.
+	List(ctx context.Context, prefix string) ([]string, error)
+}
+
+// HABackend is an extension to the standard physical
+// backend to support high-availability. Vault only expects to
+// use mutual exclusion to allow multiple instances to act as a
+// hot standby for a leader that services all requests.
+type HABackend interface {
+	// LockWith is used for mutual exclusion based on the given key.
+	LockWith(key, value string) (Lock, error)
+
+	// Whether or not HA functionality is enabled
+	HAEnabled() bool
+}
+
+// ToggleablePurgemonster is an interface for backends that can toggle on or
+// off special functionality and/or support purging. This is only used for the
+// cache, don't use it for other things.
+type ToggleablePurgemonster interface {
+	Purge(ctx context.Context)
+	SetEnabled(bool)
+}
+
+// RedirectDetect is an optional interface that an HABackend
+// can implement. If it does, a redirect address can be automatically
+// detected.
+type RedirectDetect interface {
+	// DetectHostAddr is used to detect the host address
+	DetectHostAddr() (string, error)
+}
+
+// Callback signatures for RunServiceDiscovery
+type ActiveFunction func() bool
+type SealedFunction func() bool
+type PerformanceStandbyFunction func() bool
+
+// ServiceDiscovery is an optional interface that an HABackend can implement.
+// If it does, the state of a backend is advertised to the service discovery
+// network.
+type ServiceDiscovery interface {
+	// NotifyActiveStateChange is used by Core to notify a backend
+	// capable of ServiceDiscovery that this Vault instance has changed
+	// its status to active or standby.
+	NotifyActiveStateChange() error
+
+	// NotifySealedStateChange is used by Core to notify a backend
+	// capable of ServiceDiscovery that Vault has changed its Sealed
+	// status to sealed or unsealed.
+	NotifySealedStateChange() error
+
+	// NotifyPerformanceStandbyStateChange is used by Core to notify a backend
+	// capable of ServiceDiscovery that this Vault instance has changed its
+	// status to performance standby or standby.
+	NotifyPerformanceStandbyStateChange() error
+
+	// RunServiceDiscovery executes any background service discovery tasks
+	// until the shutdown channel is closed.
+	RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc ActiveFunction, sealedFunc SealedFunction, perfStandbyFunc PerformanceStandbyFunction) error
+}
+
+type Lock interface {
+	// Lock is used to acquire the given lock
+	// The stopCh is optional and if closed should interrupt the lock
+	// acquisition attempt. The returned channel is closed when
+	// leadership is lost.
+	Lock(stopCh <-chan struct{}) (<-chan struct{}, error)
+
+	// Unlock is used to release the lock
+	Unlock() error
+
+	// Returns the value of the lock and if it is held
+	Value() (bool, string, error)
+}
+
+// Factory is the factory function to create a physical backend.
+type Factory func(config map[string]string, logger log.Logger) (Backend, error)
+
+// PermitPool is used to limit maximum outstanding requests
+type PermitPool struct {
+	sem chan int
+}
+
+// NewPermitPool returns a new permit pool with the provided
+// number of permits
+func NewPermitPool(permits int) *PermitPool {
+	if permits < 1 {
+		permits = DefaultParallelOperations
+	}
+	return &PermitPool{
+		sem: make(chan int, permits),
+	}
+}
+
+// Acquire returns when a permit has been acquired
+func (c *PermitPool) Acquire() {
+	c.sem <- 1
+}
+
+// Release returns a permit to the pool
+func (c *PermitPool) Release() {
+	<-c.sem
+}
+
+// CurrentPermits returns the number of permits currently in use
+func (c *PermitPool) CurrentPermits() int {
+	return len(c.sem)
+}
+
+// Prefixes is a shared helper function that returns all parent 'folders' for
+// a given vault key.
+// e.g. for 'foo/bar/baz', it returns ['foo', 'foo/bar']
+func Prefixes(s string) []string {
+	components := strings.Split(s, "/")
+	result := []string{}
+	for i := 1; i < len(components); i++ {
+		result = append(result, strings.Join(components[:i], "/"))
+	}
+	return result
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go
new file mode 100644
index 00000000..7497313a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go
@@ -0,0 +1,40 @@
+package physical
+
+import (
+	"context"
+)
+
+// PhysicalAccess is a wrapper around physical.Backend that allows Core to
+// expose its physical storage operations through PhysicalAccess() while
+// restricting the ability to modify Core.physical itself.
+type PhysicalAccess struct {
+	physical Backend
+}
+
+var _ Backend = (*PhysicalAccess)(nil)
+
+func NewPhysicalAccess(physical Backend) *PhysicalAccess {
+	return &PhysicalAccess{physical: physical}
+}
+
+func (p *PhysicalAccess) Put(ctx context.Context, entry *Entry) error {
+	return p.physical.Put(ctx, entry)
+}
+
+func (p *PhysicalAccess) Get(ctx context.Context, key string) (*Entry, error) {
+	return p.physical.Get(ctx, key)
+}
+
+func (p *PhysicalAccess) Delete(ctx context.Context, key string) error {
+	return p.physical.Delete(ctx, key)
+}
+
+func (p *PhysicalAccess) List(ctx context.Context, prefix string) ([]string, error) {
+	return p.physical.List(ctx, prefix)
+}
+
+func (p *PhysicalAccess) Purge(ctx context.Context) {
+	if purgeable, ok := p.physical.(ToggleablePurgemonster); ok {
+		purgeable.Purge(ctx)
+	}
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go
new file mode 100644
index 00000000..da505a4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go
@@ -0,0 +1,98 @@
+package physical
+
+import (
+	"context"
+	"errors"
+	"strings"
+)
+
+var (
+	ErrRelativePath = errors.New("relative paths not supported")
+)
+
+// View represents a prefixed view of a physical backend
+type View struct {
+	backend Backend
+	prefix  string
+}
+
+// Verify View satisfies the correct interfaces
+var _ Backend = (*View)(nil)
+
+// NewView takes an underlying physical backend and returns
+// a view of it that can only operate with the given prefix.
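+// For example (illustrative; assumes a ctx and a val), a view created with
+// prefix "logical/" stores entries under that prefix and strips it on reads:
+//
+//	view := NewView(backend, "logical/")
+//	_ = view.Put(ctx, &Entry{Key: "foo", Value: val}) // lands at "logical/foo"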
+func NewView(backend Backend, prefix string) *View {
+	return &View{
+		backend: backend,
+		prefix:  prefix,
+	}
+}
+
+// List the contents of the prefixed view
+func (v *View) List(ctx context.Context, prefix string) ([]string, error) {
+	if err := v.sanityCheck(prefix); err != nil {
+		return nil, err
+	}
+	return v.backend.List(ctx, v.expandKey(prefix))
+}
+
+// Get the key of the prefixed view
+func (v *View) Get(ctx context.Context, key string) (*Entry, error) {
+	if err := v.sanityCheck(key); err != nil {
+		return nil, err
+	}
+	entry, err := v.backend.Get(ctx, v.expandKey(key))
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+	entry.Key = v.truncateKey(entry.Key)
+
+	return &Entry{
+		Key:   entry.Key,
+		Value: entry.Value,
+	}, nil
+}
+
+// Put the entry into the prefix view
+func (v *View) Put(ctx context.Context, entry *Entry) error {
+	if err := v.sanityCheck(entry.Key); err != nil {
+		return err
+	}
+
+	nested := &Entry{
+		Key:   v.expandKey(entry.Key),
+		Value: entry.Value,
+	}
+	return v.backend.Put(ctx, nested)
+}
+
+// Delete the entry from the prefix view
+func (v *View) Delete(ctx context.Context, key string) error {
+	if err := v.sanityCheck(key); err != nil {
+		return err
+	}
+	return v.backend.Delete(ctx, v.expandKey(key))
+}
+
+// sanityCheck is used to perform a sanity check on a key
+func (v *View) sanityCheck(key string) error {
+	if strings.Contains(key, "..") {
+		return ErrRelativePath
+	}
+	return nil
+}
+
+// expandKey is used to expand to the full key path with the prefix
+func (v *View) expandKey(suffix string) string {
+	return v.prefix + suffix
+}
+
+// truncateKey is used to remove the prefix of the key
+func (v *View) truncateKey(full string) string {
+	return strings.TrimPrefix(full, v.prefix)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/testing.go b/vendor/github.com/hashicorp/vault/sdk/physical/testing.go
new file mode 100644
index 00000000..0970b869
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/testing.go
@@ -0,0 +1,497 @@
+package physical
+
+import (
+	"context"
+	"reflect"
+	"sort"
+	"testing"
+	"time"
+)
+
+func ExerciseBackend(t testing.TB, b Backend) {
+	t.Helper()
+
+	// Should be empty
+	keys, err := b.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("initial list failed: %v", err)
+	}
+	if len(keys) != 0 {
+		t.Errorf("initial not empty: %v", keys)
+	}
+
+	// Delete should work if it does not exist
+	err = b.Delete(context.Background(), "foo")
+	if err != nil {
+		t.Fatalf("idempotent delete: %v", err)
+	}
+
+	// Get should not fail, but be nil
+	out, err := b.Get(context.Background(), "foo")
+	if err != nil {
+		t.Fatalf("initial get failed: %v", err)
+	}
+	if out != nil {
+		t.Errorf("initial get was not nil: %v", out)
+	}
+
+	// Make an entry
+	e := &Entry{Key: "foo", Value: []byte("test")}
+	err = b.Put(context.Background(), e)
+	if err != nil {
+		t.Fatalf("put failed: %v", err)
+	}
+
+	// Get should work
+	out, err = b.Get(context.Background(), "foo")
+	if err != nil {
+		t.Fatalf("get failed: %v", err)
+	}
+	if !reflect.DeepEqual(out, e) {
+		t.Errorf("bad: %v expected: %v", out, e)
+	}
+
+	// List should not be empty
+	keys, err = b.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("list failed: %v", err)
+	}
+	if len(keys) != 1 || keys[0] != "foo" {
+		t.Errorf("keys[0] did not equal foo: %v", keys)
+	}
+
+	// Delete should work
+	err = b.Delete(context.Background(), "foo")
+	if err != nil {
+		t.Fatalf("delete: %v", err)
+	}
+
+	// Should be empty
+	keys, err = b.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("list after delete: %v", err)
+	}
+	if len(keys) != 0 {
+		t.Errorf("list after delete not empty: %v", keys)
+	}
+
+	// Get should fail
+	out, err = b.Get(context.Background(), "foo")
+	if err != nil {
+		t.Fatalf("get after delete: %v", err)
+	}
+	if out != nil {
+		t.Errorf("get after delete not nil: %v", out)
+	}
+
+	// Multiple Puts should work; GH-189
+	e = &Entry{Key: "foo", Value: []byte("test")}
+	err = b.Put(context.Background(), e)
+	if err != nil {
+		t.Fatalf("multi put 1 failed: %v", err)
+	}
+	e = &Entry{Key: "foo", Value: []byte("test")}
+	err = b.Put(context.Background(), e)
+	if err != nil {
+		t.Fatalf("multi put 2 failed: %v", err)
+	}
+
+	// Make a nested entry
+	e = &Entry{Key: "foo/bar", Value: []byte("baz")}
+	err = b.Put(context.Background(), e)
+	if err != nil {
+		t.Fatalf("nested put failed: %v", err)
+	}
+
+	// Get should work
+	out, err = b.Get(context.Background(), "foo/bar")
+	if err != nil {
+		t.Fatalf("get failed: %v", err)
+	}
+	if !reflect.DeepEqual(out, e) {
+		t.Errorf("bad: %v expected: %v", out, e)
+	}
+
+	keys, err = b.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("list multi failed: %v", err)
+	}
+	sort.Strings(keys)
+	if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" {
+		t.Errorf("expected 2 keys [foo, foo/]: %v", keys)
+	}
+
+	// Delete with children should work
+	err = b.Delete(context.Background(), "foo")
+	if err != nil {
+		t.Fatalf("delete after multi: %v", err)
+	}
+
+	// Get should return the child
+	out, err = b.Get(context.Background(), "foo/bar")
+	if err != nil {
+		t.Fatalf("get after multi delete: %v", err)
+	}
+	if out == nil {
+		t.Errorf("get after multi delete not nil: %v", out)
+	}
+
+	// Removal of nested secret should not leave artifacts
+	e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")}
+	err = b.Put(context.Background(), e)
+	if err != nil {
+		t.Fatalf("deep nest: %v", err)
+	}
+
+	err = b.Delete(context.Background(), "foo/nested1/nested2/nested3")
+	if err != nil {
+		t.Fatalf("failed to remove deep nest: %v", err)
+	}
+
+	keys, err = b.List(context.Background(), "foo/")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(keys) != 1 || keys[0] != "bar" {
+		t.Errorf("should be exactly 1 key == bar: %v", keys)
+	}
+
+	// Make a second nested entry to test prefix removal
+	e = &Entry{Key: "foo/zip", Value: []byte("zap")}
+	err = b.Put(context.Background(), e)
+	if err != nil {
+		t.Fatalf("failed to create second nested: %v", err)
+	}
+
+	// Delete should not remove the prefix
+	err = b.Delete(context.Background(), "foo/bar")
+	if err != nil {
+		t.Fatalf("failed to delete nested prefix: %v", err)
+	}
+
+	keys, err = b.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("list nested prefix: %v", err)
+	}
+	if len(keys) != 1 || keys[0] != "foo/" {
+		t.Errorf("should be exactly 1 key == foo/: %v", keys)
+	}
+
+	// Delete should remove the prefix
+	err = b.Delete(context.Background(), "foo/zip")
+	if err != nil {
+		t.Fatalf("failed to delete second prefix: %v", err)
+	}
+
+	keys, err = b.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("listing after second delete failed: %v", err)
+	}
+	if len(keys) != 0 {
+		t.Errorf("should be empty at end: %v", keys)
+	}
+
+	// When the root path is empty, adding and removing deep nested values should not break listing
+	e = &Entry{Key: "foo/nested1/nested2/value1", Value: []byte("baz")}
+	err = b.Put(context.Background(), e)
+	if err != nil {
+		t.Fatalf("deep nest: %v", err)
+	}
+
+	e = &Entry{Key: "foo/nested1/nested2/value2", Value: []byte("baz")}
+	err = b.Put(context.Background(), e)
+	if err != nil {
+		t.Fatalf("deep nest: %v", err)
+	}
+
+	err = b.Delete(context.Background(), "foo/nested1/nested2/value2")
+	if err != nil {
+		t.Fatalf("failed to remove deep nest: %v", err)
+	}
+
+	keys, err = b.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("listing of root failed after deletion: %v", err)
+	}
+	if len(keys) == 0 {
+		t.Errorf("root is returning empty after deleting a single nested value, expected nested1/: %v", keys)
+		keys, err = b.List(context.Background(), "foo/nested1")
+		if err != nil {
+			t.Fatalf("listing of expected nested path 'foo/nested1' failed: %v", err)
+		}
+		// Prove that the root should not be empty and that foo/nested1 exists
+		if len(keys) != 0 {
+			t.Logf("keys can still be listed from nested1/ so it's not empty, expected nested2/: %v", keys)
+		}
+	}
+
+	// Cleanup left over listing bug test value
+	err = b.Delete(context.Background(), "foo/nested1/nested2/value1")
+	if err != nil {
+		t.Fatalf("failed to remove deep nest: %v", err)
+	}
+
+	keys, err = b.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("listing of root failed after delete of deep nest: %v", err)
+	}
+	if len(keys) != 0 {
+		t.Errorf("should be empty at end: %v", keys)
+	}
+}
+
+func ExerciseBackend_ListPrefix(t testing.TB, b Backend) {
+	t.Helper()
+
+	e1 := &Entry{Key: "foo", Value: []byte("test")}
+	e2 := &Entry{Key: "foo/bar", Value: []byte("test")}
+	e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")}
+
+	defer func() {
+		b.Delete(context.Background(), "foo")
+		b.Delete(context.Background(), "foo/bar")
+		b.Delete(context.Background(), "foo/bar/baz")
+	}()
+
+	err := b.Put(context.Background(), e1)
+	if err != nil {
+		t.Fatalf("failed to put entry 1: %v", err)
+	}
+	err = b.Put(context.Background(), e2)
+	if err != nil {
+		t.Fatalf("failed to put entry 2: %v", err)
+	}
+	err = b.Put(context.Background(), e3)
+	if err != nil {
+		t.Fatalf("failed to put entry 3: %v", err)
+	}
+
+	// Scan the root
+	keys, err := b.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("list root: %v", err)
+	}
+	sort.Strings(keys)
+	if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" {
+		t.Errorf("root expected [foo foo/]: %v", keys)
+	}
+
+	// Scan foo/
+	keys, err = b.List(context.Background(), "foo/")
+	if err != nil {
+		t.Fatalf("list level 1: %v", err)
+	}
+	sort.Strings(keys)
+	if len(keys) != 2 || keys[0] != "bar" || keys[1] != "bar/" {
+		t.Errorf("level 1 expected [bar bar/]: %v", keys)
+	}
+
+	// Scan foo/bar/
+	keys, err = b.List(context.Background(), "foo/bar/")
+	if err != nil {
+		t.Fatalf("list level 2: %v", err)
+	}
+	sort.Strings(keys)
+	if len(keys) != 1 || keys[0] != "baz" {
+		t.Errorf("level 2 expected [baz]: %v", keys)
+	}
+}
+
+func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) {
+	t.Helper()
+
+	// Get the lock
+	lock, err := b.LockWith("foo", "bar")
+	if err != nil {
+		t.Fatalf("initial lock: %v", err)
+	}
+
+	// Attempt to lock
+	leaderCh, err := lock.Lock(nil)
+	if err != nil {
+		t.Fatalf("lock attempt 1: %v", err)
+	}
+	if leaderCh == nil {
+		t.Fatalf("missing leaderCh")
+	}
+
+	// Check the value
+	held, val, err := lock.Value()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if !held {
+		t.Errorf("should be held")
+	}
+	if val != "bar" {
+		t.Errorf("expected value bar: %v", val)
+	}
+
+	// Second acquisition should fail
+	lock2, err := b2.LockWith("foo", "baz")
+	if err != nil {
{ + t.Fatalf("lock 2: %v", err) + } + + // Cancel attempt in 50 msec + stopCh := make(chan struct{}) + time.AfterFunc(50*time.Millisecond, func() { + close(stopCh) + }) + + // Attempt to lock + leaderCh2, err := lock2.Lock(stopCh) + if err != nil { + t.Fatalf("stop lock 2: %v", err) + } + if leaderCh2 != nil { + t.Errorf("should not have gotten leaderCh: %v", leaderCh2) + } + + // Release the first lock + lock.Unlock() + + // Attempt to lock should work + leaderCh2, err = lock2.Lock(nil) + if err != nil { + t.Fatalf("lock 2 lock: %v", err) + } + if leaderCh2 == nil { + t.Errorf("should get leaderCh") + } + + // Check the value + held, val, err = lock2.Value() + if err != nil { + t.Fatalf("value: %v", err) + } + if !held { + t.Errorf("should still be held") + } + if val != "baz" { + t.Errorf("expected: baz, got: %v", val) + } + + // Cleanup + lock2.Unlock() +} + +func ExerciseTransactionalBackend(t testing.TB, b Backend) { + t.Helper() + tb, ok := b.(Transactional) + if !ok { + t.Fatal("Not a transactional backend") + } + + txns := SetupTestingTransactions(t, b) + + if err := tb.Transaction(context.Background(), txns); err != nil { + t.Fatal(err) + } + + keys, err := b.List(context.Background(), "") + if err != nil { + t.Fatal(err) + } + + expected := []string{"foo", "zip"} + + sort.Strings(keys) + sort.Strings(expected) + if !reflect.DeepEqual(keys, expected) { + t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys) + } + + entry, err := b.Get(context.Background(), "foo") + if err != nil { + t.Fatal(err) + } + if entry == nil { + t.Fatal("got nil entry") + } + if entry.Value == nil { + t.Fatal("got nil value") + } + if string(entry.Value) != "bar3" { + t.Fatal("updates did not apply correctly") + } + + entry, err = b.Get(context.Background(), "zip") + if err != nil { + t.Fatal(err) + } + if entry == nil { + t.Fatal("got nil entry") + } + if entry.Value == nil { + t.Fatal("got nil value") + } + if string(entry.Value) != "zap3" { + t.Fatal("updates did not apply correctly") + } +} + +func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry { + t.Helper() + // Add a few keys so that we test rollback with deletion + if err := b.Put(context.Background(), &Entry{ + Key: "foo", + Value: []byte("bar"), + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "zip", + Value: []byte("zap"), + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "deleteme", + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "deleteme2", + }); err != nil { + t.Fatal(err) + } + + txns := []*TxnEntry{ + &TxnEntry{ + Operation: PutOperation, + Entry: &Entry{ + Key: "foo", + Value: []byte("bar2"), + }, + }, + &TxnEntry{ + Operation: DeleteOperation, + Entry: &Entry{ + Key: "deleteme", + }, + }, + &TxnEntry{ + Operation: PutOperation, + Entry: &Entry{ + Key: "foo", + Value: []byte("bar3"), + }, + }, + &TxnEntry{ + Operation: DeleteOperation, + Entry: &Entry{ + Key: "deleteme2", + }, + }, + &TxnEntry{ + Operation: PutOperation, + Entry: &Entry{ + Key: "zip", + Value: []byte("zap3"), + }, + }, + } + + return txns +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go b/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go new file mode 100644 index 00000000..19f0d2cb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go @@ -0,0 +1,131 @@ +package physical + +import ( + "context" + + multierror "github.com/hashicorp/go-multierror" 
+) + +// TxnEntry is an operation that takes atomically as part of +// a transactional update. Only supported by Transactional backends. +type TxnEntry struct { + Operation Operation + Entry *Entry +} + +// Transactional is an optional interface for backends that +// support doing transactional updates of multiple keys. This is +// required for some features such as replication. +type Transactional interface { + // The function to run a transaction + Transaction(context.Context, []*TxnEntry) error +} + +type TransactionalBackend interface { + Backend + Transactional +} + +type PseudoTransactional interface { + // An internal function should do no locking or permit pool acquisition. + // Depending on the backend and if it natively supports transactions, these + // may simply chain to the normal backend functions. + GetInternal(context.Context, string) (*Entry, error) + PutInternal(context.Context, *Entry) error + DeleteInternal(context.Context, string) error +} + +// Implements the transaction interface +func GenericTransactionHandler(ctx context.Context, t PseudoTransactional, txns []*TxnEntry) (retErr error) { + rollbackStack := make([]*TxnEntry, 0, len(txns)) + var dirty bool + + // We walk the transactions in order; each successful operation goes into a + // LIFO for rollback if we hit an error along the way +TxnWalk: + for _, txn := range txns { + switch txn.Operation { + case DeleteOperation: + entry, err := t.GetInternal(ctx, txn.Entry.Key) + if err != nil { + retErr = multierror.Append(retErr, err) + dirty = true + break TxnWalk + } + if entry == nil { + // Nothing to delete or roll back + continue + } + rollbackEntry := &TxnEntry{ + Operation: PutOperation, + Entry: &Entry{ + Key: entry.Key, + Value: entry.Value, + }, + } + err = t.DeleteInternal(ctx, txn.Entry.Key) + if err != nil { + retErr = multierror.Append(retErr, err) + dirty = true + break TxnWalk + } + rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...) + + case PutOperation: + entry, err := t.GetInternal(ctx, txn.Entry.Key) + if err != nil { + retErr = multierror.Append(retErr, err) + dirty = true + break TxnWalk + } + // Nothing existed so in fact rolling back requires a delete + var rollbackEntry *TxnEntry + if entry == nil { + rollbackEntry = &TxnEntry{ + Operation: DeleteOperation, + Entry: &Entry{ + Key: txn.Entry.Key, + }, + } + } else { + rollbackEntry = &TxnEntry{ + Operation: PutOperation, + Entry: &Entry{ + Key: entry.Key, + Value: entry.Value, + }, + } + } + + err = t.PutInternal(ctx, txn.Entry) + if err != nil { + retErr = multierror.Append(retErr, err) + dirty = true + break TxnWalk + } + rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...) + } + } + + // Need to roll back because we hit an error along the way + if dirty { + // While traversing this, if we get an error, we continue anyways in + // best-effort fashion + for _, txn := range rollbackStack { + switch txn.Operation { + case DeleteOperation: + err := t.DeleteInternal(ctx, txn.Entry.Key) + if err != nil { + retErr = multierror.Append(retErr, err) + } + case PutOperation: + err := t.PutInternal(ctx, txn.Entry) + if err != nil { + retErr = multierror.Append(retErr, err) + } + } + } + } + + return +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/types.pb.go b/vendor/github.com/hashicorp/vault/sdk/physical/types.pb.go new file mode 100644 index 00000000..fc9e04a4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/types.pb.go @@ -0,0 +1,221 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
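
GenericTransactionHandler above is the reusable half of pseudo-transactions: a backend supplies lock-free GetInternal, PutInternal, and DeleteInternal, and the handler walks the batch in order, pushing the inverse of each successful step onto a LIFO stack that it replays best-effort if a later step fails. A minimal sketch of wiring it up, assuming only the vendored sdk/physical package; the memTxnBackend type is hypothetical and not part of this diff:

	package main

	import (
		"context"
		"fmt"
		"sync"

		"github.com/hashicorp/vault/sdk/physical"
	)

	// memTxnBackend is a hypothetical in-memory store, just enough to
	// satisfy physical.PseudoTransactional; it is not part of this change.
	type memTxnBackend struct {
		mu   sync.Mutex
		data map[string][]byte
	}

	var _ physical.PseudoTransactional = (*memTxnBackend)(nil)

	func (m *memTxnBackend) GetInternal(_ context.Context, key string) (*physical.Entry, error) {
		v, ok := m.data[key]
		if !ok {
			return nil, nil
		}
		return &physical.Entry{Key: key, Value: v}, nil
	}

	func (m *memTxnBackend) PutInternal(_ context.Context, e *physical.Entry) error {
		m.data[e.Key] = e.Value
		return nil
	}

	func (m *memTxnBackend) DeleteInternal(_ context.Context, key string) error {
		delete(m.data, key)
		return nil
	}

	// Transaction takes the lock once; the generic handler supplies the
	// ordered walk and the LIFO rollback on failure.
	func (m *memTxnBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
		m.mu.Lock()
		defer m.mu.Unlock()
		return physical.GenericTransactionHandler(ctx, m, txns)
	}

	func main() {
		b := &memTxnBackend{data: map[string][]byte{"deleteme": []byte("x")}}
		err := b.Transaction(context.Background(), []*physical.TxnEntry{
			{Operation: physical.PutOperation, Entry: &physical.Entry{Key: "foo", Value: []byte("bar")}},
			{Operation: physical.DeleteOperation, Entry: &physical.Entry{Key: "deleteme"}},
		})
		fmt.Println(err, b.data) // <nil> map[foo:[98 97 114]]
	}

Holding one lock across the whole batch is what makes the rollback safe: nothing can observe or mutate the keys between the forward walk and a potential replay.
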
+// source: sdk/physical/types.proto + +package physical + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type EncryptedBlobInfo struct { + Ciphertext []byte `protobuf:"bytes,1,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + IV []byte `protobuf:"bytes,2,opt,name=iv,proto3" json:"iv,omitempty"` + HMAC []byte `protobuf:"bytes,3,opt,name=hmac,proto3" json:"hmac,omitempty"` + Wrapped bool `protobuf:"varint,4,opt,name=wrapped,proto3" json:"wrapped,omitempty"` + KeyInfo *SealKeyInfo `protobuf:"bytes,5,opt,name=key_info,json=keyInfo,proto3" json:"key_info,omitempty"` + // Key is the Key value for the entry that corresponds to + // physical.Entry.Key's value + Key string `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptedBlobInfo) Reset() { *m = EncryptedBlobInfo{} } +func (m *EncryptedBlobInfo) String() string { return proto.CompactTextString(m) } +func (*EncryptedBlobInfo) ProtoMessage() {} +func (*EncryptedBlobInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_21dce1f497d1541e, []int{0} +} + +func (m *EncryptedBlobInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptedBlobInfo.Unmarshal(m, b) +} +func (m *EncryptedBlobInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptedBlobInfo.Marshal(b, m, deterministic) +} +func (m *EncryptedBlobInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptedBlobInfo.Merge(m, src) +} +func (m *EncryptedBlobInfo) XXX_Size() int { + return xxx_messageInfo_EncryptedBlobInfo.Size(m) +} +func (m *EncryptedBlobInfo) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptedBlobInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptedBlobInfo proto.InternalMessageInfo + +func (m *EncryptedBlobInfo) GetCiphertext() []byte { + if m != nil { + return m.Ciphertext + } + return nil +} + +func (m *EncryptedBlobInfo) GetIV() []byte { + if m != nil { + return m.IV + } + return nil +} + +func (m *EncryptedBlobInfo) GetHMAC() []byte { + if m != nil { + return m.HMAC + } + return nil +} + +func (m *EncryptedBlobInfo) GetWrapped() bool { + if m != nil { + return m.Wrapped + } + return false +} + +func (m *EncryptedBlobInfo) GetKeyInfo() *SealKeyInfo { + if m != nil { + return m.KeyInfo + } + return nil +} + +func (m *EncryptedBlobInfo) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +// SealKeyInfo contains information regarding the seal used to encrypt the entry. +type SealKeyInfo struct { + // Mechanism is the method used by the seal to encrypt and sign the + // data as defined by the seal. + Mechanism uint64 `protobuf:"varint,1,opt,name=Mechanism,proto3" json:"Mechanism,omitempty"` + HMACMechanism uint64 `protobuf:"varint,2,opt,name=HMACMechanism,proto3" json:"HMACMechanism,omitempty"` + // This is an opaque ID used by the seal to identify the specific + // key to use as defined by the seal. 
This could be a version, key + // label, or something else. + KeyID string `protobuf:"bytes,3,opt,name=KeyID,proto3" json:"KeyID,omitempty"` + HMACKeyID string `protobuf:"bytes,4,opt,name=HMACKeyID,proto3" json:"HMACKeyID,omitempty"` + // These value are used when generating our own data encryption keys + // and encrypting them using the autoseal + WrappedKey []byte `protobuf:"bytes,5,opt,name=WrappedKey,proto3" json:"WrappedKey,omitempty"` + // Mechanism specific flags + Flags uint64 `protobuf:"varint,6,opt,name=Flags,proto3" json:"Flags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SealKeyInfo) Reset() { *m = SealKeyInfo{} } +func (m *SealKeyInfo) String() string { return proto.CompactTextString(m) } +func (*SealKeyInfo) ProtoMessage() {} +func (*SealKeyInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_21dce1f497d1541e, []int{1} +} + +func (m *SealKeyInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SealKeyInfo.Unmarshal(m, b) +} +func (m *SealKeyInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SealKeyInfo.Marshal(b, m, deterministic) +} +func (m *SealKeyInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SealKeyInfo.Merge(m, src) +} +func (m *SealKeyInfo) XXX_Size() int { + return xxx_messageInfo_SealKeyInfo.Size(m) +} +func (m *SealKeyInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SealKeyInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SealKeyInfo proto.InternalMessageInfo + +func (m *SealKeyInfo) GetMechanism() uint64 { + if m != nil { + return m.Mechanism + } + return 0 +} + +func (m *SealKeyInfo) GetHMACMechanism() uint64 { + if m != nil { + return m.HMACMechanism + } + return 0 +} + +func (m *SealKeyInfo) GetKeyID() string { + if m != nil { + return m.KeyID + } + return "" +} + +func (m *SealKeyInfo) GetHMACKeyID() string { + if m != nil { + return m.HMACKeyID + } + return "" +} + +func (m *SealKeyInfo) GetWrappedKey() []byte { + if m != nil { + return m.WrappedKey + } + return nil +} + +func (m *SealKeyInfo) GetFlags() uint64 { + if m != nil { + return m.Flags + } + return 0 +} + +func init() { + proto.RegisterType((*EncryptedBlobInfo)(nil), "physical.EncryptedBlobInfo") + proto.RegisterType((*SealKeyInfo)(nil), "physical.SealKeyInfo") +} + +func init() { proto.RegisterFile("sdk/physical/types.proto", fileDescriptor_21dce1f497d1541e) } + +var fileDescriptor_21dce1f497d1541e = []byte{ + // 316 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0x5d, 0x4b, 0xfb, 0x30, + 0x14, 0xc6, 0x69, 0xd7, 0xbd, 0x9d, 0xed, 0xff, 0x47, 0x83, 0x42, 0x2e, 0x44, 0xca, 0x10, 0xac, + 0x37, 0xad, 0xe8, 0x27, 0x70, 0xbe, 0xa0, 0x8c, 0xdd, 0xc4, 0x0b, 0xc1, 0x1b, 0xc9, 0xb2, 0x6c, + 0x09, 0xed, 0x9a, 0xd0, 0x66, 0xd3, 0x7c, 0x30, 0xaf, 0xfc, 0x72, 0x92, 0x94, 0xb1, 0x7a, 0x77, + 0xce, 0x2f, 0x0f, 0x0f, 0xcf, 0x93, 0x03, 0xb8, 0x5e, 0xe6, 0x99, 0x16, 0xb6, 0x96, 0x8c, 0x16, + 0x99, 0xb1, 0x9a, 0xd7, 0xa9, 0xae, 0x94, 0x51, 0x68, 0xb0, 0xa7, 0x93, 0xef, 0x00, 0x8e, 0x1f, + 0x4b, 0x56, 0x59, 0x6d, 0xf8, 0x72, 0x5a, 0xa8, 0xc5, 0x4b, 0xb9, 0x52, 0xe8, 0x1c, 0x80, 0x49, + 0x2d, 0x78, 0x65, 0xf8, 0x97, 0xc1, 0x41, 0x1c, 0x24, 0x63, 0xd2, 0x22, 0xe8, 0x3f, 0x84, 0x72, + 0x87, 0x43, 0xcf, 0x43, 0xb9, 0x43, 0x08, 0x22, 0xb1, 0xa1, 0x0c, 0x77, 0x3c, 0xf1, 0x33, 0xc2, + 0xd0, 0xff, 0xac, 0xa8, 0xd6, 0x7c, 0x89, 0xa3, 0x38, 0x48, 0x06, 0x64, 0xbf, 0xa2, 0x6b, 0x18, + 0xe4, 0xdc, 0x7e, 0xc8, 
0x72, 0xa5, 0x70, 0x37, 0x0e, 0x92, 0xd1, 0xcd, 0x69, 0xba, 0x0f, 0x94, + 0xbe, 0x72, 0x5a, 0xcc, 0xb8, 0x75, 0x31, 0x48, 0x3f, 0x6f, 0x06, 0x74, 0x04, 0x9d, 0x9c, 0x5b, + 0xdc, 0x8b, 0x83, 0x64, 0x48, 0xdc, 0x38, 0xf9, 0x09, 0x60, 0xd4, 0x92, 0xa2, 0x33, 0x18, 0xce, + 0x39, 0x13, 0xb4, 0x94, 0xf5, 0xc6, 0x07, 0x8e, 0xc8, 0x01, 0xa0, 0x0b, 0xf8, 0xf7, 0x3c, 0xbf, + 0xbb, 0x3f, 0x28, 0x42, 0xaf, 0xf8, 0x0b, 0xd1, 0x09, 0x74, 0x9d, 0xdd, 0x83, 0xaf, 0x31, 0x24, + 0xcd, 0xe2, 0x9c, 0x9d, 0xac, 0x79, 0x89, 0xfc, 0xcb, 0x01, 0xb8, 0x9f, 0x7a, 0x6b, 0x6a, 0xcd, + 0xb8, 0xf5, 0x6d, 0xc6, 0xa4, 0x45, 0x9c, 0xe7, 0x53, 0x41, 0xd7, 0xb5, 0xcf, 0x1e, 0x91, 0x66, + 0x99, 0x5e, 0xbd, 0x5f, 0xae, 0xa5, 0x11, 0xdb, 0x45, 0xca, 0xd4, 0x26, 0x13, 0xb4, 0x16, 0x92, + 0xa9, 0x4a, 0x67, 0x3b, 0xba, 0x2d, 0x4c, 0xd6, 0x3e, 0xdb, 0xa2, 0xe7, 0x2f, 0x76, 0xfb, 0x1b, + 0x00, 0x00, 0xff, 0xff, 0xc6, 0x34, 0xf8, 0x45, 0xcd, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/types.proto b/vendor/github.com/hashicorp/vault/sdk/physical/types.proto new file mode 100644 index 00000000..1241382d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/types.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/sdk/physical"; + +package physical; + +message EncryptedBlobInfo { + bytes ciphertext = 1; + bytes iv = 2; + bytes hmac = 3; + bool wrapped = 4; + SealKeyInfo key_info = 5; + + // Key is the Key value for the entry that corresponds to + // physical.Entry.Key's value + string key = 6; +} + +// SealKeyInfo contains information regarding the seal used to encrypt the entry. +message SealKeyInfo { + // Mechanism is the method used by the seal to encrypt and sign the + // data as defined by the seal. + uint64 Mechanism = 1; + uint64 HMACMechanism = 2; + + // This is an opaque ID used by the seal to identify the specific + // key to use as defined by the seal. This could be a version, key + // label, or something else. + string KeyID = 3; + string HMACKeyID = 4; + + // These value are used when generating our own data encryption keys + // and encrypting them using the autoseal + bytes WrappedKey = 5; + + // Mechanism specific flags + uint64 Flags = 6; +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/backend.go b/vendor/github.com/hashicorp/vault/sdk/plugin/backend.go new file mode 100644 index 00000000..17932efe --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/backend.go @@ -0,0 +1,55 @@ +package plugin + +import ( + "context" + "sync/atomic" + + "google.golang.org/grpc" + + log "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin/pb" +) + +var _ plugin.Plugin = (*GRPCBackendPlugin)(nil) +var _ plugin.GRPCPlugin = (*GRPCBackendPlugin)(nil) + +// GRPCBackendPlugin is the plugin.Plugin implementation that only supports GRPC +// transport +type GRPCBackendPlugin struct { + Factory logical.Factory + MetadataMode bool + Logger log.Logger + + // Embeding this will disable the netRPC protocol + plugin.NetRPCUnsupportedPlugin +} + +func (b GRPCBackendPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + pb.RegisterBackendServer(s, &backendGRPCPluginServer{ + broker: broker, + factory: b.Factory, + // We pass the logger down into the backend so go-plugin will forward + // logs for us. 
+ logger: b.Logger, + }) + return nil +} + +func (b *GRPCBackendPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + ret := &backendGRPCPluginClient{ + client: pb.NewBackendClient(c), + clientConn: c, + broker: broker, + cleanupCh: make(chan struct{}), + doneCtx: ctx, + metadataMode: b.MetadataMode, + } + + // Create the value and set the type + ret.server = new(atomic.Value) + ret.server.Store((*grpc.Server)(nil)) + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend.go b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend.go new file mode 100644 index 00000000..a65eeebe --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend.go @@ -0,0 +1,12 @@ +package plugin + +import ( + "math" + + "google.golang.org/grpc" +) + +var largeMsgGRPCCallOpts []grpc.CallOption = []grpc.CallOption{ + grpc.MaxCallSendMsgSize(math.MaxInt32), + grpc.MaxCallRecvMsgSize(math.MaxInt32), +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend_client.go b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend_client.go new file mode 100644 index 00000000..4bb9a2a4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend_client.go @@ -0,0 +1,280 @@ +package plugin + +import ( + "context" + "errors" + "math" + "sync/atomic" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + log "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin/pb" +) + +var ErrPluginShutdown = errors.New("plugin is shut down") +var ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode") + +// Validate backendGRPCPluginClient satisfies the logical.Backend interface +var _ logical.Backend = &backendGRPCPluginClient{} + +// backendPluginClient implements logical.Backend and is the +// go-plugin client. +type backendGRPCPluginClient struct { + broker *plugin.GRPCBroker + client pb.BackendClient + metadataMode bool + + system logical.SystemView + logger log.Logger + + // This is used to signal to the Cleanup function that it can proceed + // because we have a defined server + cleanupCh chan struct{} + + // server is the grpc server used for serving storage and sysview requests. + server *atomic.Value + + // clientConn is the underlying grpc connection to the server, we store it + // so it can be cleaned up. + clientConn *grpc.ClientConn + doneCtx context.Context +} + +func (b *backendGRPCPluginClient) Initialize(ctx context.Context, _ *logical.InitializationRequest) error { + if b.metadataMode { + return nil + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx) + defer close(quitCh) + defer cancel() + + reply, err := b.client.Initialize(ctx, &pb.InitializeArgs{}, largeMsgGRPCCallOpts...) + if err != nil { + if b.doneCtx.Err() != nil { + return ErrPluginShutdown + } + + // If the plugin doesn't have Initialize implemented we should not fail + // the initialize call; otherwise this could halt startup of vault. 
+ grpcStatus, ok := status.FromError(err) + if ok && grpcStatus.Code() == codes.Unimplemented { + return nil + } + + return err + } + if reply.Err != nil { + return pb.ProtoErrToErr(reply.Err) + } + + return nil +} + +func (b *backendGRPCPluginClient) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) { + if b.metadataMode { + return nil, ErrClientInMetadataMode + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx) + defer close(quitCh) + defer cancel() + + protoReq, err := pb.LogicalRequestToProtoRequest(req) + if err != nil { + return nil, err + } + + reply, err := b.client.HandleRequest(ctx, &pb.HandleRequestArgs{ + Request: protoReq, + }, largeMsgGRPCCallOpts...) + if err != nil { + if b.doneCtx.Err() != nil { + return nil, ErrPluginShutdown + } + + return nil, err + } + resp, err := pb.ProtoResponseToLogicalResponse(reply.Response) + if err != nil { + return nil, err + } + if reply.Err != nil { + return resp, pb.ProtoErrToErr(reply.Err) + } + + return resp, nil +} + +func (b *backendGRPCPluginClient) SpecialPaths() *logical.Paths { + reply, err := b.client.SpecialPaths(b.doneCtx, &pb.Empty{}) + if err != nil { + return nil + } + + if reply.Paths == nil { + return nil + } + + return &logical.Paths{ + Root: reply.Paths.Root, + Unauthenticated: reply.Paths.Unauthenticated, + LocalStorage: reply.Paths.LocalStorage, + SealWrapStorage: reply.Paths.SealWrapStorage, + } +} + +// System returns vault's system view. The backend client stores the view during +// Setup, so there is no need to shim the system just to get it back. +func (b *backendGRPCPluginClient) System() logical.SystemView { + return b.system +} + +// Logger returns vault's logger. The backend client stores the logger during +// Setup, so there is no need to shim the logger just to get it back. +func (b *backendGRPCPluginClient) Logger() log.Logger { + return b.logger +} + +func (b *backendGRPCPluginClient) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) { + if b.metadataMode { + return false, false, ErrClientInMetadataMode + } + + protoReq, err := pb.LogicalRequestToProtoRequest(req) + if err != nil { + return false, false, err + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx) + defer close(quitCh) + defer cancel() + reply, err := b.client.HandleExistenceCheck(ctx, &pb.HandleExistenceCheckArgs{ + Request: protoReq, + }, largeMsgGRPCCallOpts...) + if err != nil { + if b.doneCtx.Err() != nil { + return false, false, ErrPluginShutdown + } + return false, false, err + } + if reply.Err != nil { + return false, false, pb.ProtoErrToErr(reply.Err) + } + + return reply.CheckFound, reply.Exists, nil +} + +func (b *backendGRPCPluginClient) Cleanup(ctx context.Context) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx) + defer close(quitCh) + defer cancel() + + b.client.Cleanup(ctx, &pb.Empty{}) + + // This will block until Setup has run the function to create a new server + // in b.server. If we stop here before it has a chance to actually start + // listening, when it starts listening it will immediately error out and + // exit, which is fine. Overall this ensures that we do not miss stopping + // the server if it ends up being created after Cleanup is called. 
+ <-b.cleanupCh + server := b.server.Load() + if server != nil { + server.(*grpc.Server).GracefulStop() + } + b.clientConn.Close() +} + +func (b *backendGRPCPluginClient) InvalidateKey(ctx context.Context, key string) { + if b.metadataMode { + return + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx) + defer close(quitCh) + defer cancel() + + b.client.InvalidateKey(ctx, &pb.InvalidateKeyArgs{ + Key: key, + }) +} + +func (b *backendGRPCPluginClient) Setup(ctx context.Context, config *logical.BackendConfig) error { + // Shim logical.Storage + storageImpl := config.StorageView + if b.metadataMode { + storageImpl = &NOOPStorage{} + } + storage := &GRPCStorageServer{ + impl: storageImpl, + } + + // Shim logical.SystemView + sysViewImpl := config.System + if b.metadataMode { + sysViewImpl = &logical.StaticSystemView{} + } + sysView := &gRPCSystemViewServer{ + impl: sysViewImpl, + } + + // Register the server in this closure. + serverFunc := func(opts []grpc.ServerOption) *grpc.Server { + opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32)) + opts = append(opts, grpc.MaxSendMsgSize(math.MaxInt32)) + + s := grpc.NewServer(opts...) + pb.RegisterSystemViewServer(s, sysView) + pb.RegisterStorageServer(s, storage) + b.server.Store(s) + close(b.cleanupCh) + return s + } + brokerID := b.broker.NextId() + go b.broker.AcceptAndServe(brokerID, serverFunc) + + args := &pb.SetupArgs{ + BrokerID: brokerID, + Config: config.Config, + BackendUUID: config.BackendUUID, + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx) + defer close(quitCh) + defer cancel() + + reply, err := b.client.Setup(ctx, args) + if err != nil { + return err + } + if reply.Err != "" { + return errors.New(reply.Err) + } + + // Set system and logger for getter methods + b.system = config.System + b.logger = config.Logger + + return nil +} + +func (b *backendGRPCPluginClient) Type() logical.BackendType { + reply, err := b.client.Type(b.doneCtx, &pb.Empty{}) + if err != nil { + return logical.TypeUnknown + } + + return logical.BackendType(reply.Type) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend_server.go b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend_server.go new file mode 100644 index 00000000..b895a9cf --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend_server.go @@ -0,0 +1,161 @@ +package plugin + +import ( + "context" + "errors" + + log "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin/pb" + "google.golang.org/grpc" +) + +var ErrServerInMetadataMode = errors.New("plugin server can not perform action while in metadata mode") + +type backendGRPCPluginServer struct { + broker *plugin.GRPCBroker + backend logical.Backend + + factory logical.Factory + + brokeredClient *grpc.ClientConn + + logger log.Logger +} + +// Setup dials into the plugin's broker to get a shimmed storage, logger, and +// system view of the backend. This method also instantiates the underlying +// backend through its factory func for the server side of the plugin. 
+func (b *backendGRPCPluginServer) Setup(ctx context.Context, args *pb.SetupArgs) (*pb.SetupReply, error) { + // Dial for storage + brokeredClient, err := b.broker.Dial(args.BrokerID) + if err != nil { + return &pb.SetupReply{}, err + } + b.brokeredClient = brokeredClient + storage := newGRPCStorageClient(brokeredClient) + sysView := newGRPCSystemView(brokeredClient) + + config := &logical.BackendConfig{ + StorageView: storage, + Logger: b.logger, + System: sysView, + Config: args.Config, + BackendUUID: args.BackendUUID, + } + + // Call the underlying backend factory after shims have been created + // to set b.backend + backend, err := b.factory(ctx, config) + if err != nil { + return &pb.SetupReply{ + Err: pb.ErrToString(err), + }, nil + } + b.backend = backend + + return &pb.SetupReply{}, nil +} + +func (b *backendGRPCPluginServer) HandleRequest(ctx context.Context, args *pb.HandleRequestArgs) (*pb.HandleRequestReply, error) { + if pluginutil.InMetadataMode() { + return &pb.HandleRequestReply{}, ErrServerInMetadataMode + } + + logicalReq, err := pb.ProtoRequestToLogicalRequest(args.Request) + if err != nil { + return &pb.HandleRequestReply{}, err + } + + logicalReq.Storage = newGRPCStorageClient(b.brokeredClient) + + resp, respErr := b.backend.HandleRequest(ctx, logicalReq) + + pbResp, err := pb.LogicalResponseToProtoResponse(resp) + if err != nil { + return &pb.HandleRequestReply{}, err + } + + return &pb.HandleRequestReply{ + Response: pbResp, + Err: pb.ErrToProtoErr(respErr), + }, nil +} + +func (b *backendGRPCPluginServer) Initialize(ctx context.Context, _ *pb.InitializeArgs) (*pb.InitializeReply, error) { + if pluginutil.InMetadataMode() { + return &pb.InitializeReply{}, ErrServerInMetadataMode + } + + req := &logical.InitializationRequest{ + Storage: newGRPCStorageClient(b.brokeredClient), + } + + respErr := b.backend.Initialize(ctx, req) + + return &pb.InitializeReply{ + Err: pb.ErrToProtoErr(respErr), + }, nil +} + +func (b *backendGRPCPluginServer) SpecialPaths(ctx context.Context, args *pb.Empty) (*pb.SpecialPathsReply, error) { + paths := b.backend.SpecialPaths() + if paths == nil { + return &pb.SpecialPathsReply{ + Paths: nil, + }, nil + } + + return &pb.SpecialPathsReply{ + Paths: &pb.Paths{ + Root: paths.Root, + Unauthenticated: paths.Unauthenticated, + LocalStorage: paths.LocalStorage, + SealWrapStorage: paths.SealWrapStorage, + }, + }, nil +} + +func (b *backendGRPCPluginServer) HandleExistenceCheck(ctx context.Context, args *pb.HandleExistenceCheckArgs) (*pb.HandleExistenceCheckReply, error) { + if pluginutil.InMetadataMode() { + return &pb.HandleExistenceCheckReply{}, ErrServerInMetadataMode + } + + logicalReq, err := pb.ProtoRequestToLogicalRequest(args.Request) + if err != nil { + return &pb.HandleExistenceCheckReply{}, err + } + logicalReq.Storage = newGRPCStorageClient(b.brokeredClient) + + checkFound, exists, err := b.backend.HandleExistenceCheck(ctx, logicalReq) + return &pb.HandleExistenceCheckReply{ + CheckFound: checkFound, + Exists: exists, + Err: pb.ErrToProtoErr(err), + }, nil +} + +func (b *backendGRPCPluginServer) Cleanup(ctx context.Context, _ *pb.Empty) (*pb.Empty, error) { + b.backend.Cleanup(ctx) + + // Close rpc clients + b.brokeredClient.Close() + return &pb.Empty{}, nil +} + +func (b *backendGRPCPluginServer) InvalidateKey(ctx context.Context, args *pb.InvalidateKeyArgs) (*pb.Empty, error) { + if pluginutil.InMetadataMode() { + return &pb.Empty{}, ErrServerInMetadataMode + } + + b.backend.InvalidateKey(ctx, args.Key) + return &pb.Empty{}, nil +} + 
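
One pattern worth calling out in this server: every RPC that could touch secrets or storage (HandleRequest, Initialize, HandleExistenceCheck, InvalidateKey) first checks pluginutil.InMetadataMode() and bails with ErrServerInMetadataMode, while Setup, SpecialPaths, Cleanup, and the Type method just below stay callable. That lets Vault launch a plugin only to discover its type and special paths before trusting it with data. A tiny sketch of toggling the mode; the PluginMetadataModeEnv name is an assumption about pluginutil's exported env-var constant, not something shown in this diff:

	package main

	import (
		"fmt"
		"os"

		"github.com/hashicorp/vault/sdk/helper/pluginutil"
	)

	func main() {
		// Assumption: pluginutil exposes the env-var name as
		// PluginMetadataModeEnv ("VAULT_PLUGIN_METADATA_MODE"); Vault sets it
		// to "true" when it only needs the plugin's Type and SpecialPaths.
		os.Setenv(pluginutil.PluginMetadataModeEnv, "true")
		fmt.Println(pluginutil.InMetadataMode()) // true: the guarded handlers above now short-circuit
	}
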
+func (b *backendGRPCPluginServer) Type(ctx context.Context, _ *pb.Empty) (*pb.TypeReply, error) { + return &pb.TypeReply{ + Type: uint32(b.backend.Type()), + }, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_storage.go b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_storage.go new file mode 100644 index 00000000..8eb56650 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_storage.go @@ -0,0 +1,130 @@ +package plugin + +import ( + "context" + "errors" + + "google.golang.org/grpc" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin/pb" +) + +func newGRPCStorageClient(conn *grpc.ClientConn) *GRPCStorageClient { + return &GRPCStorageClient{ + client: pb.NewStorageClient(conn), + } +} + +// GRPCStorageClient is an implementation of logical.Storage that communicates +// over RPC. +type GRPCStorageClient struct { + client pb.StorageClient +} + +func (s *GRPCStorageClient) List(ctx context.Context, prefix string) ([]string, error) { + reply, err := s.client.List(ctx, &pb.StorageListArgs{ + Prefix: prefix, + }, largeMsgGRPCCallOpts...) + if err != nil { + return []string{}, err + } + if reply.Err != "" { + return reply.Keys, errors.New(reply.Err) + } + return reply.Keys, nil +} + +func (s *GRPCStorageClient) Get(ctx context.Context, key string) (*logical.StorageEntry, error) { + reply, err := s.client.Get(ctx, &pb.StorageGetArgs{ + Key: key, + }, largeMsgGRPCCallOpts...) + if err != nil { + return nil, err + } + if reply.Err != "" { + return nil, errors.New(reply.Err) + } + return pb.ProtoStorageEntryToLogicalStorageEntry(reply.Entry), nil +} + +func (s *GRPCStorageClient) Put(ctx context.Context, entry *logical.StorageEntry) error { + reply, err := s.client.Put(ctx, &pb.StoragePutArgs{ + Entry: pb.LogicalStorageEntryToProtoStorageEntry(entry), + }, largeMsgGRPCCallOpts...) + if err != nil { + return err + } + if reply.Err != "" { + return errors.New(reply.Err) + } + return nil +} + +func (s *GRPCStorageClient) Delete(ctx context.Context, key string) error { + reply, err := s.client.Delete(ctx, &pb.StorageDeleteArgs{ + Key: key, + }) + if err != nil { + return err + } + if reply.Err != "" { + return errors.New(reply.Err) + } + return nil +} + +// StorageServer is a net/rpc compatible structure for serving +type GRPCStorageServer struct { + impl logical.Storage +} + +func (s *GRPCStorageServer) List(ctx context.Context, args *pb.StorageListArgs) (*pb.StorageListReply, error) { + keys, err := s.impl.List(ctx, args.Prefix) + return &pb.StorageListReply{ + Keys: keys, + Err: pb.ErrToString(err), + }, nil +} + +func (s *GRPCStorageServer) Get(ctx context.Context, args *pb.StorageGetArgs) (*pb.StorageGetReply, error) { + storageEntry, err := s.impl.Get(ctx, args.Key) + return &pb.StorageGetReply{ + Entry: pb.LogicalStorageEntryToProtoStorageEntry(storageEntry), + Err: pb.ErrToString(err), + }, nil +} + +func (s *GRPCStorageServer) Put(ctx context.Context, args *pb.StoragePutArgs) (*pb.StoragePutReply, error) { + err := s.impl.Put(ctx, pb.ProtoStorageEntryToLogicalStorageEntry(args.Entry)) + return &pb.StoragePutReply{ + Err: pb.ErrToString(err), + }, nil +} + +func (s *GRPCStorageServer) Delete(ctx context.Context, args *pb.StorageDeleteArgs) (*pb.StorageDeleteReply, error) { + err := s.impl.Delete(ctx, args.Key) + return &pb.StorageDeleteReply{ + Err: pb.ErrToString(err), + }, nil +} + +// NOOPStorage is used to deny access to the storage interface while running a +// backend plugin in metadata mode. 
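
This is the client-side twin of the server's metadata gate: during Setup in metadata mode, the real storage view is swapped for NOOPStorage, so reads always miss and writes vanish. A quick sketch of that behavior, using only exported names from this file and sdk/logical; the type definition itself follows:

	package main

	import (
		"context"
		"fmt"

		"github.com/hashicorp/vault/sdk/logical"
		"github.com/hashicorp/vault/sdk/plugin"
	)

	func main() {
		var s logical.Storage = &plugin.NOOPStorage{}
		_ = s.Put(context.Background(), &logical.StorageEntry{Key: "k", Value: []byte("v")})
		entry, err := s.Get(context.Background(), "k")
		fmt.Println(entry, err) // <nil> <nil>: the write went nowhere
	}
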
+type NOOPStorage struct{} + +func (s *NOOPStorage) List(_ context.Context, prefix string) ([]string, error) { + return []string{}, nil +} + +func (s *NOOPStorage) Get(_ context.Context, key string) (*logical.StorageEntry, error) { + return nil, nil +} + +func (s *NOOPStorage) Put(_ context.Context, entry *logical.StorageEntry) error { + return nil +} + +func (s *NOOPStorage) Delete(_ context.Context, key string) error { + return nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_system.go b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_system.go new file mode 100644 index 00000000..9047baff --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_system.go @@ -0,0 +1,250 @@ +package plugin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/license" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin/pb" + "google.golang.org/grpc" +) + +func newGRPCSystemView(conn *grpc.ClientConn) *gRPCSystemViewClient { + return &gRPCSystemViewClient{ + client: pb.NewSystemViewClient(conn), + } +} + +type gRPCSystemViewClient struct { + client pb.SystemViewClient +} + +func (s *gRPCSystemViewClient) DefaultLeaseTTL() time.Duration { + reply, err := s.client.DefaultLeaseTTL(context.Background(), &pb.Empty{}) + if err != nil { + return 0 + } + + return time.Duration(reply.TTL) +} + +func (s *gRPCSystemViewClient) MaxLeaseTTL() time.Duration { + reply, err := s.client.MaxLeaseTTL(context.Background(), &pb.Empty{}) + if err != nil { + return 0 + } + + return time.Duration(reply.TTL) +} + +func (s *gRPCSystemViewClient) Tainted() bool { + reply, err := s.client.Tainted(context.Background(), &pb.Empty{}) + if err != nil { + return false + } + + return reply.Tainted +} + +func (s *gRPCSystemViewClient) CachingDisabled() bool { + reply, err := s.client.CachingDisabled(context.Background(), &pb.Empty{}) + if err != nil { + return false + } + + return reply.Disabled +} + +func (s *gRPCSystemViewClient) ReplicationState() consts.ReplicationState { + reply, err := s.client.ReplicationState(context.Background(), &pb.Empty{}) + if err != nil { + return consts.ReplicationUnknown + } + + return consts.ReplicationState(reply.State) +} + +func (s *gRPCSystemViewClient) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + buf, err := json.Marshal(data) + if err != nil { + return nil, err + } + + reply, err := s.client.ResponseWrapData(ctx, &pb.ResponseWrapDataArgs{ + Data: string(buf[:]), + TTL: int64(ttl), + JWT: false, + }) + if err != nil { + return nil, err + } + if reply.Err != "" { + return nil, errors.New(reply.Err) + } + + info, err := pb.ProtoResponseWrapInfoToLogicalResponseWrapInfo(reply.WrapInfo) + if err != nil { + return nil, err + } + + return info, nil +} + +func (s *gRPCSystemViewClient) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) { + return nil, fmt.Errorf("cannot call LookupPlugin from a plugin backend") +} + +func (s *gRPCSystemViewClient) MlockEnabled() bool { + reply, err := s.client.MlockEnabled(context.Background(), &pb.Empty{}) + if err != nil { + return false + } + + return reply.Enabled +} + +func (s *gRPCSystemViewClient) HasFeature(feature license.Features) bool { + // Not 
implemented + return false +} + +func (s *gRPCSystemViewClient) LocalMount() bool { + reply, err := s.client.LocalMount(context.Background(), &pb.Empty{}) + if err != nil { + return false + } + + return reply.Local +} + +func (s *gRPCSystemViewClient) EntityInfo(entityID string) (*logical.Entity, error) { + reply, err := s.client.EntityInfo(context.Background(), &pb.EntityInfoArgs{ + EntityID: entityID, + }) + if err != nil { + return nil, err + } + if reply.Err != "" { + return nil, errors.New(reply.Err) + } + + return reply.Entity, nil +} + +func (s *gRPCSystemViewClient) PluginEnv(ctx context.Context) (*logical.PluginEnvironment, error) { + reply, err := s.client.PluginEnv(ctx, &pb.Empty{}) + if err != nil { + return nil, err + } + + return reply.PluginEnvironment, nil +} + +type gRPCSystemViewServer struct { + impl logical.SystemView +} + +func (s *gRPCSystemViewServer) DefaultLeaseTTL(ctx context.Context, _ *pb.Empty) (*pb.TTLReply, error) { + ttl := s.impl.DefaultLeaseTTL() + return &pb.TTLReply{ + TTL: int64(ttl), + }, nil +} + +func (s *gRPCSystemViewServer) MaxLeaseTTL(ctx context.Context, _ *pb.Empty) (*pb.TTLReply, error) { + ttl := s.impl.MaxLeaseTTL() + return &pb.TTLReply{ + TTL: int64(ttl), + }, nil +} + +func (s *gRPCSystemViewServer) Tainted(ctx context.Context, _ *pb.Empty) (*pb.TaintedReply, error) { + tainted := s.impl.Tainted() + return &pb.TaintedReply{ + Tainted: tainted, + }, nil +} + +func (s *gRPCSystemViewServer) CachingDisabled(ctx context.Context, _ *pb.Empty) (*pb.CachingDisabledReply, error) { + cachingDisabled := s.impl.CachingDisabled() + return &pb.CachingDisabledReply{ + Disabled: cachingDisabled, + }, nil +} + +func (s *gRPCSystemViewServer) ReplicationState(ctx context.Context, _ *pb.Empty) (*pb.ReplicationStateReply, error) { + replicationState := s.impl.ReplicationState() + return &pb.ReplicationStateReply{ + State: int32(replicationState), + }, nil +} + +func (s *gRPCSystemViewServer) ResponseWrapData(ctx context.Context, args *pb.ResponseWrapDataArgs) (*pb.ResponseWrapDataReply, error) { + data := map[string]interface{}{} + err := json.Unmarshal([]byte(args.Data), &data) + if err != nil { + return &pb.ResponseWrapDataReply{}, err + } + + // Do not allow JWTs to be returned + info, err := s.impl.ResponseWrapData(ctx, data, time.Duration(args.TTL), false) + if err != nil { + return &pb.ResponseWrapDataReply{ + Err: pb.ErrToString(err), + }, nil + } + + pbInfo, err := pb.LogicalResponseWrapInfoToProtoResponseWrapInfo(info) + if err != nil { + return &pb.ResponseWrapDataReply{}, err + } + + return &pb.ResponseWrapDataReply{ + WrapInfo: pbInfo, + }, nil +} + +func (s *gRPCSystemViewServer) MlockEnabled(ctx context.Context, _ *pb.Empty) (*pb.MlockEnabledReply, error) { + enabled := s.impl.MlockEnabled() + return &pb.MlockEnabledReply{ + Enabled: enabled, + }, nil +} + +func (s *gRPCSystemViewServer) LocalMount(ctx context.Context, _ *pb.Empty) (*pb.LocalMountReply, error) { + local := s.impl.LocalMount() + return &pb.LocalMountReply{ + Local: local, + }, nil +} + +func (s *gRPCSystemViewServer) EntityInfo(ctx context.Context, args *pb.EntityInfoArgs) (*pb.EntityInfoReply, error) { + entity, err := s.impl.EntityInfo(args.EntityID) + if err != nil { + return &pb.EntityInfoReply{ + Err: pb.ErrToString(err), + }, nil + } + return &pb.EntityInfoReply{ + Entity: entity, + }, nil +} + +func (s *gRPCSystemViewServer) PluginEnv(ctx context.Context, _ *pb.Empty) (*pb.PluginEnvReply, error) { + pluginEnv, err := s.impl.PluginEnv(ctx) + if err != nil { + return 
&pb.PluginEnvReply{ + Err: pb.ErrToString(err), + }, nil + } + return &pb.PluginEnvReply{ + PluginEnvironment: pluginEnv, + }, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/logger.go b/vendor/github.com/hashicorp/vault/sdk/plugin/logger.go new file mode 100644 index 00000000..a59a8a3d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/logger.go @@ -0,0 +1,134 @@ +package plugin + +import hclog "github.com/hashicorp/go-hclog" + +type LoggerServer struct { + logger hclog.Logger +} + +func (l *LoggerServer) Trace(args *LoggerArgs, _ *struct{}) error { + l.logger.Trace(args.Msg, args.Args...) + return nil +} + +func (l *LoggerServer) Debug(args *LoggerArgs, _ *struct{}) error { + l.logger.Debug(args.Msg, args.Args...) + return nil +} + +func (l *LoggerServer) Info(args *LoggerArgs, _ *struct{}) error { + l.logger.Info(args.Msg, args.Args...) + return nil +} + +func (l *LoggerServer) Warn(args *LoggerArgs, reply *LoggerReply) error { + l.logger.Warn(args.Msg, args.Args...) + return nil +} + +func (l *LoggerServer) Error(args *LoggerArgs, reply *LoggerReply) error { + l.logger.Error(args.Msg, args.Args...) + return nil +} + +func (l *LoggerServer) Log(args *LoggerArgs, _ *struct{}) error { + + switch translateLevel(args.Level) { + + case hclog.Trace: + l.logger.Trace(args.Msg, args.Args...) + + case hclog.Debug: + l.logger.Debug(args.Msg, args.Args...) + + case hclog.Info: + l.logger.Info(args.Msg, args.Args...) + + case hclog.Warn: + l.logger.Warn(args.Msg, args.Args...) + + case hclog.Error: + l.logger.Error(args.Msg, args.Args...) + + case hclog.NoLevel: + } + return nil +} + +func (l *LoggerServer) SetLevel(args int, _ *struct{}) error { + level := translateLevel(args) + l.logger = hclog.New(&hclog.LoggerOptions{Level: level}) + return nil +} + +func (l *LoggerServer) IsTrace(args interface{}, reply *LoggerReply) error { + result := l.logger.IsTrace() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +func (l *LoggerServer) IsDebug(args interface{}, reply *LoggerReply) error { + result := l.logger.IsDebug() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +func (l *LoggerServer) IsInfo(args interface{}, reply *LoggerReply) error { + result := l.logger.IsInfo() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +func (l *LoggerServer) IsWarn(args interface{}, reply *LoggerReply) error { + result := l.logger.IsWarn() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +type LoggerArgs struct { + Level int + Msg string + Args []interface{} +} + +// LoggerReply contains the RPC reply. Not all fields may be used +// for a particular RPC call. 
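
Callers of this net/rpc logger shim still speak logxi's numeric levels, which Log and SetLevel funnel through translateLevel (defined after the LoggerReply type below) into hclog levels. On the receiving side, SetLevel amounts to rebuilding the logger at the translated level, which with go-hclog directly looks like this sketch:

	package main

	import hclog "github.com/hashicorp/go-hclog"

	func main() {
		// Equivalent of SetLevel(4): logxi level 4 translates to hclog.Warn.
		l := hclog.New(&hclog.LoggerOptions{Level: hclog.Warn})
		l.Info("suppressed: below the configured level")
		l.Warn("emitted", "level", "warn")
	}
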
+type LoggerReply struct { + IsTrue bool + Error error +} + +func translateLevel(logxiLevel int) hclog.Level { + + switch logxiLevel { + + case 1000, 10: + // logxi.LevelAll, logxi.LevelTrace: + return hclog.Trace + + case 7: + // logxi.LevelDebug: + return hclog.Debug + + case 6, 5: + // logxi.LevelInfo, logxi.LevelNotice: + return hclog.Info + + case 4: + // logxi.LevelWarn: + return hclog.Warn + + case 3, 2, 1, -1: + // logxi.LevelError, logxi.LevelFatal, logxi.LevelAlert, logxi.LevelEmergency: + return hclog.Error + } + return hclog.NoLevel +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/middleware.go b/vendor/github.com/hashicorp/vault/sdk/plugin/middleware.go new file mode 100644 index 00000000..04a6f4c5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/middleware.go @@ -0,0 +1,100 @@ +package plugin + +import ( + "context" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/logical" +) + +// backendPluginClient implements logical.Backend and is the +// go-plugin client. +type backendTracingMiddleware struct { + logger log.Logger + + next logical.Backend +} + +// Validate the backendTracingMiddle object satisfies the backend interface +var _ logical.Backend = &backendTracingMiddleware{} + +func (b *backendTracingMiddleware) Initialize(ctx context.Context, req *logical.InitializationRequest) (err error) { + defer func(then time.Time) { + b.logger.Trace("initialize", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + b.logger.Trace("initialize", "status", "started") + return b.next.Initialize(ctx, req) +} + +func (b *backendTracingMiddleware) HandleRequest(ctx context.Context, req *logical.Request) (resp *logical.Response, err error) { + defer func(then time.Time) { + b.logger.Trace("handle request", "path", req.Path, "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + b.logger.Trace("handle request", "path", req.Path, "status", "started") + return b.next.HandleRequest(ctx, req) +} + +func (b *backendTracingMiddleware) SpecialPaths() *logical.Paths { + defer func(then time.Time) { + b.logger.Trace("special paths", "status", "finished", "took", time.Since(then)) + }(time.Now()) + + b.logger.Trace("special paths", "status", "started") + return b.next.SpecialPaths() +} + +func (b *backendTracingMiddleware) System() logical.SystemView { + return b.next.System() +} + +func (b *backendTracingMiddleware) Logger() log.Logger { + return b.next.Logger() +} + +func (b *backendTracingMiddleware) HandleExistenceCheck(ctx context.Context, req *logical.Request) (found bool, exists bool, err error) { + defer func(then time.Time) { + b.logger.Trace("handle existence check", "path", req.Path, "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + b.logger.Trace("handle existence check", "path", req.Path, "status", "started") + return b.next.HandleExistenceCheck(ctx, req) +} + +func (b *backendTracingMiddleware) Cleanup(ctx context.Context) { + defer func(then time.Time) { + b.logger.Trace("cleanup", "status", "finished", "took", time.Since(then)) + }(time.Now()) + + b.logger.Trace("cleanup", "status", "started") + b.next.Cleanup(ctx) +} + +func (b *backendTracingMiddleware) InvalidateKey(ctx context.Context, key string) { + defer func(then time.Time) { + b.logger.Trace("invalidate key", "key", key, "status", "finished", "took", time.Since(then)) + }(time.Now()) + + b.logger.Trace("invalidate key", "key", key, "status", "started") + b.next.InvalidateKey(ctx, key) +} 
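
backendTracingMiddleware is a plain decorator over logical.Backend: every method delegates to next, bracketed by started and finished trace lines plus a duration. The same shape in miniature, standalone; Doer, traced, and noop are illustrative names only:

	package main

	import (
		"fmt"
		"time"
	)

	type Doer interface{ Do() error }

	// traced wraps another Doer, logging start, finish, error, and elapsed time.
	type traced struct{ next Doer }

	func (t traced) Do() (err error) {
		defer func(then time.Time) {
			fmt.Println("do", "status", "finished", "err", err, "took", time.Since(then))
		}(time.Now())
		fmt.Println("do", "status", "started")
		return t.next.Do()
	}

	type noop struct{}

	func (noop) Do() error { return nil }

	func main() { _ = traced{next: noop{}}.Do() }

The deferred closure receives time.Now() as an argument, so the duration is measured from method entry even though the log line runs last; the vendored middleware uses the same idiom.
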
+ +func (b *backendTracingMiddleware) Setup(ctx context.Context, config *logical.BackendConfig) (err error) { + defer func(then time.Time) { + b.logger.Trace("setup", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + b.logger.Trace("setup", "status", "started") + return b.next.Setup(ctx, config) +} + +func (b *backendTracingMiddleware) Type() logical.BackendType { + defer func(then time.Time) { + b.logger.Trace("type", "status", "finished", "took", time.Since(then)) + }(time.Now()) + + b.logger.Trace("type", "status", "started") + return b.next.Type() +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/pb/backend.pb.go b/vendor/github.com/hashicorp/vault/sdk/plugin/pb/backend.pb.go new file mode 100644 index 00000000..566bf63f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/pb/backend.pb.go @@ -0,0 +1,3879 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: sdk/plugin/pb/backend.proto + +package pb + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + logical "github.com/hashicorp/vault/sdk/logical" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{0} +} + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +type Header struct { + Header []string `sentinel:"" protobuf:"bytes,1,rep,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{1} +} + +func (m *Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Header.Unmarshal(m, b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) 
XXX_Size() int { + return xxx_messageInfo_Header.Size(m) +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +func (m *Header) GetHeader() []string { + if m != nil { + return m.Header + } + return nil +} + +type ProtoError struct { + // Error type can be one of: + // ErrTypeUnknown uint32 = iota + // ErrTypeUserError + // ErrTypeInternalError + // ErrTypeCodedError + // ErrTypeStatusBadRequest + // ErrTypeUnsupportedOperation + // ErrTypeUnsupportedPath + // ErrTypeInvalidRequest + // ErrTypePermissionDenied + // ErrTypeMultiAuthzPending + ErrType uint32 `sentinel:"" protobuf:"varint,1,opt,name=err_type,json=errType,proto3" json:"err_type,omitempty"` + ErrMsg string `sentinel:"" protobuf:"bytes,2,opt,name=err_msg,json=errMsg,proto3" json:"err_msg,omitempty"` + ErrCode int64 `sentinel:"" protobuf:"varint,3,opt,name=err_code,json=errCode,proto3" json:"err_code,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProtoError) Reset() { *m = ProtoError{} } +func (m *ProtoError) String() string { return proto.CompactTextString(m) } +func (*ProtoError) ProtoMessage() {} +func (*ProtoError) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{2} +} + +func (m *ProtoError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProtoError.Unmarshal(m, b) +} +func (m *ProtoError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProtoError.Marshal(b, m, deterministic) +} +func (m *ProtoError) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProtoError.Merge(m, src) +} +func (m *ProtoError) XXX_Size() int { + return xxx_messageInfo_ProtoError.Size(m) +} +func (m *ProtoError) XXX_DiscardUnknown() { + xxx_messageInfo_ProtoError.DiscardUnknown(m) +} + +var xxx_messageInfo_ProtoError proto.InternalMessageInfo + +func (m *ProtoError) GetErrType() uint32 { + if m != nil { + return m.ErrType + } + return 0 +} + +func (m *ProtoError) GetErrMsg() string { + if m != nil { + return m.ErrMsg + } + return "" +} + +func (m *ProtoError) GetErrCode() int64 { + if m != nil { + return m.ErrCode + } + return 0 +} + +// Paths is the structure of special paths that is used for SpecialPaths. +type Paths struct { + // Root are the paths that require a root token to access + Root []string `sentinel:"" protobuf:"bytes,1,rep,name=root,proto3" json:"root,omitempty"` + // Unauthenticated are the paths that can be accessed without any auth. + Unauthenticated []string `sentinel:"" protobuf:"bytes,2,rep,name=unauthenticated,proto3" json:"unauthenticated,omitempty"` + // LocalStorage are paths (prefixes) that are local to this instance; this + // indicates that these paths should not be replicated + LocalStorage []string `sentinel:"" protobuf:"bytes,3,rep,name=local_storage,json=localStorage,proto3" json:"local_storage,omitempty"` + // SealWrapStorage are storage paths that, when using a capable seal, + // should be seal wrapped with extra encryption. It is exact matching + // unless it ends with '/' in which case it will be treated as a prefix. 
+	SealWrapStorage []string `sentinel:"" protobuf:"bytes,4,rep,name=seal_wrap_storage,json=sealWrapStorage,proto3" json:"seal_wrap_storage,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Paths) Reset()         { *m = Paths{} }
+func (m *Paths) String() string { return proto.CompactTextString(m) }
+func (*Paths) ProtoMessage()    {}
+func (*Paths) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{3}
+}
+
+func (m *Paths) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Paths.Unmarshal(m, b)
+}
+func (m *Paths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Paths.Marshal(b, m, deterministic)
+}
+func (m *Paths) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Paths.Merge(m, src)
+}
+func (m *Paths) XXX_Size() int {
+	return xxx_messageInfo_Paths.Size(m)
+}
+func (m *Paths) XXX_DiscardUnknown() {
+	xxx_messageInfo_Paths.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Paths proto.InternalMessageInfo
+
+func (m *Paths) GetRoot() []string {
+	if m != nil {
+		return m.Root
+	}
+	return nil
+}
+
+func (m *Paths) GetUnauthenticated() []string {
+	if m != nil {
+		return m.Unauthenticated
+	}
+	return nil
+}
+
+func (m *Paths) GetLocalStorage() []string {
+	if m != nil {
+		return m.LocalStorage
+	}
+	return nil
+}
+
+func (m *Paths) GetSealWrapStorage() []string {
+	if m != nil {
+		return m.SealWrapStorage
+	}
+	return nil
+}
+
+type Request struct {
+	// ID is the uuid associated with each request
+	ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// If set, the name given to the replication secondary where this request
+	// originated
+	ReplicationCluster string `sentinel:"" protobuf:"bytes,2,opt,name=ReplicationCluster,proto3" json:"ReplicationCluster,omitempty"`
+	// Operation is the requested operation type
+	Operation string `sentinel:"" protobuf:"bytes,3,opt,name=operation,proto3" json:"operation,omitempty"`
+	// Path is the part of the request path not consumed by the
+	// routing. As an example, if the original request path is "prod/aws/foo"
+	// and the AWS logical backend is mounted at "prod/aws/", then the
+	// final path is "foo" since the mount prefix is trimmed.
+	Path string `sentinel:"" protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`
+	// Request data is a JSON object that must have keys with string type.
+	Data string `sentinel:"" protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"`
+	// Secret will be non-nil only for Revoke and Renew operations
+	// to represent the secret that was returned prior.
+	Secret *Secret `sentinel:"" protobuf:"bytes,6,opt,name=secret,proto3" json:"secret,omitempty"`
+	// Auth will be non-nil only for Renew operations
+	// to represent the auth that was returned prior.
+	Auth *Auth `sentinel:"" protobuf:"bytes,7,opt,name=auth,proto3" json:"auth,omitempty"`
+	// Headers will contain the http headers from the request. This value will
+	// be used in the audit broker to ensure we are auditing only the allowed
+	// headers.
+	Headers map[string]*Header `sentinel:"" protobuf:"bytes,8,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// ClientToken is provided to the core so that the identity
+	// can be verified and ACLs applied. This value is passed
+	// through to the logical backends but after being salted and
+	// hashed.
+	ClientToken string `sentinel:"" protobuf:"bytes,9,opt,name=client_token,json=clientToken,proto3" json:"client_token,omitempty"`
+	// ClientTokenAccessor is provided to the core so that it can get
+	// logged as part of request audit logging.
+	ClientTokenAccessor string `sentinel:"" protobuf:"bytes,10,opt,name=client_token_accessor,json=clientTokenAccessor,proto3" json:"client_token_accessor,omitempty"`
+	// DisplayName is provided to the logical backend to help associate
+	// dynamic secrets with the source entity. This is not a sensitive
+	// name, but is useful for operators.
+	DisplayName string `sentinel:"" protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+	// MountPoint is provided so that a logical backend can generate
+	// paths relative to itself. The `Path` is effectively the client
+	// request path with the MountPoint trimmed off.
+	MountPoint string `sentinel:"" protobuf:"bytes,12,opt,name=mount_point,json=mountPoint,proto3" json:"mount_point,omitempty"`
+	// MountType is provided so that a logical backend can make decisions
+	// based on the specific mount type (e.g., if a mount type has different
+	// aliases, generating different defaults depending on the alias)
+	MountType string `sentinel:"" protobuf:"bytes,13,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"`
+	// MountAccessor is provided so that identities returned by the authentication
+	// backends can be tied to the mount they belong to.
+	MountAccessor string `sentinel:"" protobuf:"bytes,14,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"`
+	// WrapInfo contains requested response wrapping parameters
+	WrapInfo *RequestWrapInfo `sentinel:"" protobuf:"bytes,15,opt,name=wrap_info,json=wrapInfo,proto3" json:"wrap_info,omitempty"`
+	// ClientTokenRemainingUses represents the allowed number of uses left on the
+	// token supplied
+	ClientTokenRemainingUses int64 `sentinel:"" protobuf:"varint,16,opt,name=client_token_remaining_uses,json=clientTokenRemainingUses,proto3" json:"client_token_remaining_uses,omitempty"`
+	// EntityID is the identity of the caller extracted out of the token used
+	// to make this request
+	EntityID string `sentinel:"" protobuf:"bytes,17,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
+	// PolicyOverride indicates that the requestor wishes to override
+	// soft-mandatory Sentinel policies
+	PolicyOverride bool `sentinel:"" protobuf:"varint,18,opt,name=policy_override,json=policyOverride,proto3" json:"policy_override,omitempty"`
+	// Whether the request is unauthenticated, as in, had no client token
+	// attached. Useful in some situations where the client token is not made
+	// accessible.
+	Unauthenticated bool `sentinel:"" protobuf:"varint,19,opt,name=unauthenticated,proto3" json:"unauthenticated,omitempty"`
+	// Connection will be non-nil only for credential providers to
+	// inspect the connection information and potentially use it for
+	// authentication/protection.
+	Connection *Connection `sentinel:"" protobuf:"bytes,20,opt,name=connection,proto3" json:"connection,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Request) Reset()         { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage()    {}
+func (*Request) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{4}
+}
+
+func (m *Request) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Request.Unmarshal(m, b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+}
+func (m *Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Request.Merge(m, src)
+}
+func (m *Request) XXX_Size() int {
+	return xxx_messageInfo_Request.Size(m)
+}
+func (m *Request) XXX_DiscardUnknown() {
+	xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
+
+func (m *Request) GetID() string {
+	if m != nil {
+		return m.ID
+	}
+	return ""
+}
+
+func (m *Request) GetReplicationCluster() string {
+	if m != nil {
+		return m.ReplicationCluster
+	}
+	return ""
+}
+
+func (m *Request) GetOperation() string {
+	if m != nil {
+		return m.Operation
+	}
+	return ""
+}
+
+func (m *Request) GetPath() string {
+	if m != nil {
+		return m.Path
+	}
+	return ""
+}
+
+func (m *Request) GetData() string {
+	if m != nil {
+		return m.Data
+	}
+	return ""
+}
+
+func (m *Request) GetSecret() *Secret {
+	if m != nil {
+		return m.Secret
+	}
+	return nil
+}
+
+func (m *Request) GetAuth() *Auth {
+	if m != nil {
+		return m.Auth
+	}
+	return nil
+}
+
+func (m *Request) GetHeaders() map[string]*Header {
+	if m != nil {
+		return m.Headers
+	}
+	return nil
+}
+
+func (m *Request) GetClientToken() string {
+	if m != nil {
+		return m.ClientToken
+	}
+	return ""
+}
+
+func (m *Request) GetClientTokenAccessor() string {
+	if m != nil {
+		return m.ClientTokenAccessor
+	}
+	return ""
+}
+
+func (m *Request) GetDisplayName() string {
+	if m != nil {
+		return m.DisplayName
+	}
+	return ""
+}
+
+func (m *Request) GetMountPoint() string {
+	if m != nil {
+		return m.MountPoint
+	}
+	return ""
+}
+
+func (m *Request) GetMountType() string {
+	if m != nil {
+		return m.MountType
+	}
+	return ""
+}
+
+func (m *Request) GetMountAccessor() string {
+	if m != nil {
+		return m.MountAccessor
+	}
+	return ""
+}
+
+func (m *Request) GetWrapInfo() *RequestWrapInfo {
+	if m != nil {
+		return m.WrapInfo
+	}
+	return nil
+}
+
+func (m *Request) GetClientTokenRemainingUses() int64 {
+	if m != nil {
+		return m.ClientTokenRemainingUses
+	}
+	return 0
+}
+
+func (m *Request) GetEntityID() string {
+	if m != nil {
+		return m.EntityID
+	}
+	return ""
+}
+
+func (m *Request) GetPolicyOverride() bool {
+	if m != nil {
+		return m.PolicyOverride
+	}
+	return false
+}
+
+func (m *Request) GetUnauthenticated() bool {
+	if m != nil {
+		return m.Unauthenticated
+	}
+	return false
+}
+
+func (m *Request) GetConnection() *Connection {
+	if m != nil {
+		return m.Connection
+	}
+	return nil
+}
+
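Reviewer note: two conventions in the generated Request message are easy to miss. Every getter guards against a nil receiver, so field access never panics on an absent message, and Data arrives as a JSON object serialized into a string rather than a structured map. A minimal sketch, assuming the vendored import path for this file (the payload and decode target are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/vault/sdk/plugin/pb" // package vendored by this diff
)

func main() {
	// Nil-safe getters: a nil *pb.Request yields zero values, no panic.
	var missing *pb.Request
	fmt.Println(missing.GetPath() == "") // true

	// Data is a JSON object flattened into a string for transport, so it
	// must be decoded before use; this payload is made up.
	req := &pb.Request{
		Operation: "update",
		Path:      "roles/demo",
		Data:      `{"ttl":"1h","policies":["default"]}`,
	}
	var data map[string]interface{}
	if err := json.Unmarshal([]byte(req.GetData()), &data); err != nil {
		panic(err)
	}
	fmt.Println(data["ttl"]) // 1h
}
```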
+type Auth struct {
+	LeaseOptions *LeaseOptions `sentinel:"" protobuf:"bytes,1,opt,name=lease_options,json=leaseOptions,proto3" json:"lease_options,omitempty"`
+	// InternalData is a JSON object that is stored with the auth struct.
+	// This will be sent back during a Renew/Revoke for storing internal data
+	// used for those operations.
+	InternalData string `sentinel:"" protobuf:"bytes,2,opt,name=internal_data,json=internalData,proto3" json:"internal_data,omitempty"`
+	// DisplayName is a non-security sensitive identifier that is
+	// applicable to this Auth. It is used for logging and prefixing
+	// of dynamic secrets. For example, DisplayName may be "armon" for
+	// the github credential backend. If the client token is used to
+	// generate a SQL credential, the user may be "github-armon-uuid".
+	// This is to help identify the source without using audit tables.
+	DisplayName string `sentinel:"" protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+	// Policies is the list of policies that the authenticated user
+	// is associated with.
+	Policies []string `sentinel:"" protobuf:"bytes,4,rep,name=policies,proto3" json:"policies,omitempty"`
+	// Metadata is used to attach arbitrary string-type metadata to
+	// an authenticated user. This metadata will be outputted into the
+	// audit log.
+	Metadata map[string]string `sentinel:"" protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// ClientToken is the token that is generated for the authentication.
+	// This will be filled in by Vault core when an auth structure is
+	// returned. Setting this manually will have no effect.
+	ClientToken string `sentinel:"" protobuf:"bytes,6,opt,name=client_token,json=clientToken,proto3" json:"client_token,omitempty"`
+	// Accessor is the identifier for the ClientToken. This can be used
+	// to perform management functionalities (especially revocation) when
+	// the ClientToken in the audit logs is obfuscated. Accessor can be used
+	// to revoke a ClientToken and to lookup the capabilities of the ClientToken,
+	// both without actually knowing the ClientToken.
+	Accessor string `sentinel:"" protobuf:"bytes,7,opt,name=accessor,proto3" json:"accessor,omitempty"`
+	// Period indicates that the token generated using this Auth object
+	// should never expire. The token should be renewed within the duration
+	// specified by this period.
+	Period int64 `sentinel:"" protobuf:"varint,8,opt,name=period,proto3" json:"period,omitempty"`
+	// Number of allowed uses of the issued token
+	NumUses int64 `sentinel:"" protobuf:"varint,9,opt,name=num_uses,json=numUses,proto3" json:"num_uses,omitempty"`
+	// EntityID is the identifier of the entity in the identity store to which
+	// the identity of the authenticating client belongs.
+	EntityID string `sentinel:"" protobuf:"bytes,10,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
+	// Alias is the information about the authenticated client returned by
+	// the auth backend
+	Alias *logical.Alias `sentinel:"" protobuf:"bytes,11,opt,name=alias,proto3" json:"alias,omitempty"`
+	// GroupAliases are the informational mappings of external groups which an
+	// authenticated user belongs to. This is used to check if there are
+	// mappings for the group aliases in the identity store. For all the
+	// matching groups, the entity ID of the user will be added.
+	GroupAliases []*logical.Alias `sentinel:"" protobuf:"bytes,12,rep,name=group_aliases,json=groupAliases,proto3" json:"group_aliases,omitempty"`
+	// If set, restricts usage of the certificates to client IPs falling within
+	// the range of the specified CIDR(s).
+	BoundCIDRs []string `sentinel:"" protobuf:"bytes,13,rep,name=bound_cidrs,json=boundCidrs,proto3" json:"bound_cidrs,omitempty"`
+	// TokenPolicies and IdentityPolicies break down the list in Policies to
+	// help determine where a policy was sourced
+	TokenPolicies []string `sentinel:"" protobuf:"bytes,14,rep,name=token_policies,json=tokenPolicies,proto3" json:"token_policies,omitempty"`
+	IdentityPolicies []string `sentinel:"" protobuf:"bytes,15,rep,name=identity_policies,json=identityPolicies,proto3" json:"identity_policies,omitempty"`
+	// Explicit maximum lifetime for the token. Unlike normal TTLs, the maximum
+	// TTL is a hard limit and cannot be exceeded; this also applies to periodic tokens.
+	ExplicitMaxTTL int64 `sentinel:"" protobuf:"varint,16,opt,name=explicit_max_ttl,json=explicitMaxTtl,proto3" json:"explicit_max_ttl,omitempty"`
+	// TokenType is the type of token being requested
+	TokenType uint32 `sentinel:"" protobuf:"varint,17,opt,name=token_type,json=tokenType,proto3" json:"token_type,omitempty"`
+	// Whether the default policy should be added automatically by core
+	NoDefaultPolicy bool `sentinel:"" protobuf:"varint,18,opt,name=no_default_policy,json=noDefaultPolicy,proto3" json:"no_default_policy,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Auth) Reset()         { *m = Auth{} }
+func (m *Auth) String() string { return proto.CompactTextString(m) }
+func (*Auth) ProtoMessage()    {}
+func (*Auth) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{5}
+}
+
+func (m *Auth) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Auth.Unmarshal(m, b)
+}
+func (m *Auth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Auth.Marshal(b, m, deterministic)
+}
+func (m *Auth) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Auth.Merge(m, src)
+}
+func (m *Auth) XXX_Size() int {
+	return xxx_messageInfo_Auth.Size(m)
+}
+func (m *Auth) XXX_DiscardUnknown() {
+	xxx_messageInfo_Auth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Auth proto.InternalMessageInfo
+
+func (m *Auth) GetLeaseOptions() *LeaseOptions {
+	if m != nil {
+		return m.LeaseOptions
+	}
+	return nil
+}
+
+func (m *Auth) GetInternalData() string {
+	if m != nil {
+		return m.InternalData
+	}
+	return ""
+}
+
+func (m *Auth) GetDisplayName() string {
+	if m != nil {
+		return m.DisplayName
+	}
+	return ""
+}
+
+func (m *Auth) GetPolicies() []string {
+	if m != nil {
+		return m.Policies
+	}
+	return nil
+}
+
+func (m *Auth) GetMetadata() map[string]string {
+	if m != nil {
+		return m.Metadata
+	}
+	return nil
+}
+
+func (m *Auth) GetClientToken() string {
+	if m != nil {
+		return m.ClientToken
+	}
+	return ""
+}
+
+func (m *Auth) GetAccessor() string {
+	if m != nil {
+		return m.Accessor
+	}
+	return ""
+}
+
+func (m *Auth) GetPeriod() int64 {
+	if m != nil {
+		return m.Period
+	}
+	return 0
+}
+
+func (m *Auth) GetNumUses() int64 {
+	if m != nil {
+		return m.NumUses
+	}
+	return 0
+}
+
+func (m *Auth) GetEntityID() string {
+	if m != nil {
+		return m.EntityID
+	}
+	return ""
+}
+
+func (m *Auth) GetAlias() *logical.Alias {
+	if m != nil {
+		return m.Alias
+	}
+	return nil
+}
+
+func (m *Auth) GetGroupAliases() []*logical.Alias {
+	if m != nil {
+		return m.GroupAliases
+	}
+	return nil
+}
+
+func (m *Auth) GetBoundCIDRs() []string {
+	if m != nil {
+		return m.BoundCIDRs
+	}
+	return nil
+}
+
+func (m *Auth) GetTokenPolicies() []string {
+	if m != nil {
+		return m.TokenPolicies
+	}
+	return nil
+}
+
+func (m *Auth) GetIdentityPolicies() []string {
+	if m != nil {
+		return m.IdentityPolicies
+	}
+	return nil
+}
+
+func (m *Auth) GetExplicitMaxTTL() int64 {
+	if m != nil {
+		return m.ExplicitMaxTTL
+	}
+	return 0
+}
+
+func (m *Auth) GetTokenType() uint32 {
+	if m != nil {
+		return m.TokenType
+	}
+	return 0
+}
+
+func (m *Auth) GetNoDefaultPolicy() bool {
+	if m != nil {
+		return m.NoDefaultPolicy
+	}
+	return false
+}
+
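For orientation, here is a hedged sketch of how an auth backend might populate this message after a successful login. All values are placeholders, and ClientToken/Accessor are deliberately left zero because, per the comments above, Vault core fills them in:

```go
package authplugin

import "github.com/hashicorp/vault/sdk/plugin/pb" // path assumed from this vendor tree

// loginAuth sketches the message an auth backend might return after
// verifying a login; the surrounding RPC plumbing is omitted.
func loginAuth() *pb.Auth {
	return &pb.Auth{
		DisplayName: "armon",                             // non-sensitive; used for log/secret prefixes
		Policies:    []string{"default", "dev-readonly"}, // policies to attach to the issued token
		Metadata:    map[string]string{"org": "example"}, // written to the audit log
		NumUses:     0,                                   // 0 means unlimited uses
		// Period is an int64 on the wire; its duration unit follows the
		// host's convention and is not defined by this generated file.
	}
}
```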
+type TokenEntry struct {
+	ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Accessor string `sentinel:"" protobuf:"bytes,2,opt,name=accessor,proto3" json:"accessor,omitempty"`
+	Parent string `sentinel:"" protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"`
+	Policies []string `sentinel:"" protobuf:"bytes,4,rep,name=policies,proto3" json:"policies,omitempty"`
+	Path string `sentinel:"" protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"`
+	Meta map[string]string `sentinel:"" protobuf:"bytes,6,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	DisplayName string `sentinel:"" protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+	NumUses int64 `sentinel:"" protobuf:"varint,8,opt,name=num_uses,json=numUses,proto3" json:"num_uses,omitempty"`
+	CreationTime int64 `sentinel:"" protobuf:"varint,9,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
+	TTL int64 `sentinel:"" protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"`
+	ExplicitMaxTTL int64 `sentinel:"" protobuf:"varint,11,opt,name=explicit_max_ttl,json=explicitMaxTtl,proto3" json:"explicit_max_ttl,omitempty"`
+	Role string `sentinel:"" protobuf:"bytes,12,opt,name=role,proto3" json:"role,omitempty"`
+	Period int64 `sentinel:"" protobuf:"varint,13,opt,name=period,proto3" json:"period,omitempty"`
+	EntityID string `sentinel:"" protobuf:"bytes,14,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
+	BoundCIDRs []string `sentinel:"" protobuf:"bytes,15,rep,name=bound_cidrs,json=boundCidrs,proto3" json:"bound_cidrs,omitempty"`
+	NamespaceID string `sentinel:"" protobuf:"bytes,16,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
+	CubbyholeID string `sentinel:"" protobuf:"bytes,17,opt,name=cubbyhole_id,json=cubbyholeId,proto3" json:"cubbyhole_id,omitempty"`
+	Type uint32 `sentinel:"" protobuf:"varint,18,opt,name=type,proto3" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *TokenEntry) Reset()         { *m = TokenEntry{} }
+func (m *TokenEntry) String() string { return proto.CompactTextString(m) }
+func (*TokenEntry) ProtoMessage()    {}
+func (*TokenEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{6}
+}
+
+func (m *TokenEntry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TokenEntry.Unmarshal(m, b)
+}
+func (m *TokenEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TokenEntry.Marshal(b, m, deterministic)
+}
+func (m *TokenEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TokenEntry.Merge(m, src)
+}
+func (m *TokenEntry) XXX_Size() int {
+	return xxx_messageInfo_TokenEntry.Size(m)
+}
+func (m *TokenEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_TokenEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TokenEntry proto.InternalMessageInfo
+
+func (m *TokenEntry) GetID() string {
+	if m != nil {
+		return m.ID
+	}
+	return ""
+}
+
+func (m *TokenEntry) GetAccessor() string {
+	if m != nil {
+		return m.Accessor
+	}
+	return ""
+}
+
+func (m *TokenEntry) GetParent() string {
+	if m != nil {
+		return m.Parent
+	}
+	return ""
+}
+
+func (m *TokenEntry) GetPolicies() []string {
+	if m != nil {
+		return m.Policies
+	}
+	return nil
+}
+
+func (m *TokenEntry) GetPath() string {
+	if m != nil {
+		return m.Path
+	}
+	return ""
+}
+
+func (m *TokenEntry) GetMeta() map[string]string {
+	if m != nil {
+		return m.Meta
+	}
+	return nil
+}
+
+func (m *TokenEntry) GetDisplayName() string {
+	if m != nil {
+		return m.DisplayName
+	}
+	return ""
+}
+
+func (m *TokenEntry) GetNumUses() int64 {
+	if m != nil {
+		return m.NumUses
+	}
+	return 0
+}
+
+func (m *TokenEntry) GetCreationTime() int64 {
+	if m != nil {
+		return m.CreationTime
+	}
+	return 0
+}
+
+func (m *TokenEntry) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *TokenEntry) GetExplicitMaxTTL() int64 {
+	if m != nil {
+		return m.ExplicitMaxTTL
+	}
+	return 0
+}
+
+func (m *TokenEntry) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+func (m *TokenEntry) GetPeriod() int64 {
+	if m != nil {
+		return m.Period
+	}
+	return 0
+}
+
+func (m *TokenEntry) GetEntityID() string {
+	if m != nil {
+		return m.EntityID
+	}
+	return ""
+}
+
+func (m *TokenEntry) GetBoundCIDRs() []string {
+	if m != nil {
+		return m.BoundCIDRs
+	}
+	return nil
+}
+
+func (m *TokenEntry) GetNamespaceID() string {
+	if m != nil {
+		return m.NamespaceID
+	}
+	return ""
+}
+
+func (m *TokenEntry) GetCubbyholeID() string {
+	if m != nil {
+		return m.CubbyholeID
+	}
+	return ""
+}
+
+func (m *TokenEntry) GetType() uint32 {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+type LeaseOptions struct {
+	TTL int64 `sentinel:"" protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	Renewable bool `sentinel:"" protobuf:"varint,2,opt,name=renewable,proto3" json:"renewable,omitempty"`
+	Increment int64 `sentinel:"" protobuf:"varint,3,opt,name=increment,proto3" json:"increment,omitempty"`
+	IssueTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,4,opt,name=issue_time,json=issueTime,proto3" json:"issue_time,omitempty"`
+	MaxTTL int64 `sentinel:"" protobuf:"varint,5,opt,name=MaxTTL,proto3" json:"MaxTTL,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseOptions) Reset()         { *m = LeaseOptions{} }
+func (m *LeaseOptions) String() string { return proto.CompactTextString(m) }
+func (*LeaseOptions) ProtoMessage()    {}
+func (*LeaseOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{7}
+}
+
+func (m *LeaseOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LeaseOptions.Unmarshal(m, b)
+}
+func (m *LeaseOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LeaseOptions.Marshal(b, m, deterministic)
+}
+func (m *LeaseOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseOptions.Merge(m, src)
+}
+func (m *LeaseOptions) XXX_Size() int {
+	return xxx_messageInfo_LeaseOptions.Size(m)
+}
+func (m *LeaseOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseOptions proto.InternalMessageInfo
+
+func (m *LeaseOptions) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *LeaseOptions) GetRenewable() bool {
+	if m != nil {
+		return m.Renewable
+	}
+	return false
+}
+
+func (m *LeaseOptions) GetIncrement() int64 {
+	if m != nil {
+		return m.Increment
+	}
+	return 0
+}
+
+func (m *LeaseOptions) GetIssueTime() *timestamp.Timestamp {
+	if m != nil {
+		return m.IssueTime
+	}
+	return nil
+}
+
+func (m *LeaseOptions) GetMaxTTL() int64 {
+	if m != nil {
+		return m.MaxTTL
+	}
+	return 0
+}
+
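IssueTime is the one field here that crosses the protobuf/Go type boundary, so it needs a conversion step before any arithmetic. A small sketch using the ptypes helper from the same protobuf generation this file belongs to; the TTL unit is an explicit assumption:

```go
package leaseinfo

import (
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/hashicorp/vault/sdk/plugin/pb" // path assumed from this vendor tree
)

// leaseExpiry sketches the IssueTime conversion plus TTL arithmetic. It
// treats TTL as a Go duration value (nanoseconds); if the host encodes
// seconds instead, multiply by time.Second -- the .proto definition, not
// this generated file, is authoritative for the unit.
func leaseExpiry(lo *pb.LeaseOptions) (time.Time, error) {
	issued, err := ptypes.Timestamp(lo.GetIssueTime()) // *timestamp.Timestamp -> time.Time
	if err != nil {
		return time.Time{}, err
	}
	return issued.Add(time.Duration(lo.GetTTL())), nil
}
```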
+type Secret struct {
+	LeaseOptions *LeaseOptions `sentinel:"" protobuf:"bytes,1,opt,name=lease_options,json=leaseOptions,proto3" json:"lease_options,omitempty"`
+	// InternalData is a JSON object that is stored with the secret.
+	// This will be sent back during a Renew/Revoke for storing internal data
+	// used for those operations.
+	InternalData string `sentinel:"" protobuf:"bytes,2,opt,name=internal_data,json=internalData,proto3" json:"internal_data,omitempty"`
+	// LeaseID is the ID returned to the user to manage this secret.
+	// This is generated by Vault core. Any set value will be ignored.
+	// For requests, this will always be blank.
+	LeaseID string `sentinel:"" protobuf:"bytes,3,opt,name=lease_id,json=leaseId,proto3" json:"lease_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Secret) Reset()         { *m = Secret{} }
+func (m *Secret) String() string { return proto.CompactTextString(m) }
+func (*Secret) ProtoMessage()    {}
+func (*Secret) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{8}
+}
+
+func (m *Secret) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Secret.Unmarshal(m, b)
+}
+func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Secret.Marshal(b, m, deterministic)
+}
+func (m *Secret) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Secret.Merge(m, src)
+}
+func (m *Secret) XXX_Size() int {
+	return xxx_messageInfo_Secret.Size(m)
+}
+func (m *Secret) XXX_DiscardUnknown() {
+	xxx_messageInfo_Secret.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Secret proto.InternalMessageInfo
+
+func (m *Secret) GetLeaseOptions() *LeaseOptions {
+	if m != nil {
+		return m.LeaseOptions
+	}
+	return nil
+}
+
+func (m *Secret) GetInternalData() string {
+	if m != nil {
+		return m.InternalData
+	}
+	return ""
+}
+
+func (m *Secret) GetLeaseID() string {
+	if m != nil {
+		return m.LeaseID
+	}
+	return ""
+}
+
+type Response struct {
+	// Secret, if not nil, denotes that this response represents a secret.
+	Secret *Secret `sentinel:"" protobuf:"bytes,1,opt,name=secret,proto3" json:"secret,omitempty"`
+	// Auth, if not nil, contains the authentication information for
+	// this response. This is only checked and means something for
+	// credential backends.
+	Auth *Auth `sentinel:"" protobuf:"bytes,2,opt,name=auth,proto3" json:"auth,omitempty"`
+	// Response data is a JSON object that must have string keys. For
+	// secrets, this data is sent down to the user as-is. To store internal
+	// data that you don't want the user to see, store it in
+	// Secret.InternalData.
+	Data string `sentinel:"" protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	// Redirect is an HTTP URL to redirect to for further authentication.
+	// This is only valid for credential backends. This will be blanked
+	// for any logical backend and ignored.
+	Redirect string `sentinel:"" protobuf:"bytes,4,opt,name=redirect,proto3" json:"redirect,omitempty"`
+	// Warnings allow operations or backends to return warnings in response
+	// to user actions without failing the action outright.
+	Warnings []string `sentinel:"" protobuf:"bytes,5,rep,name=warnings,proto3" json:"warnings,omitempty"`
+	// Information for wrapping the response in a cubbyhole
+	WrapInfo *ResponseWrapInfo `sentinel:"" protobuf:"bytes,6,opt,name=wrap_info,json=wrapInfo,proto3" json:"wrap_info,omitempty"`
+	// Headers will contain the http headers from the response. This value will
+	// be used in the audit broker to ensure we are auditing only the allowed
+	// headers.
+	Headers map[string]*Header `sentinel:"" protobuf:"bytes,7,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Response) Reset()         { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage()    {}
+func (*Response) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{9}
+}
+
+func (m *Response) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Response.Unmarshal(m, b)
+}
+func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Response.Marshal(b, m, deterministic)
+}
+func (m *Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Response.Merge(m, src)
+}
+func (m *Response) XXX_Size() int {
+	return xxx_messageInfo_Response.Size(m)
+}
+func (m *Response) XXX_DiscardUnknown() {
+	xxx_messageInfo_Response.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Response proto.InternalMessageInfo
+
+func (m *Response) GetSecret() *Secret {
+	if m != nil {
+		return m.Secret
+	}
+	return nil
+}
+
+func (m *Response) GetAuth() *Auth {
+	if m != nil {
+		return m.Auth
+	}
+	return nil
+}
+
+func (m *Response) GetData() string {
+	if m != nil {
+		return m.Data
+	}
+	return ""
+}
+
+func (m *Response) GetRedirect() string {
+	if m != nil {
+		return m.Redirect
+	}
+	return ""
+}
+
+func (m *Response) GetWarnings() []string {
+	if m != nil {
+		return m.Warnings
+	}
+	return nil
+}
+
+func (m *Response) GetWrapInfo() *ResponseWrapInfo {
+	if m != nil {
+		return m.WrapInfo
+	}
+	return nil
+}
+
+func (m *Response) GetHeaders() map[string]*Header {
+	if m != nil {
+		return m.Headers
+	}
+	return nil
+}
+
+type ResponseWrapInfo struct {
+	// Setting to non-zero specifies that the response should be wrapped.
+	// Specifies the desired TTL of the wrapping token.
+	TTL int64 `sentinel:"" protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	// The token containing the wrapped response
+	Token string `sentinel:"" protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
+	// The token accessor for the wrapped response token
+	Accessor string `sentinel:"" protobuf:"bytes,3,opt,name=accessor,proto3" json:"accessor,omitempty"`
+	// The creation time. This can be used with the TTL to figure out an
+	// expected expiration.
+	CreationTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
+	// If the contained response is the output of a token creation call, the
+	// created token's accessor will be accessible here
+	WrappedAccessor string `sentinel:"" protobuf:"bytes,5,opt,name=wrapped_accessor,json=wrappedAccessor,proto3" json:"wrapped_accessor,omitempty"`
+	// WrappedEntityID is the entity identifier of the caller who initiated the
+	// wrapping request
+	WrappedEntityID string `sentinel:"" protobuf:"bytes,6,opt,name=wrapped_entity_id,json=wrappedEntityID,proto3" json:"wrapped_entity_id,omitempty"`
+	// The format to use. This doesn't get returned, it's only internal.
+	Format string `sentinel:"" protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"`
+	// CreationPath is the original request path that was used to create
+	// the wrapped response.
+	CreationPath string `sentinel:"" protobuf:"bytes,8,opt,name=creation_path,json=creationPath,proto3" json:"creation_path,omitempty"`
+	// Controls seal wrapping behavior downstream for specific use cases
+	SealWrap bool `sentinel:"" protobuf:"varint,9,opt,name=seal_wrap,json=sealWrap,proto3" json:"seal_wrap,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ResponseWrapInfo) Reset()         { *m = ResponseWrapInfo{} }
+func (m *ResponseWrapInfo) String() string { return proto.CompactTextString(m) }
+func (*ResponseWrapInfo) ProtoMessage()    {}
+func (*ResponseWrapInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{10}
+}
+
+func (m *ResponseWrapInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ResponseWrapInfo.Unmarshal(m, b)
+}
+func (m *ResponseWrapInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ResponseWrapInfo.Marshal(b, m, deterministic)
+}
+func (m *ResponseWrapInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResponseWrapInfo.Merge(m, src)
+}
+func (m *ResponseWrapInfo) XXX_Size() int {
+	return xxx_messageInfo_ResponseWrapInfo.Size(m)
+}
+func (m *ResponseWrapInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResponseWrapInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseWrapInfo proto.InternalMessageInfo
+
+func (m *ResponseWrapInfo) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *ResponseWrapInfo) GetToken() string {
+	if m != nil {
+		return m.Token
+	}
+	return ""
+}
+
+func (m *ResponseWrapInfo) GetAccessor() string {
+	if m != nil {
+		return m.Accessor
+	}
+	return ""
+}
+
+func (m *ResponseWrapInfo) GetCreationTime() *timestamp.Timestamp {
+	if m != nil {
+		return m.CreationTime
+	}
+	return nil
+}
+
+func (m *ResponseWrapInfo) GetWrappedAccessor() string {
+	if m != nil {
+		return m.WrappedAccessor
+	}
+	return ""
+}
+
+func (m *ResponseWrapInfo) GetWrappedEntityID() string {
+	if m != nil {
+		return m.WrappedEntityID
+	}
+	return ""
+}
+
+func (m *ResponseWrapInfo) GetFormat() string {
+	if m != nil {
+		return m.Format
+	}
+	return ""
+}
+
+func (m *ResponseWrapInfo) GetCreationPath() string {
+	if m != nil {
+		return m.CreationPath
+	}
+	return ""
+}
+
+func (m *ResponseWrapInfo) GetSealWrap() bool {
+	if m != nil {
+		return m.SealWrap
+	}
+	return false
+}
+
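The CreationTime comment above points at the intended use: creation time plus TTL yields the wrapping token's expected expiry. A hedged sketch of that computation, with the same duration-unit caveat as for LeaseOptions:

```go
package wrapinfo

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/hashicorp/vault/sdk/plugin/pb" // path assumed from this vendor tree
)

// describeWrap summarizes a wrapped response for an operator log line;
// TTL is treated as a Go duration value here, which is an assumption.
func describeWrap(wi *pb.ResponseWrapInfo) (string, error) {
	created, err := ptypes.Timestamp(wi.GetCreationTime())
	if err != nil {
		return "", err
	}
	expiry := created.Add(time.Duration(wi.GetTTL()))
	return fmt.Sprintf("accessor %s wrapped via %s, expected expiry %s",
		wi.GetAccessor(), wi.GetCreationPath(), expiry.Format(time.RFC3339)), nil
}
```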
+type RequestWrapInfo struct {
+	// Setting to non-zero specifies that the response should be wrapped.
+	// Specifies the desired TTL of the wrapping token.
+	TTL int64 `sentinel:"" protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	// The format to use for the wrapped response; if not specified it's a bare
+	// token
+	Format string `sentinel:"" protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
+	// A flag telling conforming backends that data for a given request should be
+	// seal wrapped
+	SealWrap bool `sentinel:"" protobuf:"varint,3,opt,name=seal_wrap,json=sealWrap,proto3" json:"seal_wrap,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RequestWrapInfo) Reset()         { *m = RequestWrapInfo{} }
+func (m *RequestWrapInfo) String() string { return proto.CompactTextString(m) }
+func (*RequestWrapInfo) ProtoMessage()    {}
+func (*RequestWrapInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{11}
+}
+
+func (m *RequestWrapInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_RequestWrapInfo.Unmarshal(m, b)
+}
+func (m *RequestWrapInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_RequestWrapInfo.Marshal(b, m, deterministic)
+}
+func (m *RequestWrapInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RequestWrapInfo.Merge(m, src)
+}
+func (m *RequestWrapInfo) XXX_Size() int {
+	return xxx_messageInfo_RequestWrapInfo.Size(m)
+}
+func (m *RequestWrapInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_RequestWrapInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestWrapInfo proto.InternalMessageInfo
+
+func (m *RequestWrapInfo) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *RequestWrapInfo) GetFormat() string {
+	if m != nil {
+		return m.Format
+	}
+	return ""
+}
+
+func (m *RequestWrapInfo) GetSealWrap() bool {
+	if m != nil {
+		return m.SealWrap
+	}
+	return false
+}
+
+// HandleRequestArgs is the args for HandleRequest method.
+type HandleRequestArgs struct {
+	StorageID uint32 `sentinel:"" protobuf:"varint,1,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"`
+	Request *Request `sentinel:"" protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HandleRequestArgs) Reset()         { *m = HandleRequestArgs{} }
+func (m *HandleRequestArgs) String() string { return proto.CompactTextString(m) }
+func (*HandleRequestArgs) ProtoMessage()    {}
+func (*HandleRequestArgs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{12}
+}
+
+func (m *HandleRequestArgs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HandleRequestArgs.Unmarshal(m, b)
+}
+func (m *HandleRequestArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HandleRequestArgs.Marshal(b, m, deterministic)
+}
+func (m *HandleRequestArgs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HandleRequestArgs.Merge(m, src)
+}
+func (m *HandleRequestArgs) XXX_Size() int {
+	return xxx_messageInfo_HandleRequestArgs.Size(m)
+}
+func (m *HandleRequestArgs) XXX_DiscardUnknown() {
+	xxx_messageInfo_HandleRequestArgs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HandleRequestArgs proto.InternalMessageInfo
+
+func (m *HandleRequestArgs) GetStorageID() uint32 {
+	if m != nil {
+		return m.StorageID
+	}
+	return 0
+}
+
+func (m *HandleRequestArgs) GetRequest() *Request {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+// HandleRequestReply is the reply for HandleRequest method.
+type HandleRequestReply struct {
+	Response *Response `sentinel:"" protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"`
+	Err *ProtoError `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HandleRequestReply) Reset()         { *m = HandleRequestReply{} }
+func (m *HandleRequestReply) String() string { return proto.CompactTextString(m) }
+func (*HandleRequestReply) ProtoMessage()    {}
+func (*HandleRequestReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{13}
+}
+
+func (m *HandleRequestReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HandleRequestReply.Unmarshal(m, b)
+}
+func (m *HandleRequestReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HandleRequestReply.Marshal(b, m, deterministic)
+}
+func (m *HandleRequestReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HandleRequestReply.Merge(m, src)
+}
+func (m *HandleRequestReply) XXX_Size() int {
+	return xxx_messageInfo_HandleRequestReply.Size(m)
+}
+func (m *HandleRequestReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_HandleRequestReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HandleRequestReply proto.InternalMessageInfo
+
+func (m *HandleRequestReply) GetResponse() *Response {
+	if m != nil {
+		return m.Response
+	}
+	return nil
+}
+
+func (m *HandleRequestReply) GetErr() *ProtoError {
+	if m != nil {
+		return m.Err
+	}
+	return nil
+}
+
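These Args/Reply pairs are the unit of the plugin RPC surface, and the error handling is split: transport failures come back as Go errors, while logical failures ride inside the reply as a ProtoError. A hedged sketch of the round trip, assuming the generated gRPC client for the Backend service that accompanies these messages in this package:

```go
package client

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/plugin/pb" // path assumed from this vendor tree
)

// readConfig sketches one HandleRequest round trip; the operation and
// path are placeholders.
func readConfig(ctx context.Context, c pb.BackendClient) (*pb.Response, error) {
	reply, err := c.HandleRequest(ctx, &pb.HandleRequestArgs{
		Request: &pb.Request{Operation: "read", Path: "config"},
	})
	if err != nil {
		return nil, err // transport or plugin-process failure
	}
	if pbErr := reply.GetErr(); pbErr != nil {
		return nil, fmt.Errorf("backend error: %v", pbErr) // logical failure in the reply
	}
	return reply.GetResponse(), nil
}
```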
+// InitializeArgs is the args for Initialize method.
+type InitializeArgs struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *InitializeArgs) Reset()         { *m = InitializeArgs{} }
+func (m *InitializeArgs) String() string { return proto.CompactTextString(m) }
+func (*InitializeArgs) ProtoMessage()    {}
+func (*InitializeArgs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{14}
+}
+
+func (m *InitializeArgs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_InitializeArgs.Unmarshal(m, b)
+}
+func (m *InitializeArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_InitializeArgs.Marshal(b, m, deterministic)
+}
+func (m *InitializeArgs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InitializeArgs.Merge(m, src)
+}
+func (m *InitializeArgs) XXX_Size() int {
+	return xxx_messageInfo_InitializeArgs.Size(m)
+}
+func (m *InitializeArgs) XXX_DiscardUnknown() {
+	xxx_messageInfo_InitializeArgs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InitializeArgs proto.InternalMessageInfo
+
+// InitializeReply is the reply for Initialize method.
+type InitializeReply struct {
+	Err *ProtoError `sentinel:"" protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *InitializeReply) Reset()         { *m = InitializeReply{} }
+func (m *InitializeReply) String() string { return proto.CompactTextString(m) }
+func (*InitializeReply) ProtoMessage()    {}
+func (*InitializeReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{15}
+}
+
+func (m *InitializeReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_InitializeReply.Unmarshal(m, b)
+}
+func (m *InitializeReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_InitializeReply.Marshal(b, m, deterministic)
+}
+func (m *InitializeReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InitializeReply.Merge(m, src)
+}
+func (m *InitializeReply) XXX_Size() int {
+	return xxx_messageInfo_InitializeReply.Size(m)
+}
+func (m *InitializeReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_InitializeReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InitializeReply proto.InternalMessageInfo
+
+func (m *InitializeReply) GetErr() *ProtoError {
+	if m != nil {
+		return m.Err
+	}
+	return nil
+}
+
+// SpecialPathsReply is the reply for SpecialPaths method.
+type SpecialPathsReply struct {
+	Paths *Paths `sentinel:"" protobuf:"bytes,1,opt,name=paths,proto3" json:"paths,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SpecialPathsReply) Reset()         { *m = SpecialPathsReply{} }
+func (m *SpecialPathsReply) String() string { return proto.CompactTextString(m) }
+func (*SpecialPathsReply) ProtoMessage()    {}
+func (*SpecialPathsReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{16}
+}
+
+func (m *SpecialPathsReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SpecialPathsReply.Unmarshal(m, b)
+}
+func (m *SpecialPathsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SpecialPathsReply.Marshal(b, m, deterministic)
+}
+func (m *SpecialPathsReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SpecialPathsReply.Merge(m, src)
+}
+func (m *SpecialPathsReply) XXX_Size() int {
+	return xxx_messageInfo_SpecialPathsReply.Size(m)
+}
+func (m *SpecialPathsReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_SpecialPathsReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SpecialPathsReply proto.InternalMessageInfo
+
+func (m *SpecialPathsReply) GetPaths() *Paths {
+	if m != nil {
+		return m.Paths
+	}
+	return nil
+}
+
+// HandleExistenceCheckArgs is the args for HandleExistenceCheck method.
+type HandleExistenceCheckArgs struct {
+	StorageID uint32 `sentinel:"" protobuf:"varint,1,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"`
+	Request *Request `sentinel:"" protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HandleExistenceCheckArgs) Reset()         { *m = HandleExistenceCheckArgs{} }
+func (m *HandleExistenceCheckArgs) String() string { return proto.CompactTextString(m) }
+func (*HandleExistenceCheckArgs) ProtoMessage()    {}
+func (*HandleExistenceCheckArgs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{17}
+}
+
+func (m *HandleExistenceCheckArgs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HandleExistenceCheckArgs.Unmarshal(m, b)
+}
+func (m *HandleExistenceCheckArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HandleExistenceCheckArgs.Marshal(b, m, deterministic)
+}
+func (m *HandleExistenceCheckArgs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HandleExistenceCheckArgs.Merge(m, src)
+}
+func (m *HandleExistenceCheckArgs) XXX_Size() int {
+	return xxx_messageInfo_HandleExistenceCheckArgs.Size(m)
+}
+func (m *HandleExistenceCheckArgs) XXX_DiscardUnknown() {
+	xxx_messageInfo_HandleExistenceCheckArgs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HandleExistenceCheckArgs proto.InternalMessageInfo
+
+func (m *HandleExistenceCheckArgs) GetStorageID() uint32 {
+	if m != nil {
+		return m.StorageID
+	}
+	return 0
+}
+
+func (m *HandleExistenceCheckArgs) GetRequest() *Request {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+// HandleExistenceCheckReply is the reply for HandleExistenceCheck method.
+type HandleExistenceCheckReply struct {
+	CheckFound bool `sentinel:"" protobuf:"varint,1,opt,name=check_found,json=checkFound,proto3" json:"check_found,omitempty"`
+	Exists bool `sentinel:"" protobuf:"varint,2,opt,name=exists,proto3" json:"exists,omitempty"`
+	Err *ProtoError `sentinel:"" protobuf:"bytes,3,opt,name=err,proto3" json:"err,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HandleExistenceCheckReply) Reset()         { *m = HandleExistenceCheckReply{} }
+func (m *HandleExistenceCheckReply) String() string { return proto.CompactTextString(m) }
+func (*HandleExistenceCheckReply) ProtoMessage()    {}
+func (*HandleExistenceCheckReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{18}
+}
+
+func (m *HandleExistenceCheckReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HandleExistenceCheckReply.Unmarshal(m, b)
+}
+func (m *HandleExistenceCheckReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HandleExistenceCheckReply.Marshal(b, m, deterministic)
+}
+func (m *HandleExistenceCheckReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HandleExistenceCheckReply.Merge(m, src)
+}
+func (m *HandleExistenceCheckReply) XXX_Size() int {
+	return xxx_messageInfo_HandleExistenceCheckReply.Size(m)
+}
+func (m *HandleExistenceCheckReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_HandleExistenceCheckReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HandleExistenceCheckReply proto.InternalMessageInfo
+
+func (m *HandleExistenceCheckReply) GetCheckFound() bool {
+	if m != nil {
+		return m.CheckFound
+	}
+	return false
+}
+
+func (m *HandleExistenceCheckReply) GetExists() bool {
+	if m != nil {
+		return m.Exists
+	}
+	return false
+}
+
+func (m *HandleExistenceCheckReply) GetErr() *ProtoError {
+	if m != nil {
+		return m.Err
+	}
+	return nil
+}
+
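The two booleans here are easy to conflate: CheckFound reports whether the backend defines an existence check for the path at all, while Exists reports whether the entity is already present, which is what lets core distinguish create from update semantics. A small illustrative helper (the returned operation names are for exposition, not taken from this file):

```go
package client

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/plugin/pb" // path assumed from this vendor tree
)

// existenceOutcome shows the intended reading of the reply fields.
func existenceOutcome(r *pb.HandleExistenceCheckReply) (string, error) {
	if r.GetErr() != nil {
		return "", fmt.Errorf("existence check failed: %v", r.GetErr())
	}
	if !r.GetCheckFound() {
		return "no-existence-check", nil // backend defines no check for this path
	}
	if r.GetExists() {
		return "update", nil // entity already present
	}
	return "create", nil // entity absent
}
```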
+// SetupArgs is the args for Setup method.
+type SetupArgs struct {
+	BrokerID uint32 `sentinel:"" protobuf:"varint,1,opt,name=broker_id,json=brokerId,proto3" json:"broker_id,omitempty"`
+	Config map[string]string `sentinel:"" protobuf:"bytes,2,rep,name=Config,proto3" json:"Config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	BackendUUID string `sentinel:"" protobuf:"bytes,3,opt,name=backendUUID,proto3" json:"backendUUID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SetupArgs) Reset()         { *m = SetupArgs{} }
+func (m *SetupArgs) String() string { return proto.CompactTextString(m) }
+func (*SetupArgs) ProtoMessage()    {}
+func (*SetupArgs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{19}
+}
+
+func (m *SetupArgs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SetupArgs.Unmarshal(m, b)
+}
+func (m *SetupArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SetupArgs.Marshal(b, m, deterministic)
+}
+func (m *SetupArgs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SetupArgs.Merge(m, src)
+}
+func (m *SetupArgs) XXX_Size() int {
+	return xxx_messageInfo_SetupArgs.Size(m)
+}
+func (m *SetupArgs) XXX_DiscardUnknown() {
+	xxx_messageInfo_SetupArgs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetupArgs proto.InternalMessageInfo
+
+func (m *SetupArgs) GetBrokerID() uint32 {
+	if m != nil {
+		return m.BrokerID
+	}
+	return 0
+}
+
+func (m *SetupArgs) GetConfig() map[string]string {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+func (m *SetupArgs) GetBackendUUID() string {
+	if m != nil {
+		return m.BackendUUID
+	}
+	return ""
+}
+
+// SetupReply is the reply for Setup method.
+type SetupReply struct {
+	Err string `sentinel:"" protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SetupReply) Reset()         { *m = SetupReply{} }
+func (m *SetupReply) String() string { return proto.CompactTextString(m) }
+func (*SetupReply) ProtoMessage()    {}
+func (*SetupReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{20}
+}
+
+func (m *SetupReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SetupReply.Unmarshal(m, b)
+}
+func (m *SetupReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SetupReply.Marshal(b, m, deterministic)
+}
+func (m *SetupReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SetupReply.Merge(m, src)
+}
+func (m *SetupReply) XXX_Size() int {
+	return xxx_messageInfo_SetupReply.Size(m)
+}
+func (m *SetupReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_SetupReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetupReply proto.InternalMessageInfo
+
+func (m *SetupReply) GetErr() string {
+	if m != nil {
+		return m.Err
+	}
+	return ""
+}
+
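Setup is the handshake that gives the plugin its broker identity and mount configuration. A sketch of the argument shape with placeholder values; the config key shown is illustrative, not a schema defined by this file:

```go
package client

import "github.com/hashicorp/vault/sdk/plugin/pb" // path assumed from this vendor tree

// newSetupArgs sketches the shape of a Setup call's arguments.
func newSetupArgs() *pb.SetupArgs {
	return &pb.SetupArgs{
		BrokerID:    1, // ID used to dial host services back over the plugin broker
		Config:      map[string]string{"default_lease_ttl": "768h"},
		BackendUUID: "00000000-0000-0000-0000-000000000000", // placeholder mount UUID
	}
}
```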
+// TypeReply is the reply for the Type method.
+type TypeReply struct {
+	Type uint32 `sentinel:"" protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *TypeReply) Reset()         { *m = TypeReply{} }
+func (m *TypeReply) String() string { return proto.CompactTextString(m) }
+func (*TypeReply) ProtoMessage()    {}
+func (*TypeReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{21}
+}
+
+func (m *TypeReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TypeReply.Unmarshal(m, b)
+}
+func (m *TypeReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TypeReply.Marshal(b, m, deterministic)
+}
+func (m *TypeReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TypeReply.Merge(m, src)
+}
+func (m *TypeReply) XXX_Size() int {
+	return xxx_messageInfo_TypeReply.Size(m)
+}
+func (m *TypeReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_TypeReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TypeReply proto.InternalMessageInfo
+
+func (m *TypeReply) GetType() uint32 {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+type InvalidateKeyArgs struct {
+	Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *InvalidateKeyArgs) Reset()         { *m = InvalidateKeyArgs{} }
+func (m *InvalidateKeyArgs) String() string { return proto.CompactTextString(m) }
+func (*InvalidateKeyArgs) ProtoMessage()    {}
+func (*InvalidateKeyArgs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{22}
+}
+
+func (m *InvalidateKeyArgs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_InvalidateKeyArgs.Unmarshal(m, b)
+}
+func (m *InvalidateKeyArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_InvalidateKeyArgs.Marshal(b, m, deterministic)
+}
+func (m *InvalidateKeyArgs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InvalidateKeyArgs.Merge(m, src)
+}
+func (m *InvalidateKeyArgs) XXX_Size() int {
+	return xxx_messageInfo_InvalidateKeyArgs.Size(m)
+}
+func (m *InvalidateKeyArgs) XXX_DiscardUnknown() {
+	xxx_messageInfo_InvalidateKeyArgs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InvalidateKeyArgs proto.InternalMessageInfo
+
+func (m *InvalidateKeyArgs) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+type StorageEntry struct {
+	Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value []byte `sentinel:"" protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	SealWrap bool `sentinel:"" protobuf:"varint,3,opt,name=seal_wrap,json=sealWrap,proto3" json:"seal_wrap,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StorageEntry) Reset()         { *m = StorageEntry{} }
+func (m *StorageEntry) String() string { return proto.CompactTextString(m) }
+func (*StorageEntry) ProtoMessage()    {}
+func (*StorageEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{23}
+}
+
+func (m *StorageEntry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StorageEntry.Unmarshal(m, b)
+}
+func (m *StorageEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StorageEntry.Marshal(b, m, deterministic)
+}
+func (m *StorageEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StorageEntry.Merge(m, src)
+}
+func (m *StorageEntry) XXX_Size() int {
+	return xxx_messageInfo_StorageEntry.Size(m)
+}
+func (m *StorageEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_StorageEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StorageEntry proto.InternalMessageInfo
+
+func (m *StorageEntry) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+func (m *StorageEntry) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *StorageEntry) GetSealWrap() bool {
+	if m != nil {
+		return m.SealWrap
+	}
+	return false
+}
+
+type StorageListArgs struct {
+	Prefix string `sentinel:"" protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StorageListArgs) Reset()         { *m = StorageListArgs{} }
+func (m *StorageListArgs) String() string { return proto.CompactTextString(m) }
+func (*StorageListArgs) ProtoMessage()    {}
+func (*StorageListArgs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{24}
+}
+
+func (m *StorageListArgs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StorageListArgs.Unmarshal(m, b)
+}
+func (m *StorageListArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StorageListArgs.Marshal(b, m, deterministic)
+}
+func (m *StorageListArgs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StorageListArgs.Merge(m, src)
+}
+func (m *StorageListArgs) XXX_Size() int {
+	return xxx_messageInfo_StorageListArgs.Size(m)
+}
+func (m *StorageListArgs) XXX_DiscardUnknown() {
+	xxx_messageInfo_StorageListArgs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StorageListArgs proto.InternalMessageInfo
+
+func (m *StorageListArgs) GetPrefix() string {
+	if m != nil {
+		return m.Prefix
+	}
+	return ""
+}
+
+type StorageListReply struct {
+	Keys []string `sentinel:"" protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"`
+	Err string `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StorageListReply) Reset()         { *m = StorageListReply{} }
+func (m *StorageListReply) String() string { return proto.CompactTextString(m) }
+func (*StorageListReply) ProtoMessage()    {}
+func (*StorageListReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{25}
+}
+
+func (m *StorageListReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StorageListReply.Unmarshal(m, b)
+}
+func (m *StorageListReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StorageListReply.Marshal(b, m, deterministic)
+}
+func (m *StorageListReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StorageListReply.Merge(m, src)
+}
+func (m *StorageListReply) XXX_Size() int {
+	return xxx_messageInfo_StorageListReply.Size(m)
+}
+func (m *StorageListReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_StorageListReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StorageListReply proto.InternalMessageInfo
+
+func (m *StorageListReply) GetKeys() []string {
+	if m != nil {
+		return m.Keys
+	}
+	return nil
+}
+
+func (m *StorageListReply) GetErr() string {
+	if m != nil {
+		return m.Err
+	}
+	return ""
+}
+
+type StorageGetArgs struct {
+	Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StorageGetArgs) Reset()         { *m = StorageGetArgs{} }
+func (m *StorageGetArgs) String() string { return proto.CompactTextString(m) }
+func (*StorageGetArgs) ProtoMessage()    {}
+func (*StorageGetArgs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{26}
+}
+
+func (m *StorageGetArgs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StorageGetArgs.Unmarshal(m, b)
+}
+func (m *StorageGetArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StorageGetArgs.Marshal(b, m, deterministic)
+}
+func (m *StorageGetArgs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StorageGetArgs.Merge(m, src)
+}
+func (m *StorageGetArgs) XXX_Size() int {
+	return xxx_messageInfo_StorageGetArgs.Size(m)
+}
+func (m *StorageGetArgs) XXX_DiscardUnknown() {
+	xxx_messageInfo_StorageGetArgs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StorageGetArgs proto.InternalMessageInfo
+
+func (m *StorageGetArgs) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+type StorageGetReply struct {
+	Entry *StorageEntry `sentinel:"" protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
+	Err string `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StorageGetReply) Reset()         { *m = StorageGetReply{} }
+func (m *StorageGetReply) String() string { return proto.CompactTextString(m) }
+func (*StorageGetReply) ProtoMessage()    {}
+func (*StorageGetReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{27}
+}
+
+func (m *StorageGetReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StorageGetReply.Unmarshal(m, b)
+}
+func (m *StorageGetReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StorageGetReply.Marshal(b, m, deterministic)
+}
+func (m *StorageGetReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StorageGetReply.Merge(m, src)
+}
+func (m *StorageGetReply) XXX_Size() int {
+	return xxx_messageInfo_StorageGetReply.Size(m)
+}
+func (m *StorageGetReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_StorageGetReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StorageGetReply proto.InternalMessageInfo
+
+func (m *StorageGetReply) GetEntry() *StorageEntry {
+	if m != nil {
+		return m.Entry
+	}
+	return nil
+}
+
+func (m *StorageGetReply) GetErr() string {
+	if m != nil {
+		return m.Err
+	}
+	return ""
+}
+
+type StoragePutArgs struct {
+	Entry *StorageEntry `sentinel:"" protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StoragePutArgs) Reset()         { *m = StoragePutArgs{} }
+func (m *StoragePutArgs) String() string { return proto.CompactTextString(m) }
+func (*StoragePutArgs) ProtoMessage()    {}
+func (*StoragePutArgs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{28}
+}
+
+func (m *StoragePutArgs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StoragePutArgs.Unmarshal(m, b)
+}
+func (m *StoragePutArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StoragePutArgs.Marshal(b, m, deterministic)
+}
+func (m *StoragePutArgs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StoragePutArgs.Merge(m, src)
+}
+func (m *StoragePutArgs) XXX_Size() int {
+	return xxx_messageInfo_StoragePutArgs.Size(m)
+}
+func (m *StoragePutArgs) XXX_DiscardUnknown() {
+	xxx_messageInfo_StoragePutArgs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StoragePutArgs proto.InternalMessageInfo
+
+func (m *StoragePutArgs) GetEntry() *StorageEntry {
+	if m != nil {
+		return m.Entry
+	}
+	return nil
+}
+
+type StoragePutReply struct {
+	Err string `sentinel:"" protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StoragePutReply) Reset()         { *m = StoragePutReply{} }
+func (m *StoragePutReply) String() string { return proto.CompactTextString(m) }
+func (*StoragePutReply) ProtoMessage()    {}
+func (*StoragePutReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{29}
+}
+
+func (m *StoragePutReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StoragePutReply.Unmarshal(m, b)
+}
+func (m *StoragePutReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StoragePutReply.Marshal(b, m, deterministic)
+}
+func (m *StoragePutReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StoragePutReply.Merge(m, src)
+}
+func (m *StoragePutReply) XXX_Size() int {
+	return xxx_messageInfo_StoragePutReply.Size(m)
+}
+func (m *StoragePutReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_StoragePutReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StoragePutReply proto.InternalMessageInfo
+
+func (m *StoragePutReply) GetErr() string {
+	if m != nil {
+		return m.Err
+	}
+	return ""
+}
+
+type StorageDeleteArgs struct {
+	Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StorageDeleteArgs) Reset()         { *m = StorageDeleteArgs{} }
+func (m *StorageDeleteArgs) String() string { return proto.CompactTextString(m) }
+func (*StorageDeleteArgs) ProtoMessage()    {}
+func (*StorageDeleteArgs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{30}
+}
+
+func (m *StorageDeleteArgs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StorageDeleteArgs.Unmarshal(m, b)
+}
+func (m *StorageDeleteArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StorageDeleteArgs.Marshal(b, m, deterministic)
+}
+func (m *StorageDeleteArgs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StorageDeleteArgs.Merge(m, src)
+}
+func (m *StorageDeleteArgs) XXX_Size() int {
+	return xxx_messageInfo_StorageDeleteArgs.Size(m)
+}
+func (m *StorageDeleteArgs) XXX_DiscardUnknown() {
+	xxx_messageInfo_StorageDeleteArgs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StorageDeleteArgs proto.InternalMessageInfo
+
+func (m *StorageDeleteArgs) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+type StorageDeleteReply struct {
+	Err string `sentinel:"" protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StorageDeleteReply) Reset()         { *m = StorageDeleteReply{} }
+func (m *StorageDeleteReply) String() string { return proto.CompactTextString(m) }
+func (*StorageDeleteReply) ProtoMessage()    {}
+func (*StorageDeleteReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4dbf1dfe0c11846b, []int{31}
+}
+
+func (m *StorageDeleteReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StorageDeleteReply.Unmarshal(m, b)
+}
+func (m *StorageDeleteReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StorageDeleteReply.Marshal(b, m, deterministic)
+}
+func (m *StorageDeleteReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StorageDeleteReply.Merge(m, src)
+}
+func (m *StorageDeleteReply) XXX_Size() int {
+	return xxx_messageInfo_StorageDeleteReply.Size(m)
+}
+func (m *StorageDeleteReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_StorageDeleteReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StorageDeleteReply proto.InternalMessageInfo
+
+func (m *StorageDeleteReply) GetErr() string {
+	if m != nil {
+		return m.Err
+	}
+	return ""
+}
+
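Taken together, the Storage*Args/Reply pairs model a full CRUD surface over the mount's storage view. Note the asymmetry with HandleRequestReply: Err here is a plain string, so it is compared against empty rather than inspected as a ProtoError. A hedged sketch against the generated storage client assumed to accompany these messages:

```go
package client

import (
	"context"
	"errors"

	"github.com/hashicorp/vault/sdk/plugin/pb" // path assumed from this vendor tree
)

// storageRoundTrip exercises put/get/list/delete; keys and values are
// placeholders.
func storageRoundTrip(ctx context.Context, c pb.StorageClient) error {
	put, err := c.Put(ctx, &pb.StoragePutArgs{
		Entry: &pb.StorageEntry{Key: "config/demo", Value: []byte(`{"a":1}`)},
	})
	if err != nil {
		return err
	}
	if put.GetErr() != "" { // Err is a plain string here, unlike ProtoError above
		return errors.New(put.GetErr())
	}
	got, err := c.Get(ctx, &pb.StorageGetArgs{Key: "config/demo"})
	if err != nil {
		return err
	}
	if e := got.GetEntry(); e != nil { // a missing key typically yields a nil entry
		_ = e.GetValue()
	}
	if _, err := c.List(ctx, &pb.StorageListArgs{Prefix: "config/"}); err != nil {
		return err
	}
	_, err = c.Delete(ctx, &pb.StorageDeleteArgs{Key: "config/demo"})
	return err
}
```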
fileDescriptor_4dbf1dfe0c11846b, []int{31} +} + +func (m *StorageDeleteReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StorageDeleteReply.Unmarshal(m, b) +} +func (m *StorageDeleteReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StorageDeleteReply.Marshal(b, m, deterministic) +} +func (m *StorageDeleteReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageDeleteReply.Merge(m, src) +} +func (m *StorageDeleteReply) XXX_Size() int { + return xxx_messageInfo_StorageDeleteReply.Size(m) +} +func (m *StorageDeleteReply) XXX_DiscardUnknown() { + xxx_messageInfo_StorageDeleteReply.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageDeleteReply proto.InternalMessageInfo + +func (m *StorageDeleteReply) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +type TTLReply struct { + TTL int64 `sentinel:"" protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TTLReply) Reset() { *m = TTLReply{} } +func (m *TTLReply) String() string { return proto.CompactTextString(m) } +func (*TTLReply) ProtoMessage() {} +func (*TTLReply) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{32} +} + +func (m *TTLReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TTLReply.Unmarshal(m, b) +} +func (m *TTLReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TTLReply.Marshal(b, m, deterministic) +} +func (m *TTLReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_TTLReply.Merge(m, src) +} +func (m *TTLReply) XXX_Size() int { + return xxx_messageInfo_TTLReply.Size(m) +} +func (m *TTLReply) XXX_DiscardUnknown() { + xxx_messageInfo_TTLReply.DiscardUnknown(m) +} + +var xxx_messageInfo_TTLReply proto.InternalMessageInfo + +func (m *TTLReply) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +type TaintedReply struct { + Tainted bool `sentinel:"" protobuf:"varint,1,opt,name=tainted,proto3" json:"tainted,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaintedReply) Reset() { *m = TaintedReply{} } +func (m *TaintedReply) String() string { return proto.CompactTextString(m) } +func (*TaintedReply) ProtoMessage() {} +func (*TaintedReply) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{33} +} + +func (m *TaintedReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaintedReply.Unmarshal(m, b) +} +func (m *TaintedReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaintedReply.Marshal(b, m, deterministic) +} +func (m *TaintedReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaintedReply.Merge(m, src) +} +func (m *TaintedReply) XXX_Size() int { + return xxx_messageInfo_TaintedReply.Size(m) +} +func (m *TaintedReply) XXX_DiscardUnknown() { + xxx_messageInfo_TaintedReply.DiscardUnknown(m) +} + +var xxx_messageInfo_TaintedReply proto.InternalMessageInfo + +func (m *TaintedReply) GetTainted() bool { + if m != nil { + return m.Tainted + } + return false +} + +type CachingDisabledReply struct { + Disabled bool `sentinel:"" protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CachingDisabledReply) Reset() { *m = 
CachingDisabledReply{} } +func (m *CachingDisabledReply) String() string { return proto.CompactTextString(m) } +func (*CachingDisabledReply) ProtoMessage() {} +func (*CachingDisabledReply) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{34} +} + +func (m *CachingDisabledReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CachingDisabledReply.Unmarshal(m, b) +} +func (m *CachingDisabledReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CachingDisabledReply.Marshal(b, m, deterministic) +} +func (m *CachingDisabledReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_CachingDisabledReply.Merge(m, src) +} +func (m *CachingDisabledReply) XXX_Size() int { + return xxx_messageInfo_CachingDisabledReply.Size(m) +} +func (m *CachingDisabledReply) XXX_DiscardUnknown() { + xxx_messageInfo_CachingDisabledReply.DiscardUnknown(m) +} + +var xxx_messageInfo_CachingDisabledReply proto.InternalMessageInfo + +func (m *CachingDisabledReply) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +type ReplicationStateReply struct { + State int32 `sentinel:"" protobuf:"varint,1,opt,name=state,proto3" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReplicationStateReply) Reset() { *m = ReplicationStateReply{} } +func (m *ReplicationStateReply) String() string { return proto.CompactTextString(m) } +func (*ReplicationStateReply) ProtoMessage() {} +func (*ReplicationStateReply) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{35} +} + +func (m *ReplicationStateReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReplicationStateReply.Unmarshal(m, b) +} +func (m *ReplicationStateReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReplicationStateReply.Marshal(b, m, deterministic) +} +func (m *ReplicationStateReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationStateReply.Merge(m, src) +} +func (m *ReplicationStateReply) XXX_Size() int { + return xxx_messageInfo_ReplicationStateReply.Size(m) +} +func (m *ReplicationStateReply) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationStateReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationStateReply proto.InternalMessageInfo + +func (m *ReplicationStateReply) GetState() int32 { + if m != nil { + return m.State + } + return 0 +} + +type ResponseWrapDataArgs struct { + Data string `sentinel:"" protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + TTL int64 `sentinel:"" protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"` + JWT bool `sentinel:"" protobuf:"varint,3,opt,name=JWT,proto3" json:"JWT,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseWrapDataArgs) Reset() { *m = ResponseWrapDataArgs{} } +func (m *ResponseWrapDataArgs) String() string { return proto.CompactTextString(m) } +func (*ResponseWrapDataArgs) ProtoMessage() {} +func (*ResponseWrapDataArgs) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{36} +} + +func (m *ResponseWrapDataArgs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResponseWrapDataArgs.Unmarshal(m, b) +} +func (m *ResponseWrapDataArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResponseWrapDataArgs.Marshal(b, m, deterministic) +} +func (m 
*ResponseWrapDataArgs) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseWrapDataArgs.Merge(m, src) +} +func (m *ResponseWrapDataArgs) XXX_Size() int { + return xxx_messageInfo_ResponseWrapDataArgs.Size(m) +} +func (m *ResponseWrapDataArgs) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseWrapDataArgs.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseWrapDataArgs proto.InternalMessageInfo + +func (m *ResponseWrapDataArgs) GetData() string { + if m != nil { + return m.Data + } + return "" +} + +func (m *ResponseWrapDataArgs) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *ResponseWrapDataArgs) GetJWT() bool { + if m != nil { + return m.JWT + } + return false +} + +type ResponseWrapDataReply struct { + WrapInfo *ResponseWrapInfo `sentinel:"" protobuf:"bytes,1,opt,name=wrap_info,json=wrapInfo,proto3" json:"wrap_info,omitempty"` + Err string `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseWrapDataReply) Reset() { *m = ResponseWrapDataReply{} } +func (m *ResponseWrapDataReply) String() string { return proto.CompactTextString(m) } +func (*ResponseWrapDataReply) ProtoMessage() {} +func (*ResponseWrapDataReply) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{37} +} + +func (m *ResponseWrapDataReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResponseWrapDataReply.Unmarshal(m, b) +} +func (m *ResponseWrapDataReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResponseWrapDataReply.Marshal(b, m, deterministic) +} +func (m *ResponseWrapDataReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseWrapDataReply.Merge(m, src) +} +func (m *ResponseWrapDataReply) XXX_Size() int { + return xxx_messageInfo_ResponseWrapDataReply.Size(m) +} +func (m *ResponseWrapDataReply) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseWrapDataReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseWrapDataReply proto.InternalMessageInfo + +func (m *ResponseWrapDataReply) GetWrapInfo() *ResponseWrapInfo { + if m != nil { + return m.WrapInfo + } + return nil +} + +func (m *ResponseWrapDataReply) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +type MlockEnabledReply struct { + Enabled bool `sentinel:"" protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MlockEnabledReply) Reset() { *m = MlockEnabledReply{} } +func (m *MlockEnabledReply) String() string { return proto.CompactTextString(m) } +func (*MlockEnabledReply) ProtoMessage() {} +func (*MlockEnabledReply) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{38} +} + +func (m *MlockEnabledReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MlockEnabledReply.Unmarshal(m, b) +} +func (m *MlockEnabledReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MlockEnabledReply.Marshal(b, m, deterministic) +} +func (m *MlockEnabledReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_MlockEnabledReply.Merge(m, src) +} +func (m *MlockEnabledReply) XXX_Size() int { + return xxx_messageInfo_MlockEnabledReply.Size(m) +} +func (m *MlockEnabledReply) XXX_DiscardUnknown() { + xxx_messageInfo_MlockEnabledReply.DiscardUnknown(m) +} + +var 
xxx_messageInfo_MlockEnabledReply proto.InternalMessageInfo + +func (m *MlockEnabledReply) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +type LocalMountReply struct { + Local bool `sentinel:"" protobuf:"varint,1,opt,name=local,proto3" json:"local,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LocalMountReply) Reset() { *m = LocalMountReply{} } +func (m *LocalMountReply) String() string { return proto.CompactTextString(m) } +func (*LocalMountReply) ProtoMessage() {} +func (*LocalMountReply) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{39} +} + +func (m *LocalMountReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LocalMountReply.Unmarshal(m, b) +} +func (m *LocalMountReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LocalMountReply.Marshal(b, m, deterministic) +} +func (m *LocalMountReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalMountReply.Merge(m, src) +} +func (m *LocalMountReply) XXX_Size() int { + return xxx_messageInfo_LocalMountReply.Size(m) +} +func (m *LocalMountReply) XXX_DiscardUnknown() { + xxx_messageInfo_LocalMountReply.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalMountReply proto.InternalMessageInfo + +func (m *LocalMountReply) GetLocal() bool { + if m != nil { + return m.Local + } + return false +} + +type EntityInfoArgs struct { + EntityID string `sentinel:"" protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EntityInfoArgs) Reset() { *m = EntityInfoArgs{} } +func (m *EntityInfoArgs) String() string { return proto.CompactTextString(m) } +func (*EntityInfoArgs) ProtoMessage() {} +func (*EntityInfoArgs) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{40} +} + +func (m *EntityInfoArgs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EntityInfoArgs.Unmarshal(m, b) +} +func (m *EntityInfoArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EntityInfoArgs.Marshal(b, m, deterministic) +} +func (m *EntityInfoArgs) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntityInfoArgs.Merge(m, src) +} +func (m *EntityInfoArgs) XXX_Size() int { + return xxx_messageInfo_EntityInfoArgs.Size(m) +} +func (m *EntityInfoArgs) XXX_DiscardUnknown() { + xxx_messageInfo_EntityInfoArgs.DiscardUnknown(m) +} + +var xxx_messageInfo_EntityInfoArgs proto.InternalMessageInfo + +func (m *EntityInfoArgs) GetEntityID() string { + if m != nil { + return m.EntityID + } + return "" +} + +type EntityInfoReply struct { + Entity *logical.Entity `sentinel:"" protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` + Err string `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EntityInfoReply) Reset() { *m = EntityInfoReply{} } +func (m *EntityInfoReply) String() string { return proto.CompactTextString(m) } +func (*EntityInfoReply) ProtoMessage() {} +func (*EntityInfoReply) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{41} +} + +func (m *EntityInfoReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EntityInfoReply.Unmarshal(m, b) +} +func (m 
*EntityInfoReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EntityInfoReply.Marshal(b, m, deterministic) +} +func (m *EntityInfoReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntityInfoReply.Merge(m, src) +} +func (m *EntityInfoReply) XXX_Size() int { + return xxx_messageInfo_EntityInfoReply.Size(m) +} +func (m *EntityInfoReply) XXX_DiscardUnknown() { + xxx_messageInfo_EntityInfoReply.DiscardUnknown(m) +} + +var xxx_messageInfo_EntityInfoReply proto.InternalMessageInfo + +func (m *EntityInfoReply) GetEntity() *logical.Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *EntityInfoReply) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +type PluginEnvReply struct { + PluginEnvironment *logical.PluginEnvironment `sentinel:"" protobuf:"bytes,1,opt,name=plugin_environment,json=pluginEnvironment,proto3" json:"plugin_environment,omitempty"` + Err string `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginEnvReply) Reset() { *m = PluginEnvReply{} } +func (m *PluginEnvReply) String() string { return proto.CompactTextString(m) } +func (*PluginEnvReply) ProtoMessage() {} +func (*PluginEnvReply) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{42} +} + +func (m *PluginEnvReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginEnvReply.Unmarshal(m, b) +} +func (m *PluginEnvReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginEnvReply.Marshal(b, m, deterministic) +} +func (m *PluginEnvReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginEnvReply.Merge(m, src) +} +func (m *PluginEnvReply) XXX_Size() int { + return xxx_messageInfo_PluginEnvReply.Size(m) +} +func (m *PluginEnvReply) XXX_DiscardUnknown() { + xxx_messageInfo_PluginEnvReply.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginEnvReply proto.InternalMessageInfo + +func (m *PluginEnvReply) GetPluginEnvironment() *logical.PluginEnvironment { + if m != nil { + return m.PluginEnvironment + } + return nil +} + +func (m *PluginEnvReply) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +type Connection struct { + // RemoteAddr is the network address that sent the request. 
+ RemoteAddr string `sentinel:"" protobuf:"bytes,1,opt,name=remote_addr,json=remoteAddr,proto3" json:"remote_addr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Connection) Reset() { *m = Connection{} } +func (m *Connection) String() string { return proto.CompactTextString(m) } +func (*Connection) ProtoMessage() {} +func (*Connection) Descriptor() ([]byte, []int) { + return fileDescriptor_4dbf1dfe0c11846b, []int{43} +} + +func (m *Connection) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Connection.Unmarshal(m, b) +} +func (m *Connection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Connection.Marshal(b, m, deterministic) +} +func (m *Connection) XXX_Merge(src proto.Message) { + xxx_messageInfo_Connection.Merge(m, src) +} +func (m *Connection) XXX_Size() int { + return xxx_messageInfo_Connection.Size(m) +} +func (m *Connection) XXX_DiscardUnknown() { + xxx_messageInfo_Connection.DiscardUnknown(m) +} + +var xxx_messageInfo_Connection proto.InternalMessageInfo + +func (m *Connection) GetRemoteAddr() string { + if m != nil { + return m.RemoteAddr + } + return "" +} + +func init() { + proto.RegisterType((*Empty)(nil), "pb.Empty") + proto.RegisterType((*Header)(nil), "pb.Header") + proto.RegisterType((*ProtoError)(nil), "pb.ProtoError") + proto.RegisterType((*Paths)(nil), "pb.Paths") + proto.RegisterType((*Request)(nil), "pb.Request") + proto.RegisterMapType((map[string]*Header)(nil), "pb.Request.HeadersEntry") + proto.RegisterType((*Auth)(nil), "pb.Auth") + proto.RegisterMapType((map[string]string)(nil), "pb.Auth.MetadataEntry") + proto.RegisterType((*TokenEntry)(nil), "pb.TokenEntry") + proto.RegisterMapType((map[string]string)(nil), "pb.TokenEntry.MetaEntry") + proto.RegisterType((*LeaseOptions)(nil), "pb.LeaseOptions") + proto.RegisterType((*Secret)(nil), "pb.Secret") + proto.RegisterType((*Response)(nil), "pb.Response") + proto.RegisterMapType((map[string]*Header)(nil), "pb.Response.HeadersEntry") + proto.RegisterType((*ResponseWrapInfo)(nil), "pb.ResponseWrapInfo") + proto.RegisterType((*RequestWrapInfo)(nil), "pb.RequestWrapInfo") + proto.RegisterType((*HandleRequestArgs)(nil), "pb.HandleRequestArgs") + proto.RegisterType((*HandleRequestReply)(nil), "pb.HandleRequestReply") + proto.RegisterType((*InitializeArgs)(nil), "pb.InitializeArgs") + proto.RegisterType((*InitializeReply)(nil), "pb.InitializeReply") + proto.RegisterType((*SpecialPathsReply)(nil), "pb.SpecialPathsReply") + proto.RegisterType((*HandleExistenceCheckArgs)(nil), "pb.HandleExistenceCheckArgs") + proto.RegisterType((*HandleExistenceCheckReply)(nil), "pb.HandleExistenceCheckReply") + proto.RegisterType((*SetupArgs)(nil), "pb.SetupArgs") + proto.RegisterMapType((map[string]string)(nil), "pb.SetupArgs.ConfigEntry") + proto.RegisterType((*SetupReply)(nil), "pb.SetupReply") + proto.RegisterType((*TypeReply)(nil), "pb.TypeReply") + proto.RegisterType((*InvalidateKeyArgs)(nil), "pb.InvalidateKeyArgs") + proto.RegisterType((*StorageEntry)(nil), "pb.StorageEntry") + proto.RegisterType((*StorageListArgs)(nil), "pb.StorageListArgs") + proto.RegisterType((*StorageListReply)(nil), "pb.StorageListReply") + proto.RegisterType((*StorageGetArgs)(nil), "pb.StorageGetArgs") + proto.RegisterType((*StorageGetReply)(nil), "pb.StorageGetReply") + proto.RegisterType((*StoragePutArgs)(nil), "pb.StoragePutArgs") + proto.RegisterType((*StoragePutReply)(nil), "pb.StoragePutReply") + 
proto.RegisterType((*StorageDeleteArgs)(nil), "pb.StorageDeleteArgs") + proto.RegisterType((*StorageDeleteReply)(nil), "pb.StorageDeleteReply") + proto.RegisterType((*TTLReply)(nil), "pb.TTLReply") + proto.RegisterType((*TaintedReply)(nil), "pb.TaintedReply") + proto.RegisterType((*CachingDisabledReply)(nil), "pb.CachingDisabledReply") + proto.RegisterType((*ReplicationStateReply)(nil), "pb.ReplicationStateReply") + proto.RegisterType((*ResponseWrapDataArgs)(nil), "pb.ResponseWrapDataArgs") + proto.RegisterType((*ResponseWrapDataReply)(nil), "pb.ResponseWrapDataReply") + proto.RegisterType((*MlockEnabledReply)(nil), "pb.MlockEnabledReply") + proto.RegisterType((*LocalMountReply)(nil), "pb.LocalMountReply") + proto.RegisterType((*EntityInfoArgs)(nil), "pb.EntityInfoArgs") + proto.RegisterType((*EntityInfoReply)(nil), "pb.EntityInfoReply") + proto.RegisterType((*PluginEnvReply)(nil), "pb.PluginEnvReply") + proto.RegisterType((*Connection)(nil), "pb.Connection") +} + +func init() { proto.RegisterFile("sdk/plugin/pb/backend.proto", fileDescriptor_4dbf1dfe0c11846b) } + +var fileDescriptor_4dbf1dfe0c11846b = []byte{ + // 2504 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0xcd, 0x72, 0x1b, 0xc7, + 0x11, 0x2e, 0x00, 0xc4, 0x5f, 0xe3, 0x7f, 0x48, 0x2b, 0x2b, 0x48, 0x8e, 0xe8, 0x75, 0x24, 0xd3, + 0x8c, 0x0d, 0x5a, 0x54, 0x14, 0xcb, 0x49, 0x25, 0x29, 0x9a, 0xa2, 0x65, 0xc6, 0xa4, 0xcd, 0x5a, + 0x42, 0x71, 0xfe, 0xaa, 0xe0, 0xc1, 0xee, 0x10, 0xdc, 0xe2, 0x62, 0x77, 0x33, 0x3b, 0x4b, 0x11, + 0xb9, 0xe4, 0x96, 0x47, 0xc8, 0x1b, 0xe4, 0x9c, 0x6b, 0x6e, 0xb9, 0xba, 0x72, 0xcf, 0x2b, 0xe4, + 0x39, 0x52, 0xd3, 0x33, 0xfb, 0x07, 0x80, 0xb6, 0x5c, 0xe5, 0xdc, 0x76, 0xba, 0x7b, 0x7a, 0x66, + 0x7a, 0xbe, 0xfe, 0xba, 0x07, 0x80, 0x7b, 0x91, 0x73, 0xb5, 0x17, 0x7a, 0xf1, 0xcc, 0xf5, 0xf7, + 0xc2, 0xe9, 0xde, 0x94, 0xda, 0x57, 0xcc, 0x77, 0x46, 0x21, 0x0f, 0x44, 0x40, 0xca, 0xe1, 0x74, + 0xf8, 0x60, 0x16, 0x04, 0x33, 0x8f, 0xed, 0xa1, 0x64, 0x1a, 0x5f, 0xec, 0x09, 0x77, 0xce, 0x22, + 0x41, 0xe7, 0xa1, 0x32, 0x1a, 0x0e, 0xa5, 0x07, 0x2f, 0x98, 0xb9, 0x36, 0xf5, 0xf6, 0x5c, 0x87, + 0xf9, 0xc2, 0x15, 0x0b, 0xad, 0x33, 0xf2, 0x3a, 0xb5, 0x8a, 0xd2, 0x98, 0x75, 0xa8, 0x1e, 0xcd, + 0x43, 0xb1, 0x30, 0xb7, 0xa1, 0xf6, 0x29, 0xa3, 0x0e, 0xe3, 0xe4, 0x0e, 0xd4, 0x2e, 0xf1, 0xcb, + 0x28, 0x6d, 0x57, 0x76, 0x9a, 0x96, 0x1e, 0x99, 0x7f, 0x00, 0x38, 0x93, 0x73, 0x8e, 0x38, 0x0f, + 0x38, 0xb9, 0x0b, 0x0d, 0xc6, 0xf9, 0x44, 0x2c, 0x42, 0x66, 0x94, 0xb6, 0x4b, 0x3b, 0x1d, 0xab, + 0xce, 0x38, 0x1f, 0x2f, 0x42, 0x46, 0x7e, 0x00, 0xf2, 0x73, 0x32, 0x8f, 0x66, 0x46, 0x79, 0xbb, + 0x24, 0x3d, 0x30, 0xce, 0x4f, 0xa3, 0x59, 0x32, 0xc7, 0x0e, 0x1c, 0x66, 0x54, 0xb6, 0x4b, 0x3b, + 0x15, 0x9c, 0x73, 0x18, 0x38, 0xcc, 0xfc, 0x5b, 0x09, 0xaa, 0x67, 0x54, 0x5c, 0x46, 0x84, 0xc0, + 0x06, 0x0f, 0x02, 0xa1, 0x17, 0xc7, 0x6f, 0xb2, 0x03, 0xbd, 0xd8, 0xa7, 0xb1, 0xb8, 0x94, 0xa7, + 0xb2, 0xa9, 0x60, 0x8e, 0x51, 0x46, 0xf5, 0xb2, 0x98, 0xbc, 0x0d, 0x1d, 0x2f, 0xb0, 0xa9, 0x37, + 0x89, 0x44, 0xc0, 0xe9, 0x4c, 0xae, 0x23, 0xed, 0xda, 0x28, 0x3c, 0x57, 0x32, 0xb2, 0x0b, 0x83, + 0x88, 0x51, 0x6f, 0xf2, 0x8a, 0xd3, 0x30, 0x35, 0xdc, 0x50, 0x0e, 0xa5, 0xe2, 0x4b, 0x4e, 0x43, + 0x6d, 0x6b, 0xfe, 0xab, 0x06, 0x75, 0x8b, 0xfd, 0x29, 0x66, 0x91, 0x20, 0x5d, 0x28, 0xbb, 0x0e, + 0x9e, 0xb6, 0x69, 0x95, 0x5d, 0x87, 0x8c, 0x80, 0x58, 0x2c, 0xf4, 0xe4, 0xd2, 0x6e, 0xe0, 0x1f, + 0x7a, 0x71, 0x24, 0x18, 0xd7, 0x67, 0x5e, 0xa3, 0x21, 0xf7, 0xa1, 0x19, 0x84, 0x8c, 0xa3, 0x0c, + 0x03, 0xd0, 0xb4, 0x32, 
0x81, 0x3c, 0x78, 0x48, 0xc5, 0xa5, 0xb1, 0x81, 0x0a, 0xfc, 0x96, 0x32, + 0x87, 0x0a, 0x6a, 0x54, 0x95, 0x4c, 0x7e, 0x13, 0x13, 0x6a, 0x11, 0xb3, 0x39, 0x13, 0x46, 0x6d, + 0xbb, 0xb4, 0xd3, 0xda, 0x87, 0x51, 0x38, 0x1d, 0x9d, 0xa3, 0xc4, 0xd2, 0x1a, 0x72, 0x1f, 0x36, + 0x64, 0x5c, 0x8c, 0x3a, 0x5a, 0x34, 0xa4, 0xc5, 0x41, 0x2c, 0x2e, 0x2d, 0x94, 0x92, 0x7d, 0xa8, + 0xab, 0x3b, 0x8d, 0x8c, 0xc6, 0x76, 0x65, 0xa7, 0xb5, 0x6f, 0x48, 0x03, 0x7d, 0xca, 0x91, 0x82, + 0x41, 0x74, 0xe4, 0x0b, 0xbe, 0xb0, 0x12, 0x43, 0xf2, 0x16, 0xb4, 0x6d, 0xcf, 0x65, 0xbe, 0x98, + 0x88, 0xe0, 0x8a, 0xf9, 0x46, 0x13, 0x77, 0xd4, 0x52, 0xb2, 0xb1, 0x14, 0x91, 0x7d, 0x78, 0x23, + 0x6f, 0x32, 0xa1, 0xb6, 0xcd, 0xa2, 0x28, 0xe0, 0x06, 0xa0, 0xed, 0x66, 0xce, 0xf6, 0x40, 0xab, + 0xa4, 0x5b, 0xc7, 0x8d, 0x42, 0x8f, 0x2e, 0x26, 0x3e, 0x9d, 0x33, 0xa3, 0xa5, 0xdc, 0x6a, 0xd9, + 0xe7, 0x74, 0xce, 0xc8, 0x03, 0x68, 0xcd, 0x83, 0xd8, 0x17, 0x93, 0x30, 0x70, 0x7d, 0x61, 0xb4, + 0xd1, 0x02, 0x50, 0x74, 0x26, 0x25, 0xe4, 0x4d, 0x50, 0x23, 0x05, 0xc6, 0x8e, 0x8a, 0x2b, 0x4a, + 0x10, 0x8e, 0x0f, 0xa1, 0xab, 0xd4, 0xe9, 0x7e, 0xba, 0x68, 0xd2, 0x41, 0x69, 0xba, 0x93, 0x0f, + 0xa0, 0x89, 0x78, 0x70, 0xfd, 0x8b, 0xc0, 0xe8, 0x61, 0xdc, 0x36, 0x73, 0x61, 0x91, 0x98, 0x38, + 0xf6, 0x2f, 0x02, 0xab, 0xf1, 0x4a, 0x7f, 0x91, 0x5f, 0xc0, 0xbd, 0xc2, 0x79, 0x39, 0x9b, 0x53, + 0xd7, 0x77, 0xfd, 0xd9, 0x24, 0x8e, 0x58, 0x64, 0xf4, 0x11, 0xe1, 0x46, 0xee, 0xd4, 0x56, 0x62, + 0xf0, 0x32, 0x62, 0x11, 0xb9, 0x07, 0x4d, 0x95, 0xa4, 0x13, 0xd7, 0x31, 0x06, 0xb8, 0xa5, 0x86, + 0x12, 0x1c, 0x3b, 0xe4, 0x1d, 0xe8, 0x85, 0x81, 0xe7, 0xda, 0x8b, 0x49, 0x70, 0xcd, 0x38, 0x77, + 0x1d, 0x66, 0x90, 0xed, 0xd2, 0x4e, 0xc3, 0xea, 0x2a, 0xf1, 0x17, 0x5a, 0xba, 0x2e, 0x35, 0x36, + 0xd1, 0x70, 0x25, 0x35, 0x46, 0x00, 0x76, 0xe0, 0xfb, 0xcc, 0x46, 0xf8, 0x6d, 0xe1, 0x09, 0xbb, + 0xf2, 0x84, 0x87, 0xa9, 0xd4, 0xca, 0x59, 0x0c, 0x3f, 0x81, 0x76, 0x1e, 0x0a, 0xa4, 0x0f, 0x95, + 0x2b, 0xb6, 0xd0, 0xf0, 0x97, 0x9f, 0x64, 0x1b, 0xaa, 0xd7, 0xd4, 0x8b, 0x19, 0x42, 0x5e, 0x03, + 0x51, 0x4d, 0xb1, 0x94, 0xe2, 0x67, 0xe5, 0x67, 0x25, 0xf3, 0xbf, 0x55, 0xd8, 0x90, 0xe0, 0x23, + 0x4f, 0xa1, 0xe3, 0x31, 0x1a, 0xb1, 0x49, 0x10, 0xca, 0x05, 0x22, 0x74, 0xd5, 0xda, 0xef, 0xcb, + 0x69, 0x27, 0x52, 0xf1, 0x85, 0x92, 0x5b, 0x6d, 0x2f, 0x37, 0x92, 0x29, 0xed, 0xfa, 0x82, 0x71, + 0x9f, 0x7a, 0x13, 0x4c, 0x06, 0x95, 0x60, 0xed, 0x44, 0xf8, 0x5c, 0x26, 0xc5, 0x32, 0x8e, 0x2a, + 0xab, 0x38, 0x1a, 0x42, 0x03, 0x63, 0xe7, 0xb2, 0x48, 0x27, 0x7b, 0x3a, 0x26, 0xfb, 0xd0, 0x98, + 0x33, 0x41, 0x75, 0xae, 0xc9, 0x94, 0xb8, 0x93, 0xe4, 0xcc, 0xe8, 0x54, 0x2b, 0x54, 0x42, 0xa4, + 0x76, 0x2b, 0x19, 0x51, 0x5b, 0xcd, 0x88, 0x21, 0x34, 0x52, 0xd0, 0xd5, 0xd5, 0x0d, 0x27, 0x63, + 0x49, 0xb3, 0x21, 0xe3, 0x6e, 0xe0, 0x18, 0x0d, 0x04, 0x8a, 0x1e, 0x49, 0x92, 0xf4, 0xe3, 0xb9, + 0x82, 0x50, 0x53, 0x91, 0xa4, 0x1f, 0xcf, 0x57, 0x11, 0x03, 0x4b, 0x88, 0xf9, 0x11, 0x54, 0xa9, + 0xe7, 0xd2, 0x08, 0x53, 0x48, 0xde, 0xac, 0xe6, 0xfb, 0xd1, 0x81, 0x94, 0x5a, 0x4a, 0x49, 0x9e, + 0x40, 0x67, 0xc6, 0x83, 0x38, 0x9c, 0xe0, 0x90, 0x45, 0x46, 0x1b, 0x4f, 0xbb, 0x6c, 0xdd, 0x46, + 0xa3, 0x03, 0x65, 0x23, 0x33, 0x70, 0x1a, 0xc4, 0xbe, 0x33, 0xb1, 0x5d, 0x87, 0x47, 0x46, 0x07, + 0x83, 0x07, 0x28, 0x3a, 0x94, 0x12, 0x99, 0x62, 0x2a, 0x05, 0xd2, 0x00, 0x77, 0xd1, 0xa6, 0x83, + 0xd2, 0xb3, 0x24, 0xca, 0x3f, 0x86, 0x41, 0x52, 0x98, 0x32, 0xcb, 0x1e, 0x5a, 0xf6, 0x13, 0x45, + 0x6a, 0xbc, 0x03, 0x7d, 0x76, 0x23, 0x29, 0xd4, 0x15, 0x93, 0x39, 0xbd, 0x99, 0x08, 0xe1, 0xe9, + 0x94, 0xea, 0x26, 0xf2, 0x53, 0x7a, 0x33, 0x16, 
0x9e, 0xcc, 0x7f, 0xb5, 0x3a, 0xe6, 0xff, 0x00, + 0x8b, 0x51, 0x13, 0x25, 0x98, 0xff, 0xbb, 0x30, 0xf0, 0x83, 0x89, 0xc3, 0x2e, 0x68, 0xec, 0x09, + 0xb5, 0xee, 0x42, 0x27, 0x53, 0xcf, 0x0f, 0x9e, 0x2b, 0x39, 0x2e, 0xbb, 0x18, 0xfe, 0x1c, 0x3a, + 0x85, 0xeb, 0x5e, 0x03, 0xfa, 0xad, 0x3c, 0xe8, 0x9b, 0x79, 0xa0, 0xff, 0x7b, 0x03, 0x00, 0xef, + 0x5d, 0x4d, 0x5d, 0xae, 0x16, 0x79, 0x30, 0x94, 0xd7, 0x80, 0x81, 0x72, 0xe6, 0x0b, 0x0d, 0x5c, + 0x3d, 0xfa, 0x46, 0xcc, 0x26, 0xf5, 0xa2, 0x9a, 0xab, 0x17, 0xef, 0xc1, 0x86, 0xc4, 0xa7, 0x51, + 0xcb, 0x68, 0x3d, 0xdb, 0x11, 0x22, 0x59, 0xa1, 0x18, 0xad, 0x56, 0x92, 0xa6, 0xbe, 0x9a, 0x34, + 0x79, 0x34, 0x36, 0x8a, 0x68, 0x7c, 0x1b, 0x3a, 0x36, 0x67, 0x58, 0xbb, 0x26, 0xb2, 0x19, 0xd1, + 0x68, 0x6d, 0x27, 0xc2, 0xb1, 0x3b, 0x67, 0x32, 0x7e, 0xf2, 0xe2, 0x00, 0x55, 0xf2, 0x73, 0xed, + 0xbd, 0xb6, 0xd6, 0xde, 0x2b, 0x76, 0x02, 0x1e, 0xd3, 0x8c, 0x8f, 0xdf, 0xb9, 0xac, 0xe9, 0x14, + 0xb2, 0xa6, 0x90, 0x1a, 0xdd, 0xa5, 0xd4, 0x58, 0xc2, 0x6f, 0x6f, 0x05, 0xbf, 0x6f, 0x41, 0x5b, + 0x06, 0x20, 0x0a, 0xa9, 0xcd, 0xa4, 0x83, 0xbe, 0x0a, 0x44, 0x2a, 0x3b, 0x76, 0x30, 0xdb, 0xe3, + 0xe9, 0x74, 0x71, 0x19, 0x78, 0x2c, 0x23, 0xec, 0x56, 0x2a, 0x3b, 0x76, 0xe4, 0x7e, 0x11, 0x81, + 0x04, 0x11, 0x88, 0xdf, 0xc3, 0x0f, 0xa1, 0x99, 0x46, 0xfd, 0x3b, 0x81, 0xe9, 0x1f, 0x25, 0x68, + 0xe7, 0x49, 0x51, 0x4e, 0x1e, 0x8f, 0x4f, 0x70, 0x72, 0xc5, 0x92, 0x9f, 0xb2, 0x9d, 0xe0, 0xcc, + 0x67, 0xaf, 0xe8, 0xd4, 0x53, 0x0e, 0x1a, 0x56, 0x26, 0x90, 0x5a, 0xd7, 0xb7, 0x39, 0x9b, 0x27, + 0xa8, 0xaa, 0x58, 0x99, 0x80, 0x7c, 0x04, 0xe0, 0x46, 0x51, 0xcc, 0xd4, 0xcd, 0x6d, 0x20, 0x65, + 0x0c, 0x47, 0xaa, 0xc7, 0x1c, 0x25, 0x3d, 0xe6, 0x68, 0x9c, 0xf4, 0x98, 0x56, 0x13, 0xad, 0xf1, + 0x4a, 0xef, 0x40, 0x4d, 0x5e, 0xd0, 0xf8, 0x04, 0x91, 0x57, 0xb1, 0xf4, 0xc8, 0xfc, 0x0b, 0xd4, + 0x54, 0x17, 0xf2, 0x7f, 0x25, 0xfa, 0xbb, 0xd0, 0x50, 0xbe, 0x5d, 0x47, 0xe7, 0x4a, 0x1d, 0xc7, + 0xc7, 0x8e, 0xf9, 0x75, 0x19, 0x1a, 0x16, 0x8b, 0xc2, 0xc0, 0x8f, 0x58, 0xae, 0x4b, 0x2a, 0x7d, + 0x6b, 0x97, 0x54, 0x5e, 0xdb, 0x25, 0x25, 0xbd, 0x57, 0x25, 0xd7, 0x7b, 0x0d, 0xa1, 0xc1, 0x99, + 0xe3, 0x72, 0x66, 0x0b, 0xdd, 0xa7, 0xa5, 0x63, 0xa9, 0x7b, 0x45, 0xb9, 0x2c, 0xef, 0x11, 0xd6, + 0x90, 0xa6, 0x95, 0x8e, 0xc9, 0xe3, 0x7c, 0x73, 0xa1, 0xda, 0xb6, 0x2d, 0xd5, 0x5c, 0xa8, 0xed, + 0xae, 0xe9, 0x2e, 0x9e, 0x64, 0x4d, 0x5a, 0x1d, 0xb3, 0xf9, 0x6e, 0x7e, 0xc2, 0xfa, 0x2e, 0xed, + 0x7b, 0xab, 0xd9, 0x5f, 0x97, 0xa1, 0xbf, 0xbc, 0xb7, 0x35, 0x08, 0xdc, 0x82, 0xaa, 0xaa, 0x7d, + 0x1a, 0xbe, 0x62, 0xa5, 0xea, 0x55, 0x96, 0x88, 0xee, 0x57, 0xcb, 0xa4, 0xf1, 0xed, 0xd0, 0x2b, + 0x12, 0xca, 0xbb, 0xd0, 0x97, 0x21, 0x0a, 0x99, 0x93, 0xf5, 0x73, 0x8a, 0x01, 0x7b, 0x5a, 0x9e, + 0x76, 0x74, 0xbb, 0x30, 0x48, 0x4c, 0x33, 0x6e, 0xa8, 0x15, 0x6c, 0x8f, 0x12, 0x8a, 0xb8, 0x03, + 0xb5, 0x8b, 0x80, 0xcf, 0xa9, 0xd0, 0x24, 0xa8, 0x47, 0x05, 0x92, 0x43, 0xb6, 0x6d, 0x28, 0x4c, + 0x26, 0x42, 0xf9, 0x66, 0x91, 0xe4, 0x93, 0xbe, 0x27, 0x90, 0x05, 0x1b, 0x56, 0x23, 0x79, 0x47, + 0x98, 0xbf, 0x85, 0xde, 0x52, 0x0b, 0xb9, 0x26, 0x90, 0xd9, 0xf2, 0xe5, 0xc2, 0xf2, 0x05, 0xcf, + 0x95, 0x25, 0xcf, 0xbf, 0x83, 0xc1, 0xa7, 0xd4, 0x77, 0x3c, 0xa6, 0xfd, 0x1f, 0xf0, 0x59, 0x24, + 0x8b, 0xa1, 0x7e, 0xd1, 0x4c, 0x74, 0xf5, 0xe9, 0x58, 0x4d, 0x2d, 0x39, 0x76, 0xc8, 0x43, 0xa8, + 0x73, 0x65, 0xad, 0x01, 0xd0, 0xca, 0xf5, 0xb8, 0x56, 0xa2, 0x33, 0xbf, 0x02, 0x52, 0x70, 0x2d, + 0x1f, 0x33, 0x0b, 0xb2, 0x23, 0xd1, 0xaf, 0x40, 0xa1, 0xb3, 0xaa, 0x9d, 0xc7, 0xa4, 0x95, 0x6a, + 0xc9, 0x36, 0x54, 0x18, 0xe7, 0x7a, 0x09, 0x6c, 0x32, 0xb3, 0xa7, 0xa3, 
0x25, 0x55, 0x66, 0x1f, + 0xba, 0xc7, 0xbe, 0x2b, 0x5c, 0xea, 0xb9, 0x7f, 0x66, 0x72, 0xe7, 0xe6, 0x13, 0xe8, 0x65, 0x12, + 0xb5, 0xa0, 0x76, 0x53, 0xba, 0xdd, 0xcd, 0x4f, 0x60, 0x70, 0x1e, 0x32, 0xdb, 0xa5, 0x1e, 0xbe, + 0x1e, 0xd5, 0xb4, 0x07, 0x50, 0x95, 0x77, 0x95, 0xf0, 0x4e, 0x13, 0x27, 0xa2, 0x5a, 0xc9, 0xcd, + 0xaf, 0xc0, 0x50, 0xc7, 0x3b, 0xba, 0x71, 0x23, 0xc1, 0x7c, 0x9b, 0x1d, 0x5e, 0x32, 0xfb, 0xea, + 0x7b, 0x0c, 0xe0, 0x35, 0xdc, 0x5d, 0xb7, 0x42, 0xb2, 0xbf, 0x96, 0x2d, 0x47, 0x93, 0x0b, 0x59, + 0x82, 0x70, 0x8d, 0x86, 0x05, 0x28, 0xfa, 0x44, 0x4a, 0x24, 0x1c, 0x98, 0x9c, 0x17, 0x69, 0x5a, + 0xd7, 0xa3, 0x24, 0x1e, 0x95, 0xdb, 0xe3, 0xf1, 0xcf, 0x12, 0x34, 0xcf, 0x99, 0x88, 0x43, 0x3c, + 0xcb, 0x3d, 0x68, 0x4e, 0x79, 0x70, 0xc5, 0x78, 0x76, 0x94, 0x86, 0x12, 0x1c, 0x3b, 0xe4, 0x31, + 0xd4, 0x0e, 0x03, 0xff, 0xc2, 0x9d, 0xe1, 0x5b, 0x5a, 0xf3, 0x4b, 0x3a, 0x77, 0xa4, 0x74, 0x8a, + 0x5f, 0xb4, 0x21, 0xd9, 0x86, 0x96, 0xfe, 0x65, 0xe2, 0xe5, 0xcb, 0xe3, 0xe7, 0x49, 0x93, 0x9d, + 0x13, 0x0d, 0x3f, 0x82, 0x56, 0x6e, 0xe2, 0x77, 0xaa, 0x78, 0x3f, 0x04, 0xc0, 0xd5, 0x55, 0x8c, + 0xfa, 0xd9, 0xd5, 0x37, 0xd5, 0xd1, 0x1e, 0x40, 0x53, 0xf6, 0x73, 0x4a, 0x9d, 0xd4, 0xda, 0x52, + 0x56, 0x6b, 0xcd, 0x87, 0x30, 0x38, 0xf6, 0xaf, 0xa9, 0xe7, 0x3a, 0x54, 0xb0, 0xcf, 0xd8, 0x02, + 0x43, 0xb0, 0xb2, 0x03, 0xf3, 0x1c, 0xda, 0xfa, 0x71, 0xff, 0x5a, 0x7b, 0x6c, 0xeb, 0x3d, 0x7e, + 0x73, 0x2e, 0xbe, 0x0b, 0x3d, 0xed, 0xf4, 0xc4, 0xd5, 0x99, 0x28, 0x5b, 0x15, 0xce, 0x2e, 0xdc, + 0x1b, 0xed, 0x5a, 0x8f, 0xcc, 0x67, 0xd0, 0xcf, 0x99, 0xa6, 0xc7, 0xb9, 0x62, 0x8b, 0x28, 0xf9, + 0xd1, 0x43, 0x7e, 0x27, 0x11, 0x28, 0x67, 0x11, 0x30, 0xa1, 0xab, 0x67, 0xbe, 0x60, 0xe2, 0x96, + 0xd3, 0x7d, 0x96, 0x6e, 0xe4, 0x05, 0xd3, 0xce, 0x1f, 0x41, 0x95, 0xc9, 0x93, 0xe6, 0xcb, 0x70, + 0x3e, 0x02, 0x96, 0x52, 0xaf, 0x59, 0xf0, 0x59, 0xba, 0xe0, 0x59, 0xac, 0x16, 0x7c, 0x4d, 0x5f, + 0xe6, 0xdb, 0xe9, 0x36, 0xce, 0x62, 0x71, 0xdb, 0x8d, 0x3e, 0x84, 0x81, 0x36, 0x7a, 0xce, 0x3c, + 0x26, 0xd8, 0x2d, 0x47, 0x7a, 0x04, 0xa4, 0x60, 0x76, 0x9b, 0xbb, 0xfb, 0xd0, 0x18, 0x8f, 0x4f, + 0x52, 0x6d, 0x91, 0x62, 0xcd, 0x1d, 0x68, 0x8f, 0xa9, 0x6c, 0x25, 0x1c, 0x65, 0x61, 0x40, 0x5d, + 0xa8, 0xb1, 0x4e, 0xc0, 0x64, 0x68, 0xee, 0xc3, 0xd6, 0x21, 0xb5, 0x2f, 0x5d, 0x7f, 0xf6, 0xdc, + 0x8d, 0x64, 0x2f, 0xa5, 0x67, 0x0c, 0xa1, 0xe1, 0x68, 0x81, 0x9e, 0x92, 0x8e, 0xcd, 0xf7, 0xe1, + 0x8d, 0xdc, 0x0f, 0x3e, 0xe7, 0x82, 0x26, 0xdb, 0xdc, 0x82, 0x6a, 0x24, 0x47, 0x38, 0xa3, 0x6a, + 0xa9, 0x81, 0xf9, 0x39, 0x6c, 0xe5, 0xcb, 0xab, 0xec, 0x6c, 0xf0, 0xf0, 0x49, 0xcf, 0x51, 0xca, + 0xf5, 0x1c, 0xfa, 0x28, 0xe5, 0xac, 0x5a, 0xf4, 0xa1, 0xf2, 0xeb, 0x2f, 0xc7, 0x1a, 0x83, 0xf2, + 0xd3, 0xfc, 0xa3, 0x5c, 0xbe, 0xe8, 0x4f, 0x2d, 0x5f, 0x68, 0x3c, 0x4a, 0xaf, 0xd5, 0x78, 0xac, + 0xc2, 0xe0, 0x7d, 0x18, 0x9c, 0x7a, 0x81, 0x7d, 0x75, 0xe4, 0xe7, 0xa2, 0x61, 0x40, 0x9d, 0xf9, + 0xf9, 0x60, 0x24, 0x43, 0xf3, 0x1d, 0xe8, 0x9d, 0x04, 0x36, 0xf5, 0x4e, 0x83, 0xd8, 0x17, 0x69, + 0x14, 0xf0, 0x17, 0x38, 0x6d, 0xaa, 0x06, 0xe6, 0xfb, 0xd0, 0xd5, 0x05, 0xd8, 0xbf, 0x08, 0x12, + 0xc2, 0xca, 0x4a, 0x75, 0xa9, 0xd8, 0xc6, 0x9b, 0x27, 0xd0, 0xcb, 0xcc, 0x95, 0xdf, 0x77, 0xa0, + 0xa6, 0xd4, 0xfa, 0x6c, 0xbd, 0xf4, 0x1d, 0xab, 0x2c, 0x2d, 0xad, 0x5e, 0x73, 0xa8, 0x39, 0x74, + 0xcf, 0xf0, 0x97, 0xd0, 0x23, 0xff, 0x5a, 0x39, 0x3b, 0x06, 0xa2, 0x7e, 0x1b, 0x9d, 0x30, 0xff, + 0xda, 0xe5, 0x81, 0x8f, 0xad, 0x73, 0x49, 0x37, 0x28, 0x89, 0xe3, 0x74, 0x52, 0x62, 0x61, 0x0d, + 0xc2, 0x65, 0xd1, 0xda, 0x18, 0x42, 0xf6, 0x3b, 0x8b, 0xac, 0x00, 0x9c, 0xcd, 0x03, 0xc1, 0x26, + 
0xd4, 0x71, 0x12, 0x10, 0x83, 0x12, 0x1d, 0x38, 0x0e, 0xdf, 0xff, 0x7b, 0x05, 0xea, 0x1f, 0x2b, + 0x5e, 0x25, 0xbf, 0x84, 0x4e, 0xa1, 0x18, 0x93, 0x37, 0xb0, 0x69, 0x5b, 0x2e, 0xfd, 0xc3, 0x3b, + 0x2b, 0x62, 0x75, 0xae, 0x0f, 0xa0, 0x9d, 0xaf, 0x91, 0x04, 0xeb, 0x21, 0xfe, 0xea, 0x3b, 0x44, + 0x4f, 0xab, 0x05, 0xf4, 0x1c, 0xb6, 0xd6, 0x55, 0x2f, 0x72, 0x3f, 0x5b, 0x61, 0xb5, 0x72, 0x0e, + 0xdf, 0xbc, 0x4d, 0x9b, 0x54, 0xbd, 0xfa, 0xa1, 0xc7, 0xa8, 0x1f, 0x87, 0xf9, 0x1d, 0x64, 0x9f, + 0xe4, 0x31, 0x74, 0x0a, 0xfc, 0xad, 0xce, 0xb9, 0x42, 0xe9, 0xf9, 0x29, 0x8f, 0xa0, 0x8a, 0x35, + 0x83, 0x74, 0x0a, 0xc5, 0x6b, 0xd8, 0x4d, 0x87, 0x6a, 0xed, 0xa7, 0x00, 0x59, 0x6f, 0x41, 0x88, + 0xf2, 0x9b, 0xef, 0x3e, 0x86, 0x9b, 0x45, 0x59, 0xd2, 0x7f, 0x6c, 0xe0, 0x4f, 0x08, 0xb9, 0xfd, + 0xe2, 0x42, 0x69, 0x1d, 0xda, 0xff, 0x4f, 0x09, 0xea, 0xc9, 0xcf, 0xca, 0x8f, 0x61, 0x43, 0x32, + 0x3a, 0xd9, 0xcc, 0x91, 0x62, 0x52, 0x0d, 0x86, 0x5b, 0x4b, 0x42, 0xb5, 0xc0, 0x08, 0x2a, 0x2f, + 0x98, 0x50, 0x1b, 0x2a, 0x52, 0xfb, 0x70, 0xb3, 0x28, 0x4b, 0xed, 0xcf, 0xe2, 0xa2, 0xbd, 0x66, + 0xe6, 0x82, 0x7d, 0xca, 0xb9, 0x1f, 0x42, 0x4d, 0x71, 0xa6, 0x8a, 0xe5, 0x0a, 0xdb, 0x2a, 0xcc, + 0xac, 0xb2, 0xeb, 0xfe, 0x5f, 0x37, 0x00, 0xce, 0x17, 0x91, 0x60, 0xf3, 0xdf, 0xb8, 0xec, 0x15, + 0xd9, 0x85, 0x9e, 0xfe, 0xa1, 0x04, 0xdf, 0x6f, 0x92, 0x84, 0x72, 0x31, 0xc1, 0x2e, 0x30, 0xa5, + 0xde, 0x47, 0xd0, 0x3a, 0xa5, 0x37, 0xaf, 0x63, 0x57, 0xd7, 0x84, 0x9c, 0xb7, 0xc1, 0x8a, 0x52, + 0x20, 0xea, 0x9f, 0x42, 0x6f, 0x89, 0x8e, 0xf3, 0xf6, 0xf8, 0x1b, 0xc7, 0x5a, 0xba, 0x7e, 0x26, + 0x9f, 0x30, 0x45, 0x4a, 0xce, 0x4f, 0xd4, 0xcf, 0xa9, 0x75, 0x9c, 0xfd, 0xa2, 0xf8, 0xf8, 0xc1, + 0x77, 0xa7, 0xb1, 0xcc, 0x9a, 0x09, 0x67, 0x0f, 0xef, 0xae, 0xd3, 0xa4, 0x99, 0x97, 0x27, 0xce, + 0x95, 0xcc, 0x5b, 0x65, 0xd5, 0xf7, 0x00, 0x32, 0xee, 0xcc, 0xdb, 0xe3, 0xf5, 0x2e, 0xd3, 0xea, + 0x53, 0x80, 0x8c, 0x11, 0x15, 0x2a, 0x8a, 0x84, 0xaa, 0xa6, 0x2d, 0xb3, 0xe6, 0x2e, 0x34, 0x53, + 0x16, 0xcb, 0xaf, 0x81, 0x0e, 0x8a, 0xa4, 0xf8, 0xf1, 0xee, 0xef, 0x77, 0x66, 0xae, 0xb8, 0x8c, + 0xa7, 0x23, 0x3b, 0x98, 0xef, 0x5d, 0xd2, 0xe8, 0xd2, 0xb5, 0x03, 0x1e, 0xee, 0x5d, 0x4b, 0x30, + 0xec, 0x15, 0xfe, 0xb5, 0x9a, 0xd6, 0xf0, 0xf5, 0xf6, 0xe4, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x66, 0x13, 0x8f, 0x1a, 0xcd, 0x1a, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BackendClient is the client API for Backend service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BackendClient interface { + // HandleRequest is used to handle a request and generate a response. + // The plugins must check the operation type and handle appropriately. + HandleRequest(ctx context.Context, in *HandleRequestArgs, opts ...grpc.CallOption) (*HandleRequestReply, error) + // SpecialPaths is a list of paths that are special in some way. + // See PathType for the types of special paths. The key is the type + // of the special path, and the value is a list of paths for this type. + // This is not a regular expression but is an exact match. If the path + // ends in '*' then it is a prefix-based match. The '*' can only appear + // at the end. 
+ SpecialPaths(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*SpecialPathsReply, error) + // HandleExistenceCheck is used to handle a request and generate a response + // indicating whether the given path exists or not; this is used to + // understand whether the request must have a Create or Update capability + // ACL applied. The first bool indicates whether an existence check + // function was found for the backend; the second indicates whether, if an + // existence check function was found, the item exists or not. + HandleExistenceCheck(ctx context.Context, in *HandleExistenceCheckArgs, opts ...grpc.CallOption) (*HandleExistenceCheckReply, error) + // Cleanup is invoked during an unmount of a backend to allow it to + // handle any cleanup like connection closing or releasing of file handles. + // Cleanup is called right before Vault closes the plugin process. + Cleanup(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + // InvalidateKey may be invoked when an object is modified that belongs + // to the backend. The backend can use this to clear any caches or reset + // internal state as needed. + InvalidateKey(ctx context.Context, in *InvalidateKeyArgs, opts ...grpc.CallOption) (*Empty, error) + // Setup is used to set up the backend based on the provided backend + // configuration. The plugin's setup implementation should use the provided + // broker_id to create a connection back to Vault for use with the Storage + // and SystemView clients. + Setup(ctx context.Context, in *SetupArgs, opts ...grpc.CallOption) (*SetupReply, error) + // Initialize is invoked just after mounting a backend to allow it to + // handle any initialization tasks that need to be performed. + Initialize(ctx context.Context, in *InitializeArgs, opts ...grpc.CallOption) (*InitializeReply, error) + // Type returns the BackendType for the particular backend + Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeReply, error) +} + +type backendClient struct { + cc *grpc.ClientConn +} + +func NewBackendClient(cc *grpc.ClientConn) BackendClient { + return &backendClient{cc} +} + +func (c *backendClient) HandleRequest(ctx context.Context, in *HandleRequestArgs, opts ...grpc.CallOption) (*HandleRequestReply, error) { + out := new(HandleRequestReply) + err := c.cc.Invoke(ctx, "/pb.Backend/HandleRequest", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) SpecialPaths(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*SpecialPathsReply, error) { + out := new(SpecialPathsReply) + err := c.cc.Invoke(ctx, "/pb.Backend/SpecialPaths", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) HandleExistenceCheck(ctx context.Context, in *HandleExistenceCheckArgs, opts ...grpc.CallOption) (*HandleExistenceCheckReply, error) { + out := new(HandleExistenceCheckReply) + err := c.cc.Invoke(ctx, "/pb.Backend/HandleExistenceCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) Cleanup(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/pb.Backend/Cleanup", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) InvalidateKey(ctx context.Context, in *InvalidateKeyArgs, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/pb.Backend/InvalidateKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) Setup(ctx context.Context, in *SetupArgs, opts ...grpc.CallOption) (*SetupReply, error) { + out := new(SetupReply) + err := c.cc.Invoke(ctx, "/pb.Backend/Setup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) Initialize(ctx context.Context, in *InitializeArgs, opts ...grpc.CallOption) (*InitializeReply, error) { + out := new(InitializeReply) + err := c.cc.Invoke(ctx, "/pb.Backend/Initialize", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeReply, error) { + out := new(TypeReply) + err := c.cc.Invoke(ctx, "/pb.Backend/Type", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BackendServer is the server API for Backend service. +type BackendServer interface { + // HandleRequest is used to handle a request and generate a response. + // The plugins must check the operation type and handle appropriately. + HandleRequest(context.Context, *HandleRequestArgs) (*HandleRequestReply, error) + // SpecialPaths is a list of paths that are special in some way. + // See PathType for the types of special paths. The key is the type + // of the special path, and the value is a list of paths for this type. + // This is not a regular expression but is an exact match. If the path + // ends in '*' then it is a prefix-based match. The '*' can only appear + // at the end. + SpecialPaths(context.Context, *Empty) (*SpecialPathsReply, error) + // HandleExistenceCheck is used to handle a request and generate a response + // indicating whether the given path exists or not; this is used to + // understand whether the request must have a Create or Update capability + // ACL applied. The first bool indicates whether an existence check + // function was found for the backend; the second indicates whether, if an + // existence check function was found, the item exists or not. + HandleExistenceCheck(context.Context, *HandleExistenceCheckArgs) (*HandleExistenceCheckReply, error) + // Cleanup is invoked during an unmount of a backend to allow it to + // handle any cleanup like connection closing or releasing of file handles. + // Cleanup is called right before Vault closes the plugin process. + Cleanup(context.Context, *Empty) (*Empty, error) + // InvalidateKey may be invoked when an object is modified that belongs + // to the backend. The backend can use this to clear any caches or reset + // internal state as needed. + InvalidateKey(context.Context, *InvalidateKeyArgs) (*Empty, error) + // Setup is used to set up the backend based on the provided backend + // configuration. The plugin's setup implementation should use the provided + // broker_id to create a connection back to Vault for use with the Storage + // and SystemView clients. + Setup(context.Context, *SetupArgs) (*SetupReply, error) + // Initialize is invoked just after mounting a backend to allow it to + // handle any initialization tasks that need to be performed. 
+ Initialize(context.Context, *InitializeArgs) (*InitializeReply, error) + // Type returns the BackendType for the particular backend + Type(context.Context, *Empty) (*TypeReply, error) +} + +// UnimplementedBackendServer can be embedded to have forward compatible implementations. +type UnimplementedBackendServer struct { +} + +func (*UnimplementedBackendServer) HandleRequest(ctx context.Context, req *HandleRequestArgs) (*HandleRequestReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method HandleRequest not implemented") +} +func (*UnimplementedBackendServer) SpecialPaths(ctx context.Context, req *Empty) (*SpecialPathsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method SpecialPaths not implemented") +} +func (*UnimplementedBackendServer) HandleExistenceCheck(ctx context.Context, req *HandleExistenceCheckArgs) (*HandleExistenceCheckReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method HandleExistenceCheck not implemented") +} +func (*UnimplementedBackendServer) Cleanup(ctx context.Context, req *Empty) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Cleanup not implemented") +} +func (*UnimplementedBackendServer) InvalidateKey(ctx context.Context, req *InvalidateKeyArgs) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method InvalidateKey not implemented") +} +func (*UnimplementedBackendServer) Setup(ctx context.Context, req *SetupArgs) (*SetupReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Setup not implemented") +} +func (*UnimplementedBackendServer) Initialize(ctx context.Context, req *InitializeArgs) (*InitializeReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented") +} +func (*UnimplementedBackendServer) Type(ctx context.Context, req *Empty) (*TypeReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Type not implemented") +} + +func RegisterBackendServer(s *grpc.Server, srv BackendServer) { + s.RegisterService(&_Backend_serviceDesc, srv) +} + +func _Backend_HandleRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HandleRequestArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).HandleRequest(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/HandleRequest", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).HandleRequest(ctx, req.(*HandleRequestArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_SpecialPaths_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).SpecialPaths(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/SpecialPaths", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).SpecialPaths(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_HandleExistenceCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HandleExistenceCheckArgs) + if err := 
dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).HandleExistenceCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/HandleExistenceCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).HandleExistenceCheck(ctx, req.(*HandleExistenceCheckArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_Cleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).Cleanup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/Cleanup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).Cleanup(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_InvalidateKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InvalidateKeyArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).InvalidateKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/InvalidateKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).InvalidateKey(ctx, req.(*InvalidateKeyArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_Setup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetupArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).Setup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/Setup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).Setup(ctx, req.(*SetupArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitializeArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).Initialize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/Initialize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).Initialize(ctx, req.(*InitializeArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_Type_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).Type(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/Type", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).Type(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _Backend_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pb.Backend", + 
HandlerType: (*BackendServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "HandleRequest", + Handler: _Backend_HandleRequest_Handler, + }, + { + MethodName: "SpecialPaths", + Handler: _Backend_SpecialPaths_Handler, + }, + { + MethodName: "HandleExistenceCheck", + Handler: _Backend_HandleExistenceCheck_Handler, + }, + { + MethodName: "Cleanup", + Handler: _Backend_Cleanup_Handler, + }, + { + MethodName: "InvalidateKey", + Handler: _Backend_InvalidateKey_Handler, + }, + { + MethodName: "Setup", + Handler: _Backend_Setup_Handler, + }, + { + MethodName: "Initialize", + Handler: _Backend_Initialize_Handler, + }, + { + MethodName: "Type", + Handler: _Backend_Type_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/plugin/pb/backend.proto", +} + +// StorageClient is the client API for Storage service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type StorageClient interface { + List(ctx context.Context, in *StorageListArgs, opts ...grpc.CallOption) (*StorageListReply, error) + Get(ctx context.Context, in *StorageGetArgs, opts ...grpc.CallOption) (*StorageGetReply, error) + Put(ctx context.Context, in *StoragePutArgs, opts ...grpc.CallOption) (*StoragePutReply, error) + Delete(ctx context.Context, in *StorageDeleteArgs, opts ...grpc.CallOption) (*StorageDeleteReply, error) +} + +type storageClient struct { + cc *grpc.ClientConn +} + +func NewStorageClient(cc *grpc.ClientConn) StorageClient { + return &storageClient{cc} +} + +func (c *storageClient) List(ctx context.Context, in *StorageListArgs, opts ...grpc.CallOption) (*StorageListReply, error) { + out := new(StorageListReply) + err := c.cc.Invoke(ctx, "/pb.Storage/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) Get(ctx context.Context, in *StorageGetArgs, opts ...grpc.CallOption) (*StorageGetReply, error) { + out := new(StorageGetReply) + err := c.cc.Invoke(ctx, "/pb.Storage/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) Put(ctx context.Context, in *StoragePutArgs, opts ...grpc.CallOption) (*StoragePutReply, error) { + out := new(StoragePutReply) + err := c.cc.Invoke(ctx, "/pb.Storage/Put", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) Delete(ctx context.Context, in *StorageDeleteArgs, opts ...grpc.CallOption) (*StorageDeleteReply, error) { + out := new(StorageDeleteReply) + err := c.cc.Invoke(ctx, "/pb.Storage/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// StorageServer is the server API for Storage service. +type StorageServer interface { + List(context.Context, *StorageListArgs) (*StorageListReply, error) + Get(context.Context, *StorageGetArgs) (*StorageGetReply, error) + Put(context.Context, *StoragePutArgs) (*StoragePutReply, error) + Delete(context.Context, *StorageDeleteArgs) (*StorageDeleteReply, error) +} + +// UnimplementedStorageServer can be embedded to have forward compatible implementations. 
+type UnimplementedStorageServer struct { +} + +func (*UnimplementedStorageServer) List(ctx context.Context, req *StorageListArgs) (*StorageListReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (*UnimplementedStorageServer) Get(ctx context.Context, req *StorageGetArgs) (*StorageGetReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedStorageServer) Put(ctx context.Context, req *StoragePutArgs) (*StoragePutReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") +} +func (*UnimplementedStorageServer) Delete(ctx context.Context, req *StorageDeleteArgs) (*StorageDeleteReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} + +func RegisterStorageServer(s *grpc.Server, srv StorageServer) { + s.RegisterService(&_Storage_serviceDesc, srv) +} + +func _Storage_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StorageListArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Storage/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).List(ctx, req.(*StorageListArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StorageGetArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Storage/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).Get(ctx, req.(*StorageGetArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoragePutArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).Put(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Storage/Put", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).Put(ctx, req.(*StoragePutArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StorageDeleteArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Storage/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).Delete(ctx, req.(*StorageDeleteArgs)) + } + return interceptor(ctx, in, info, handler) +} + +var _Storage_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pb.Storage", + HandlerType: (*StorageServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "List", + Handler: _Storage_List_Handler, + }, 
+ { + MethodName: "Get", + Handler: _Storage_Get_Handler, + }, + { + MethodName: "Put", + Handler: _Storage_Put_Handler, + }, + { + MethodName: "Delete", + Handler: _Storage_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/plugin/pb/backend.proto", +} + +// SystemViewClient is the client API for SystemView service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SystemViewClient interface { + // DefaultLeaseTTL returns the default lease TTL set in Vault configuration + DefaultLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) + // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend + // authors should take care not to issue credentials that last longer than + // this value, as Vault will revoke them + MaxLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) + // Tainted, returns true if the mount is tainted. A mount is tainted if it is in the + // process of being unmounted. This should only be used in special + // circumstances; a primary use-case is as a guard in revocation functions. + // If revocation of a backend's leases fails it can keep the unmounting + // process from being successful. If the reason for this failure is not + // relevant when the mount is tainted (for instance, saving a CRL to disk + // when the stored CRL will be removed during the unmounting process + // anyways), we can ignore the errors to allow unmounting to complete. + Tainted(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TaintedReply, error) + // CachingDisabled returns true if caching is disabled. If true, no caches + // should be used, despite known slowdowns. + CachingDisabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CachingDisabledReply, error) + // ReplicationState indicates the state of cluster replication + ReplicationState(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReplicationStateReply, error) + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + ResponseWrapData(ctx context.Context, in *ResponseWrapDataArgs, opts ...grpc.CallOption) (*ResponseWrapDataReply, error) + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. + MlockEnabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MlockEnabledReply, error) + // LocalMount, when run from a system view attached to a request, indicates + // whether the request is affecting a local mount or not + LocalMount(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*LocalMountReply, error) + // EntityInfo returns the basic entity information for the given entity id + EntityInfo(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*EntityInfoReply, error) + // PluginEnv returns Vault environment information used by plugins + PluginEnv(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PluginEnvReply, error) +} + +type systemViewClient struct { + cc *grpc.ClientConn +} + +func NewSystemViewClient(cc *grpc.ClientConn) SystemViewClient { + return &systemViewClient{cc} +} + +func (c *systemViewClient) DefaultLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) { + out := new(TTLReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/DefaultLeaseTTL", in, out, opts...) 
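+	// Invoke performs the unary RPC and decodes the reply into out; any
+	// failure surfaces as a gRPC status error.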
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) MaxLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) { + out := new(TTLReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/MaxLeaseTTL", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) Tainted(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TaintedReply, error) { + out := new(TaintedReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/Tainted", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) CachingDisabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CachingDisabledReply, error) { + out := new(CachingDisabledReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/CachingDisabled", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) ReplicationState(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReplicationStateReply, error) { + out := new(ReplicationStateReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/ReplicationState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) ResponseWrapData(ctx context.Context, in *ResponseWrapDataArgs, opts ...grpc.CallOption) (*ResponseWrapDataReply, error) { + out := new(ResponseWrapDataReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/ResponseWrapData", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) MlockEnabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MlockEnabledReply, error) { + out := new(MlockEnabledReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/MlockEnabled", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) LocalMount(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*LocalMountReply, error) { + out := new(LocalMountReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/LocalMount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) EntityInfo(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*EntityInfoReply, error) { + out := new(EntityInfoReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/EntityInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) PluginEnv(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PluginEnvReply, error) { + out := new(PluginEnvReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/PluginEnv", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SystemViewServer is the server API for SystemView service. +type SystemViewServer interface { + // DefaultLeaseTTL returns the default lease TTL set in Vault configuration + DefaultLeaseTTL(context.Context, *Empty) (*TTLReply, error) + // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend + // authors should take care not to issue credentials that last longer than + // this value, as Vault will revoke them + MaxLeaseTTL(context.Context, *Empty) (*TTLReply, error) + // Tainted, returns true if the mount is tainted. A mount is tainted if it is in the + // process of being unmounted. This should only be used in special + // circumstances; a primary use-case is as a guard in revocation functions. 
+ // If revocation of a backend's leases fails it can keep the unmounting + // process from being successful. If the reason for this failure is not + // relevant when the mount is tainted (for instance, saving a CRL to disk + // when the stored CRL will be removed during the unmounting process + // anyways), we can ignore the errors to allow unmounting to complete. + Tainted(context.Context, *Empty) (*TaintedReply, error) + // CachingDisabled returns true if caching is disabled. If true, no caches + // should be used, despite known slowdowns. + CachingDisabled(context.Context, *Empty) (*CachingDisabledReply, error) + // ReplicationState indicates the state of cluster replication + ReplicationState(context.Context, *Empty) (*ReplicationStateReply, error) + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + ResponseWrapData(context.Context, *ResponseWrapDataArgs) (*ResponseWrapDataReply, error) + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. + MlockEnabled(context.Context, *Empty) (*MlockEnabledReply, error) + // LocalMount, when run from a system view attached to a request, indicates + // whether the request is affecting a local mount or not + LocalMount(context.Context, *Empty) (*LocalMountReply, error) + // EntityInfo returns the basic entity information for the given entity id + EntityInfo(context.Context, *EntityInfoArgs) (*EntityInfoReply, error) + // PluginEnv returns Vault environment information used by plugins + PluginEnv(context.Context, *Empty) (*PluginEnvReply, error) +} + +// UnimplementedSystemViewServer can be embedded to have forward compatible implementations. +type UnimplementedSystemViewServer struct { +} + +func (*UnimplementedSystemViewServer) DefaultLeaseTTL(ctx context.Context, req *Empty) (*TTLReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method DefaultLeaseTTL not implemented") +} +func (*UnimplementedSystemViewServer) MaxLeaseTTL(ctx context.Context, req *Empty) (*TTLReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method MaxLeaseTTL not implemented") +} +func (*UnimplementedSystemViewServer) Tainted(ctx context.Context, req *Empty) (*TaintedReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Tainted not implemented") +} +func (*UnimplementedSystemViewServer) CachingDisabled(ctx context.Context, req *Empty) (*CachingDisabledReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method CachingDisabled not implemented") +} +func (*UnimplementedSystemViewServer) ReplicationState(ctx context.Context, req *Empty) (*ReplicationStateReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReplicationState not implemented") +} +func (*UnimplementedSystemViewServer) ResponseWrapData(ctx context.Context, req *ResponseWrapDataArgs) (*ResponseWrapDataReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResponseWrapData not implemented") +} +func (*UnimplementedSystemViewServer) MlockEnabled(ctx context.Context, req *Empty) (*MlockEnabledReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method MlockEnabled not implemented") +} +func (*UnimplementedSystemViewServer) LocalMount(ctx context.Context, req *Empty) (*LocalMountReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method LocalMount not implemented") +} +func (*UnimplementedSystemViewServer) EntityInfo(ctx context.Context, req *EntityInfoArgs) (*EntityInfoReply, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method EntityInfo not implemented") +} +func (*UnimplementedSystemViewServer) PluginEnv(ctx context.Context, req *Empty) (*PluginEnvReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method PluginEnv not implemented") +} + +func RegisterSystemViewServer(s *grpc.Server, srv SystemViewServer) { + s.RegisterService(&_SystemView_serviceDesc, srv) +} + +func _SystemView_DefaultLeaseTTL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).DefaultLeaseTTL(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/DefaultLeaseTTL", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).DefaultLeaseTTL(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_MaxLeaseTTL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).MaxLeaseTTL(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/MaxLeaseTTL", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).MaxLeaseTTL(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_Tainted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).Tainted(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/Tainted", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).Tainted(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_CachingDisabled_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).CachingDisabled(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/CachingDisabled", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).CachingDisabled(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_ReplicationState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).ReplicationState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/ReplicationState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).ReplicationState(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_SystemView_ResponseWrapData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResponseWrapDataArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).ResponseWrapData(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/ResponseWrapData", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).ResponseWrapData(ctx, req.(*ResponseWrapDataArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_MlockEnabled_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).MlockEnabled(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/MlockEnabled", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).MlockEnabled(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_LocalMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).LocalMount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/LocalMount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).LocalMount(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_EntityInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EntityInfoArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).EntityInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/EntityInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).EntityInfo(ctx, req.(*EntityInfoArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_PluginEnv_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).PluginEnv(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/PluginEnv", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).PluginEnv(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _SystemView_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pb.SystemView", + HandlerType: (*SystemViewServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DefaultLeaseTTL", + Handler: _SystemView_DefaultLeaseTTL_Handler, + }, + { + MethodName: "MaxLeaseTTL", + Handler: _SystemView_MaxLeaseTTL_Handler, + }, + { + MethodName: "Tainted", + Handler: _SystemView_Tainted_Handler, + }, + { + 
MethodName: "CachingDisabled", + Handler: _SystemView_CachingDisabled_Handler, + }, + { + MethodName: "ReplicationState", + Handler: _SystemView_ReplicationState_Handler, + }, + { + MethodName: "ResponseWrapData", + Handler: _SystemView_ResponseWrapData_Handler, + }, + { + MethodName: "MlockEnabled", + Handler: _SystemView_MlockEnabled_Handler, + }, + { + MethodName: "LocalMount", + Handler: _SystemView_LocalMount_Handler, + }, + { + MethodName: "EntityInfo", + Handler: _SystemView_EntityInfo_Handler, + }, + { + MethodName: "PluginEnv", + Handler: _SystemView_PluginEnv_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/plugin/pb/backend.proto", +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/pb/backend.proto b/vendor/github.com/hashicorp/vault/sdk/plugin/pb/backend.proto new file mode 100644 index 00000000..5b23d5e2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/pb/backend.proto @@ -0,0 +1,602 @@ +syntax = "proto3"; +package pb; + +option go_package = "github.com/hashicorp/vault/sdk/plugin/pb"; + +import "google/protobuf/timestamp.proto"; +import "sdk/logical/identity.proto"; +import "sdk/logical/plugin.proto"; + +message Empty {} + +message Header { + repeated string header = 1; +} + +message ProtoError { + // Error type can be one of: + // ErrTypeUnknown uint32 = iota + // ErrTypeUserError + // ErrTypeInternalError + // ErrTypeCodedError + // ErrTypeStatusBadRequest + // ErrTypeUnsupportedOperation + // ErrTypeUnsupportedPath + // ErrTypeInvalidRequest + // ErrTypePermissionDenied + // ErrTypeMultiAuthzPending + uint32 err_type = 1; + string err_msg = 2; + int64 err_code = 3; +} + +// Paths is the structure of special paths that is used for SpecialPaths. +message Paths { + // Root are the paths that require a root token to access + repeated string root = 1; + + // Unauthenticated are the paths that can be accessed without any auth. + repeated string unauthenticated = 2; + + // LocalStorage are paths (prefixes) that are local to this instance; this + // indicates that these paths should not be replicated + repeated string local_storage = 3; + + // SealWrapStorage are storage paths that, when using a capable seal, + // should be seal wrapped with extra encryption. It is exact matching + // unless it ends with '/' in which case it will be treated as a prefix. + repeated string seal_wrap_storage = 4; +} + +message Request { + // Id is the uuid associated with each request + string id = 1; + + // If set, the name given to the replication secondary where this request + // originated + string ReplicationCluster = 2; + + // Operation is the requested operation type + string operation = 3; + + // Path is the part of the request path not consumed by the + // routing. As an example, if the original request path is "prod/aws/foo" + // and the AWS logical backend is mounted at "prod/aws/", then the + // final path is "foo" since the mount prefix is trimmed. + string path = 4; + + // Request data is a JSON object that must have keys with string type. + string data = 5; + + // Secret will be non-nil only for Revoke and Renew operations + // to represent the secret that was returned prior. + Secret secret = 6; + + // Auth will be non-nil only for Renew operations + // to represent the auth that was returned prior. + Auth auth = 7; + + // Headers will contain the http headers from the request. This value will + // be used in the audit broker to ensure we are auditing only the allowed + // headers. 
+	map<string, Header> headers = 8;
+
+	// ClientToken is provided to the core so that the identity
+	// can be verified and ACLs applied. This value is passed
+	// through to the logical backends but after being salted and
+	// hashed.
+	string client_token = 9;
+
+	// ClientTokenAccessor is provided to the core so that it can get
+	// logged as part of request audit logging.
+	string client_token_accessor = 10;
+
+	// DisplayName is provided to the logical backend to help associate
+	// dynamic secrets with the source entity. This is not a sensitive
+	// name, but is useful for operators.
+	string display_name = 11;
+
+	// MountPoint is provided so that a logical backend can generate
+	// paths relative to itself. The `Path` is effectively the client
+	// request path with the MountPoint trimmed off.
+	string mount_point = 12;
+
+	// MountType is provided so that a logical backend can make decisions
+	// based on the specific mount type (e.g., if a mount type has different
+	// aliases, generating different defaults depending on the alias)
+	string mount_type = 13;
+
+	// MountAccessor is provided so that identities returned by the authentication
+	// backends can be tied to the mount it belongs to.
+	string mount_accessor = 14;
+
+	// WrapInfo contains requested response wrapping parameters
+	RequestWrapInfo wrap_info = 15;
+
+	// ClientTokenRemainingUses represents the allowed number of uses left on the
+	// token supplied
+	int64 client_token_remaining_uses = 16;
+
+	// EntityID is the identity of the caller extracted out of the token used
+	// to make this request
+	string entity_id = 17;
+
+	// PolicyOverride indicates that the requestor wishes to override
+	// soft-mandatory Sentinel policies
+	bool policy_override = 18;
+
+	// Whether the request is unauthenticated, as in, had no client token
+	// attached. Useful in some situations where the client token is not made
+	// accessible.
+	bool unauthenticated = 19;
+
+	// Connection will be non-nil only for credential providers to
+	// inspect the connection information and potentially use it for
+	// authentication/protection.
+	Connection connection = 20;
+}
+
+message Auth {
+	LeaseOptions lease_options = 1;
+
+	// InternalData is a JSON object that is stored with the auth struct.
+	// This will be sent back during a Renew/Revoke for storing internal data
+	// used for those operations.
+	string internal_data = 2;
+
+	// DisplayName is a non-security sensitive identifier that is
+	// applicable to this Auth. It is used for logging and prefixing
+	// of dynamic secrets. For example, DisplayName may be "armon" for
+	// the github credential backend. If the client token is used to
+	// generate a SQL credential, the user may be "github-armon-uuid".
+	// This is to help identify the source without using audit tables.
+	string display_name = 3;
+
+	// Policies is the list of policies that the authenticated user
+	// is associated with.
+	repeated string policies = 4;
+
+	// Metadata is used to attach arbitrary string-type metadata to
+	// an authenticated user. This metadata will be outputted into the
+	// audit log.
+	map<string, string> metadata = 5;
+
+	// ClientToken is the token that is generated for the authentication.
+	// This will be filled in by Vault core when an auth structure is
+	// returned. Setting this manually will have no effect.
+	string client_token = 6;
+
+	// Accessor is the identifier for the ClientToken. This can be used
+	// to perform management functionalities (especially revocation) when the
+	// ClientToken in the audit logs is obfuscated. Accessor can be used
+	// to revoke a ClientToken and to look up the capabilities of the ClientToken,
+	// both without actually knowing the ClientToken.
+	string accessor = 7;
+
+	// Period indicates that the token generated using this Auth object
+	// should never expire. The token should be renewed within the duration
+	// specified by this period.
+	int64 period = 8;
+
+	// Number of allowed uses of the issued token
+	int64 num_uses = 9;
+
+	// EntityID is the identifier of the entity in the identity store to which
+	// the identity of the authenticating client belongs.
+	string entity_id = 10;
+
+	// Alias is the information about the authenticated client returned by
+	// the auth backend
+	logical.Alias alias = 11;
+
+	// GroupAliases are the informational mappings of external groups which an
+	// authenticated user belongs to. This is used to check if there are
+	// mapping groups for the group aliases in the identity store. For all the
+	// matching groups, the entity ID of the user will be added.
+	repeated logical.Alias group_aliases = 12;
+
+	// If set, restricts usage of the certificates to client IPs falling within
+	// the range of the specified CIDR(s).
+	repeated string bound_cidrs = 13;
+
+	// TokenPolicies and IdentityPolicies break down the list in Policies to
+	// help determine where a policy was sourced
+	repeated string token_policies = 14;
+	repeated string identity_policies = 15;
+
+	// Explicit maximum lifetime for the token. Unlike normal TTLs, the maximum
+	// TTL is a hard limit and cannot be exceeded; this also applies to
+	// periodic tokens.
+	int64 explicit_max_ttl = 16;
+
+	// TokenType is the type of token being requested
+	uint32 token_type = 17;
+
+	// Whether the default policy should be added automatically by core
+	bool no_default_policy = 18;
+}
+
+message TokenEntry {
+	string id = 1;
+	string accessor = 2;
+	string parent = 3;
+	repeated string policies = 4;
+	string path = 5;
+	map<string, string> meta = 6;
+	string display_name = 7;
+	int64 num_uses = 8;
+	int64 creation_time = 9;
+	int64 ttl = 10;
+	int64 explicit_max_ttl = 11;
+	string role = 12;
+	int64 period = 13;
+	string entity_id = 14;
+	repeated string bound_cidrs = 15;
+	string namespace_id = 16;
+	string cubbyhole_id = 17;
+	uint32 type = 18;
+}
+
+message LeaseOptions {
+	int64 TTL = 1;
+
+	bool renewable = 2;
+
+	int64 increment = 3;
+
+	google.protobuf.Timestamp issue_time = 4;
+
+	int64 MaxTTL = 5;
+}
+
+message Secret {
+	LeaseOptions lease_options = 1;
+
+	// InternalData is a JSON object that is stored with the secret.
+	// This will be sent back during a Renew/Revoke for storing internal data
+	// used for those operations.
+	string internal_data = 2;
+
+	// LeaseID is the ID returned to the user to manage this secret.
+	// This is generated by Vault core. Any set value will be ignored.
+	// For requests, this will always be blank.
+	string lease_id = 3;
+}
+
+message Response {
+	// Secret, if not nil, denotes that this response represents a secret.
+	Secret secret = 1;
+
+	// Auth, if not nil, contains the authentication information for
+	// this response. This is only checked and means something for
+	// credential backends.
+	Auth auth = 2;
+
+	// Response data is a JSON object that must have string keys. For
+	// secrets, this data is sent down to the user as-is. To store internal
+	// data that you don't want the user to see, store it in
+	// Secret.InternalData.
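+	// (Purely as a hypothetical illustration: a KV-style backend returning a
+	// single field might encode this as the JSON string {"value":"example"};
+	// the concrete shape is backend-specific.)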
+	string data = 3;
+
+	// Redirect is an HTTP URL to redirect to for further authentication.
+	// This is only valid for credential backends. This will be blanked
+	// for any logical backend and ignored.
+	string redirect = 4;
+
+	// Warnings allow operations or backends to return warnings in response
+	// to user actions without failing the action outright.
+	repeated string warnings = 5;
+
+	// Information for wrapping the response in a cubbyhole
+	ResponseWrapInfo wrap_info = 6;
+
+	// Headers will contain the http headers from the response. This value will
+	// be used in the audit broker to ensure we are auditing only the allowed
+	// headers.
+	map<string, Header> headers = 7;
+}
+
+message ResponseWrapInfo {
+	// Setting to non-zero specifies that the response should be wrapped.
+	// Specifies the desired TTL of the wrapping token.
+	int64 TTL = 1;
+
+	// The token containing the wrapped response
+	string token = 2;
+
+	// The token accessor for the wrapped response token
+	string accessor = 3;
+
+	// The creation time. This can be used with the TTL to figure out an
+	// expected expiration.
+	google.protobuf.Timestamp creation_time = 4;
+
+	// If the contained response is the output of a token creation call, the
+	// created token's accessor will be accessible here
+	string wrapped_accessor = 5;
+
+	// WrappedEntityID is the entity identifier of the caller who initiated the
+	// wrapping request
+	string wrapped_entity_id = 6;
+
+	// The format to use. This doesn't get returned; it's only internal.
+	string format = 7;
+
+	// CreationPath is the original request path that was used to create
+	// the wrapped response.
+	string creation_path = 8;
+
+	// Controls seal wrapping behavior downstream for specific use cases
+	bool seal_wrap = 9;
+}
+
+message RequestWrapInfo {
+	// Setting to non-zero specifies that the response should be wrapped.
+	// Specifies the desired TTL of the wrapping token.
+	int64 TTL = 1;
+
+	// The format to use for the wrapped response; if not specified it's a bare
+	// token
+	string format = 2;
+
+	// A flag to conforming backends that data for a given request should be
+	// seal wrapped
+	bool seal_wrap = 3;
+}
+
+// HandleRequestArgs is the args for HandleRequest method.
+message HandleRequestArgs {
+	uint32 storage_id = 1;
+	Request request = 2;
+}
+
+// HandleRequestReply is the reply for HandleRequest method.
+message HandleRequestReply {
+	Response response = 1;
+	ProtoError err = 2;
+}
+
+// InitializeArgs is the args for Initialize method.
+message InitializeArgs {
+}
+
+// InitializeReply is the reply for Initialize method.
+message InitializeReply {
+	ProtoError err = 1;
+}
+
+// SpecialPathsReply is the reply for SpecialPaths method.
+message SpecialPathsReply {
+	Paths paths = 1;
+}
+
+// HandleExistenceCheckArgs is the args for HandleExistenceCheck method.
+message HandleExistenceCheckArgs {
+	uint32 storage_id = 1;
+	Request request = 2;
+}
+
+// HandleExistenceCheckReply is the reply for HandleExistenceCheck method.
+message HandleExistenceCheckReply {
+	bool check_found = 1;
+	bool exists = 2;
+	ProtoError err = 3;
+}
+
+// SetupArgs is the args for Setup method.
+message SetupArgs {
+	uint32 broker_id = 1;
+	map<string, string> Config = 2;
+	string backendUUID = 3;
+}
+
+// SetupReply is the reply for Setup method.
+message SetupReply {
+	string err = 1;
+}
+
+// TypeReply is the reply for the Type method.
+message TypeReply {
+	uint32 type = 1;
+}
+
+message InvalidateKeyArgs {
+	string key = 1;
+}
+
+// Backend is the interface that plugins must satisfy. The plugin should
+// implement the server for this service. Requests will first run the
+// HandleExistenceCheck rpc then run the HandleRequest rpc.
+service Backend {
+	// HandleRequest is used to handle a request and generate a response.
+	// The plugins must check the operation type and handle appropriately.
+	rpc HandleRequest(HandleRequestArgs) returns (HandleRequestReply);
+
+	// SpecialPaths is a list of paths that are special in some way.
+	// See PathType for the types of special paths. The key is the type
+	// of the special path, and the value is a list of paths for this type.
+	// This is not a regular expression but is an exact match. If the path
+	// ends in '*' then it is a prefix-based match. The '*' can only appear
+	// at the end.
+	rpc SpecialPaths(Empty) returns (SpecialPathsReply);
+
+	// HandleExistenceCheck is used to handle a request and generate a response
+	// indicating whether the given path exists or not; this is used to
+	// understand whether the request must have a Create or Update capability
+	// ACL applied. The first bool indicates whether an existence check
+	// function was found for the backend; the second indicates whether, if an
+	// existence check function was found, the item exists or not.
+	rpc HandleExistenceCheck(HandleExistenceCheckArgs) returns (HandleExistenceCheckReply);
+
+	// Cleanup is invoked during an unmount of a backend to allow it to
+	// handle any cleanup like connection closing or releasing of file handles.
+	// Cleanup is called right before Vault closes the plugin process.
+	rpc Cleanup(Empty) returns (Empty);
+
+	// InvalidateKey may be invoked when an object is modified that belongs
+	// to the backend. The backend can use this to clear any caches or reset
+	// internal state as needed.
+	rpc InvalidateKey(InvalidateKeyArgs) returns (Empty);
+
+	// Setup is used to set up the backend based on the provided backend
+	// configuration. The plugin's setup implementation should use the provided
+	// broker_id to create a connection back to Vault for use with the Storage
+	// and SystemView clients.
+	rpc Setup(SetupArgs) returns (SetupReply);
+
+	// Initialize is invoked just after mounting a backend to allow it to
+	// handle any initialization tasks that need to be performed.
+	rpc Initialize(InitializeArgs) returns (InitializeReply);
+
+	// Type returns the BackendType for the particular backend
+	rpc Type(Empty) returns (TypeReply);
+}
+
+message StorageEntry {
+	string key = 1;
+	bytes value = 2;
+	bool seal_wrap = 3;
+}
+
+message StorageListArgs {
+	string prefix = 1;
+}
+
+message StorageListReply {
+	repeated string keys = 1;
+	string err = 2;
+}
+
+message StorageGetArgs {
+	string key = 1;
+}
+
+message StorageGetReply {
+	StorageEntry entry = 1;
+	string err = 2;
+}
+
+message StoragePutArgs {
+	StorageEntry entry = 1;
+}
+
+message StoragePutReply {
+	string err = 1;
+}
+
+message StorageDeleteArgs {
+	string key = 1;
+}
+
+message StorageDeleteReply {
+	string err = 1;
+}
+
+// Storage is the way that plugins are able to read and write data. Plugins
+// should implement the client for this service.
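+//
+// As a rough, illustrative sketch (not part of this file): backend code in Go
+// normally reaches this service indirectly, through the logical.Storage
+// interface that the plugin SDK backs with these RPCs, e.g.
+//
+//	entry, err := req.Storage.Get(ctx, "config")          // -> Storage/Get
+//	if err == nil && entry == nil {
+//		err = req.Storage.Put(ctx, &logical.StorageEntry{ // -> Storage/Put
+//			Key:   "config",
+//			Value: []byte(`{"initialized":true}`),
+//		})
+//	}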
+service Storage { + rpc List(StorageListArgs) returns (StorageListReply); + rpc Get(StorageGetArgs) returns (StorageGetReply); + rpc Put(StoragePutArgs) returns (StoragePutReply); + rpc Delete(StorageDeleteArgs) returns (StorageDeleteReply); +} + +message TTLReply { + int64 TTL = 1; +} + +message TaintedReply { + bool tainted = 1; +} + +message CachingDisabledReply { + bool disabled = 1; +} + +message ReplicationStateReply { + int32 state = 1; +} + +message ResponseWrapDataArgs { + string data = 1; + int64 TTL = 2; + bool JWT = 3; +} + +message ResponseWrapDataReply { + ResponseWrapInfo wrap_info = 1; + string err = 2; +} + +message MlockEnabledReply { + bool enabled = 1; +} + +message LocalMountReply { + bool local = 1; +} + +message EntityInfoArgs { + string entity_id = 1; +} + +message EntityInfoReply { + logical.Entity entity = 1; + string err = 2; +} + +message PluginEnvReply { + logical.PluginEnvironment plugin_environment = 1; + string err = 2; +} + +// SystemView exposes system configuration information in a safe way for plugins +// to consume. Plugins should implement the client for this service. +service SystemView { + // DefaultLeaseTTL returns the default lease TTL set in Vault configuration + rpc DefaultLeaseTTL(Empty) returns (TTLReply); + + // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend + // authors should take care not to issue credentials that last longer than + // this value, as Vault will revoke them + rpc MaxLeaseTTL(Empty) returns (TTLReply); + + // Tainted, returns true if the mount is tainted. A mount is tainted if it is in the + // process of being unmounted. This should only be used in special + // circumstances; a primary use-case is as a guard in revocation functions. + // If revocation of a backend's leases fails it can keep the unmounting + // process from being successful. If the reason for this failure is not + // relevant when the mount is tainted (for instance, saving a CRL to disk + // when the stored CRL will be removed during the unmounting process + // anyways), we can ignore the errors to allow unmounting to complete. + rpc Tainted(Empty) returns (TaintedReply); + + // CachingDisabled returns true if caching is disabled. If true, no caches + // should be used, despite known slowdowns. + rpc CachingDisabled(Empty) returns (CachingDisabledReply); + + // ReplicationState indicates the state of cluster replication + rpc ReplicationState(Empty) returns (ReplicationStateReply); + + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + rpc ResponseWrapData(ResponseWrapDataArgs) returns (ResponseWrapDataReply); + + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. + rpc MlockEnabled(Empty) returns (MlockEnabledReply); + + // LocalMount, when run from a system view attached to a request, indicates + // whether the request is affecting a local mount or not + rpc LocalMount(Empty) returns (LocalMountReply); + + // EntityInfo returns the basic entity information for the given entity id + rpc EntityInfo(EntityInfoArgs) returns (EntityInfoReply); + + // PluginEnv returns Vault environment information used by plugins + rpc PluginEnv(Empty) returns (PluginEnvReply); +} + +message Connection { + // RemoteAddr is the network address that sent the request. 
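+	// (The exact format depends on the listener; commonly an IP or
+	// "host:port" string.)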
+ string remote_addr = 1; +} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/pb/translation.go b/vendor/github.com/hashicorp/vault/sdk/plugin/pb/translation.go new file mode 100644 index 00000000..23c7e718 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/pb/translation.go @@ -0,0 +1,639 @@ +package pb + +import ( + "encoding/json" + "errors" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + ErrTypeUnknown uint32 = iota + ErrTypeUserError + ErrTypeInternalError + ErrTypeCodedError + ErrTypeStatusBadRequest + ErrTypeUnsupportedOperation + ErrTypeUnsupportedPath + ErrTypeInvalidRequest + ErrTypePermissionDenied + ErrTypeMultiAuthzPending +) + +func ProtoErrToErr(e *ProtoError) error { + if e == nil { + return nil + } + + var err error + switch e.ErrType { + case ErrTypeUnknown: + err = errors.New(e.ErrMsg) + case ErrTypeUserError: + err = errutil.UserError{Err: e.ErrMsg} + case ErrTypeInternalError: + err = errutil.InternalError{Err: e.ErrMsg} + case ErrTypeCodedError: + err = logical.CodedError(int(e.ErrCode), e.ErrMsg) + case ErrTypeStatusBadRequest: + err = &logical.StatusBadRequest{Err: e.ErrMsg} + case ErrTypeUnsupportedOperation: + err = logical.ErrUnsupportedOperation + case ErrTypeUnsupportedPath: + err = logical.ErrUnsupportedPath + case ErrTypeInvalidRequest: + err = logical.ErrInvalidRequest + case ErrTypePermissionDenied: + err = logical.ErrPermissionDenied + case ErrTypeMultiAuthzPending: + err = logical.ErrMultiAuthzPending + } + + return err +} + +func ErrToProtoErr(e error) *ProtoError { + if e == nil { + return nil + } + pbErr := &ProtoError{ + ErrMsg: e.Error(), + ErrType: ErrTypeUnknown, + } + + switch e.(type) { + case errutil.UserError: + pbErr.ErrType = ErrTypeUserError + case errutil.InternalError: + pbErr.ErrType = ErrTypeInternalError + case logical.HTTPCodedError: + pbErr.ErrType = ErrTypeCodedError + pbErr.ErrCode = int64(e.(logical.HTTPCodedError).Code()) + case *logical.StatusBadRequest: + pbErr.ErrType = ErrTypeStatusBadRequest + } + + switch { + case e == logical.ErrUnsupportedOperation: + pbErr.ErrType = ErrTypeUnsupportedOperation + case e == logical.ErrUnsupportedPath: + pbErr.ErrType = ErrTypeUnsupportedPath + case e == logical.ErrInvalidRequest: + pbErr.ErrType = ErrTypeInvalidRequest + case e == logical.ErrPermissionDenied: + pbErr.ErrType = ErrTypePermissionDenied + case e == logical.ErrMultiAuthzPending: + pbErr.ErrType = ErrTypeMultiAuthzPending + } + + return pbErr +} + +func ErrToString(e error) string { + if e == nil { + return "" + } + + return e.Error() +} + +func LogicalStorageEntryToProtoStorageEntry(e *logical.StorageEntry) *StorageEntry { + if e == nil { + return nil + } + + return &StorageEntry{ + Key: e.Key, + Value: e.Value, + SealWrap: e.SealWrap, + } +} + +func ProtoStorageEntryToLogicalStorageEntry(e *StorageEntry) *logical.StorageEntry { + if e == nil { + return nil + } + + return &logical.StorageEntry{ + Key: e.Key, + Value: e.Value, + SealWrap: e.SealWrap, + } +} + +func ProtoLeaseOptionsToLogicalLeaseOptions(l *LeaseOptions) (logical.LeaseOptions, error) { + if l == nil { + return logical.LeaseOptions{}, nil + } + + t, err := ptypes.Timestamp(l.IssueTime) + return logical.LeaseOptions{ + TTL: time.Duration(l.TTL), + Renewable: l.Renewable, + Increment: time.Duration(l.Increment), + IssueTime: t, + 
MaxTTL: time.Duration(l.MaxTTL), + }, err +} + +func LogicalLeaseOptionsToProtoLeaseOptions(l logical.LeaseOptions) (*LeaseOptions, error) { + t, err := ptypes.TimestampProto(l.IssueTime) + if err != nil { + return nil, err + } + + return &LeaseOptions{ + TTL: int64(l.TTL), + Renewable: l.Renewable, + Increment: int64(l.Increment), + IssueTime: t, + MaxTTL: int64(l.MaxTTL), + }, err +} + +func ProtoSecretToLogicalSecret(s *Secret) (*logical.Secret, error) { + if s == nil { + return nil, nil + } + + data := map[string]interface{}{} + err := json.Unmarshal([]byte(s.InternalData), &data) + if err != nil { + return nil, err + } + + lease, err := ProtoLeaseOptionsToLogicalLeaseOptions(s.LeaseOptions) + if err != nil { + return nil, err + } + + return &logical.Secret{ + LeaseOptions: lease, + InternalData: data, + LeaseID: s.LeaseID, + }, nil +} + +func LogicalSecretToProtoSecret(s *logical.Secret) (*Secret, error) { + if s == nil { + return nil, nil + } + + buf, err := json.Marshal(s.InternalData) + if err != nil { + return nil, err + } + + lease, err := LogicalLeaseOptionsToProtoLeaseOptions(s.LeaseOptions) + if err != nil { + return nil, err + } + + return &Secret{ + LeaseOptions: lease, + InternalData: string(buf[:]), + LeaseID: s.LeaseID, + }, err +} + +func LogicalRequestToProtoRequest(r *logical.Request) (*Request, error) { + if r == nil { + return nil, nil + } + + buf, err := json.Marshal(r.Data) + if err != nil { + return nil, err + } + + secret, err := LogicalSecretToProtoSecret(r.Secret) + if err != nil { + return nil, err + } + + auth, err := LogicalAuthToProtoAuth(r.Auth) + if err != nil { + return nil, err + } + + headers := map[string]*Header{} + for k, v := range r.Headers { + headers[k] = &Header{Header: v} + } + + return &Request{ + ID: r.ID, + ReplicationCluster: r.ReplicationCluster, + Operation: string(r.Operation), + Path: r.Path, + Data: string(buf[:]), + Secret: secret, + Auth: auth, + Headers: headers, + ClientToken: r.ClientToken, + ClientTokenAccessor: r.ClientTokenAccessor, + DisplayName: r.DisplayName, + MountPoint: r.MountPoint, + MountType: r.MountType, + MountAccessor: r.MountAccessor, + WrapInfo: LogicalRequestWrapInfoToProtoRequestWrapInfo(r.WrapInfo), + ClientTokenRemainingUses: int64(r.ClientTokenRemainingUses), + Connection: LogicalConnectionToProtoConnection(r.Connection), + EntityID: r.EntityID, + PolicyOverride: r.PolicyOverride, + Unauthenticated: r.Unauthenticated, + }, nil +} + +func ProtoRequestToLogicalRequest(r *Request) (*logical.Request, error) { + if r == nil { + return nil, nil + } + + data := map[string]interface{}{} + err := json.Unmarshal([]byte(r.Data), &data) + if err != nil { + return nil, err + } + + secret, err := ProtoSecretToLogicalSecret(r.Secret) + if err != nil { + return nil, err + } + + auth, err := ProtoAuthToLogicalAuth(r.Auth) + if err != nil { + return nil, err + } + + var headers map[string][]string + if len(r.Headers) > 0 { + headers = make(map[string][]string, len(r.Headers)) + for k, v := range r.Headers { + headers[k] = v.Header + } + } + + return &logical.Request{ + ID: r.ID, + ReplicationCluster: r.ReplicationCluster, + Operation: logical.Operation(r.Operation), + Path: r.Path, + Data: data, + Secret: secret, + Auth: auth, + Headers: headers, + ClientToken: r.ClientToken, + ClientTokenAccessor: r.ClientTokenAccessor, + DisplayName: r.DisplayName, + MountPoint: r.MountPoint, + MountType: r.MountType, + MountAccessor: r.MountAccessor, + WrapInfo: ProtoRequestWrapInfoToLogicalRequestWrapInfo(r.WrapInfo), + 
ClientTokenRemainingUses: int(r.ClientTokenRemainingUses), + Connection: ProtoConnectionToLogicalConnection(r.Connection), + EntityID: r.EntityID, + PolicyOverride: r.PolicyOverride, + Unauthenticated: r.Unauthenticated, + }, nil +} + +func LogicalConnectionToProtoConnection(c *logical.Connection) *Connection { + if c == nil { + return nil + } + + return &Connection{ + RemoteAddr: c.RemoteAddr, + } +} + +func ProtoConnectionToLogicalConnection(c *Connection) *logical.Connection { + if c == nil { + return nil + } + + return &logical.Connection{ + RemoteAddr: c.RemoteAddr, + } +} + +func LogicalRequestWrapInfoToProtoRequestWrapInfo(i *logical.RequestWrapInfo) *RequestWrapInfo { + if i == nil { + return nil + } + + return &RequestWrapInfo{ + TTL: int64(i.TTL), + Format: i.Format, + SealWrap: i.SealWrap, + } +} + +func ProtoRequestWrapInfoToLogicalRequestWrapInfo(i *RequestWrapInfo) *logical.RequestWrapInfo { + if i == nil { + return nil + } + + return &logical.RequestWrapInfo{ + TTL: time.Duration(i.TTL), + Format: i.Format, + SealWrap: i.SealWrap, + } +} + +func ProtoResponseToLogicalResponse(r *Response) (*logical.Response, error) { + if r == nil { + return nil, nil + } + + secret, err := ProtoSecretToLogicalSecret(r.Secret) + if err != nil { + return nil, err + } + + auth, err := ProtoAuthToLogicalAuth(r.Auth) + if err != nil { + return nil, err + } + + data := map[string]interface{}{} + err = json.Unmarshal([]byte(r.Data), &data) + if err != nil { + return nil, err + } + + wrapInfo, err := ProtoResponseWrapInfoToLogicalResponseWrapInfo(r.WrapInfo) + if err != nil { + return nil, err + } + + var headers map[string][]string + if len(r.Headers) > 0 { + headers = make(map[string][]string, len(r.Headers)) + for k, v := range r.Headers { + headers[k] = v.Header + } + } + + return &logical.Response{ + Secret: secret, + Auth: auth, + Data: data, + Redirect: r.Redirect, + Warnings: r.Warnings, + WrapInfo: wrapInfo, + Headers: headers, + }, nil +} + +func ProtoResponseWrapInfoToLogicalResponseWrapInfo(i *ResponseWrapInfo) (*wrapping.ResponseWrapInfo, error) { + if i == nil { + return nil, nil + } + + t, err := ptypes.Timestamp(i.CreationTime) + if err != nil { + return nil, err + } + + return &wrapping.ResponseWrapInfo{ + TTL: time.Duration(i.TTL), + Token: i.Token, + Accessor: i.Accessor, + CreationTime: t, + WrappedAccessor: i.WrappedAccessor, + WrappedEntityID: i.WrappedEntityID, + Format: i.Format, + CreationPath: i.CreationPath, + SealWrap: i.SealWrap, + }, nil +} + +func LogicalResponseWrapInfoToProtoResponseWrapInfo(i *wrapping.ResponseWrapInfo) (*ResponseWrapInfo, error) { + if i == nil { + return nil, nil + } + + t, err := ptypes.TimestampProto(i.CreationTime) + if err != nil { + return nil, err + } + + return &ResponseWrapInfo{ + TTL: int64(i.TTL), + Token: i.Token, + Accessor: i.Accessor, + CreationTime: t, + WrappedAccessor: i.WrappedAccessor, + WrappedEntityID: i.WrappedEntityID, + Format: i.Format, + CreationPath: i.CreationPath, + SealWrap: i.SealWrap, + }, nil +} + +func LogicalResponseToProtoResponse(r *logical.Response) (*Response, error) { + if r == nil { + return nil, nil + } + + secret, err := LogicalSecretToProtoSecret(r.Secret) + if err != nil { + return nil, err + } + + auth, err := LogicalAuthToProtoAuth(r.Auth) + if err != nil { + return nil, err + } + + buf, err := json.Marshal(r.Data) + if err != nil { + return nil, err + } + + wrapInfo, err := LogicalResponseWrapInfoToProtoResponseWrapInfo(r.WrapInfo) + if err != nil { + return nil, err + } + + headers := 
map[string]*Header{} + for k, v := range r.Headers { + headers[k] = &Header{Header: v} + } + + return &Response{ + Secret: secret, + Auth: auth, + Data: string(buf[:]), + Redirect: r.Redirect, + Warnings: r.Warnings, + WrapInfo: wrapInfo, + Headers: headers, + }, nil +} + +func LogicalAuthToProtoAuth(a *logical.Auth) (*Auth, error) { + if a == nil { + return nil, nil + } + + buf, err := json.Marshal(a.InternalData) + if err != nil { + return nil, err + } + + lo, err := LogicalLeaseOptionsToProtoLeaseOptions(a.LeaseOptions) + if err != nil { + return nil, err + } + + boundCIDRs := make([]string, len(a.BoundCIDRs)) + for i, cidr := range a.BoundCIDRs { + boundCIDRs[i] = cidr.String() + } + + return &Auth{ + LeaseOptions: lo, + TokenType: uint32(a.TokenType), + InternalData: string(buf[:]), + DisplayName: a.DisplayName, + Policies: a.Policies, + TokenPolicies: a.TokenPolicies, + IdentityPolicies: a.IdentityPolicies, + NoDefaultPolicy: a.NoDefaultPolicy, + Metadata: a.Metadata, + ClientToken: a.ClientToken, + Accessor: a.Accessor, + Period: int64(a.Period), + NumUses: int64(a.NumUses), + EntityID: a.EntityID, + Alias: a.Alias, + GroupAliases: a.GroupAliases, + BoundCIDRs: boundCIDRs, + ExplicitMaxTTL: int64(a.ExplicitMaxTTL), + }, nil +} + +func ProtoAuthToLogicalAuth(a *Auth) (*logical.Auth, error) { + if a == nil { + return nil, nil + } + + data := map[string]interface{}{} + err := json.Unmarshal([]byte(a.InternalData), &data) + if err != nil { + return nil, err + } + + lo, err := ProtoLeaseOptionsToLogicalLeaseOptions(a.LeaseOptions) + if err != nil { + return nil, err + } + + boundCIDRs, err := parseutil.ParseAddrs(a.BoundCIDRs) + if err != nil { + return nil, err + } + if len(boundCIDRs) == 0 { + // On inbound auths, if auth.BoundCIDRs is empty, it will be nil. + // Let's match that behavior outbound. + boundCIDRs = nil + } + + return &logical.Auth{ + LeaseOptions: lo, + TokenType: logical.TokenType(a.TokenType), + InternalData: data, + DisplayName: a.DisplayName, + Policies: a.Policies, + TokenPolicies: a.TokenPolicies, + IdentityPolicies: a.IdentityPolicies, + NoDefaultPolicy: a.NoDefaultPolicy, + Metadata: a.Metadata, + ClientToken: a.ClientToken, + Accessor: a.Accessor, + Period: time.Duration(a.Period), + NumUses: int(a.NumUses), + EntityID: a.EntityID, + Alias: a.Alias, + GroupAliases: a.GroupAliases, + BoundCIDRs: boundCIDRs, + ExplicitMaxTTL: time.Duration(a.ExplicitMaxTTL), + }, nil +} + +func LogicalTokenEntryToProtoTokenEntry(t *logical.TokenEntry) *TokenEntry { + if t == nil { + return nil + } + + boundCIDRs := make([]string, len(t.BoundCIDRs)) + for i, cidr := range t.BoundCIDRs { + boundCIDRs[i] = cidr.String() + } + + return &TokenEntry{ + ID: t.ID, + Accessor: t.Accessor, + Parent: t.Parent, + Policies: t.Policies, + Path: t.Path, + Meta: t.Meta, + DisplayName: t.DisplayName, + NumUses: int64(t.NumUses), + CreationTime: t.CreationTime, + TTL: int64(t.TTL), + ExplicitMaxTTL: int64(t.ExplicitMaxTTL), + Role: t.Role, + Period: int64(t.Period), + EntityID: t.EntityID, + BoundCIDRs: boundCIDRs, + NamespaceID: t.NamespaceID, + CubbyholeID: t.CubbyholeID, + Type: uint32(t.Type), + } +} + +func ProtoTokenEntryToLogicalTokenEntry(t *TokenEntry) (*logical.TokenEntry, error) { + if t == nil { + return nil, nil + } + + boundCIDRs, err := parseutil.ParseAddrs(t.BoundCIDRs) + if err != nil { + return nil, err + } + if len(boundCIDRs) == 0 { + // On inbound auths, if auth.BoundCIDRs is empty, it will be nil. + // Let's match that behavior outbound. 
+		boundCIDRs = nil
+	}
+
+	return &logical.TokenEntry{
+		ID:             t.ID,
+		Accessor:       t.Accessor,
+		Parent:         t.Parent,
+		Policies:       t.Policies,
+		Path:           t.Path,
+		Meta:           t.Meta,
+		DisplayName:    t.DisplayName,
+		NumUses:        int(t.NumUses),
+		CreationTime:   t.CreationTime,
+		TTL:            time.Duration(t.TTL),
+		ExplicitMaxTTL: time.Duration(t.ExplicitMaxTTL),
+		Role:           t.Role,
+		Period:         time.Duration(t.Period),
+		EntityID:       t.EntityID,
+		BoundCIDRs:     boundCIDRs,
+		NamespaceID:    t.NamespaceID,
+		CubbyholeID:    t.CubbyholeID,
+		Type:           logical.TokenType(t.Type),
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/plugin.go b/vendor/github.com/hashicorp/vault/sdk/plugin/plugin.go
new file mode 100644
index 00000000..f12a3692
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/plugin/plugin.go
@@ -0,0 +1,159 @@
+package plugin
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/hashicorp/errwrap"
+	log "github.com/hashicorp/go-hclog"
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// BackendPluginClient is a wrapper around backendPluginClient
+// that also contains its plugin.Client instance. It's primarily
+// used to cleanly kill the client on Cleanup()
+type BackendPluginClient struct {
+	client *plugin.Client
+	sync.Mutex
+
+	logical.Backend
+}
+
+// Cleanup calls the RPC client's Cleanup() func and also calls
+// the go-plugin's client Kill() func
+func (b *BackendPluginClient) Cleanup(ctx context.Context) {
+	b.Backend.Cleanup(ctx)
+	b.client.Kill()
+}
+
+// NewBackend will return an instance of an RPC-based client implementation of the backend for
+// external plugins, or a concrete implementation of the backend if it is a builtin backend.
+// The backend is returned as a logical.Backend interface. The isMetadataMode param determines whether
+// the plugin should run in metadata mode.
+func NewBackend(ctx context.Context, pluginName string, pluginType consts.PluginType, sys pluginutil.LookRunnerUtil, conf *logical.BackendConfig, isMetadataMode bool) (logical.Backend, error) {
+	// Look for plugin in the plugin catalog
+	pluginRunner, err := sys.LookupPlugin(ctx, pluginName, pluginType)
+	if err != nil {
+		return nil, err
+	}
+
+	var backend logical.Backend
+	if pluginRunner.Builtin {
+		// Plugin is builtin so we can retrieve an instance of the interface
+		// from the pluginRunner. Then cast it to logical.Factory.
+		rawFactory, err := pluginRunner.BuiltinFactory()
+		if err != nil {
+			return nil, errwrap.Wrapf("error getting plugin type: {{err}}", err)
+		}
+
+		if factory, ok := rawFactory.(logical.Factory); !ok {
+			return nil, fmt.Errorf("unsupported backend type: %q", pluginName)
+		} else {
+			if backend, err = factory(ctx, conf); err != nil {
+				return nil, err
+			}
+		}
+	} else {
+		// create a backendPluginClient instance
+		backend, err = NewPluginClient(ctx, sys, pluginRunner, conf.Logger, isMetadataMode)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return backend, nil
+}
+
+func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (logical.Backend, error) {
+	// pluginSet is the map of plugins we can dispense.
+	pluginSet := map[int]plugin.PluginSet{
+		// Version 3 used to support both protocols. We want to keep it around
+		// since it's possible old plugins built against this version will still
+		// work with gRPC.
+		// There is currently no difference between version 3
+		// and version 4.
+		3: plugin.PluginSet{
+			"backend": &GRPCBackendPlugin{
+				MetadataMode: isMetadataMode,
+			},
+		},
+		4: plugin.PluginSet{
+			"backend": &GRPCBackendPlugin{
+				MetadataMode: isMetadataMode,
+			},
+		},
+	}
+
+	namedLogger := logger.Named(pluginRunner.Name)
+
+	var client *plugin.Client
+	var err error
+	if isMetadataMode {
+		client, err = pluginRunner.RunMetadataMode(ctx, sys, pluginSet, handshakeConfig, []string{}, namedLogger)
+	} else {
+		client, err = pluginRunner.Run(ctx, sys, pluginSet, handshakeConfig, []string{}, namedLogger)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// Connect via RPC
+	rpcClient, err := client.Client()
+	if err != nil {
+		return nil, err
+	}
+
+	// Request the plugin
+	raw, err := rpcClient.Dispense("backend")
+	if err != nil {
+		return nil, err
+	}
+
+	var backend logical.Backend
+	var transport string
+	// We should have a logical backend type now. This feels like a normal interface
+	// implementation but is in fact over an RPC connection.
+	switch raw.(type) {
+	case *backendGRPCPluginClient:
+		backend = raw.(*backendGRPCPluginClient)
+		transport = "gRPC"
+	default:
+		return nil, errors.New("unsupported plugin client type")
+	}
+
+	// Wrap the backend in a tracing middleware
+	if namedLogger.IsTrace() {
+		backend = &backendTracingMiddleware{
+			logger: namedLogger.With("transport", transport),
+			next:   backend,
+		}
+	}
+
+	return &BackendPluginClient{
+		client:  client,
+		Backend: backend,
+	}, nil
+}
+
+// wrapError takes a generic error type and makes it usable with the plugin
+// interface. Only errors which have exported fields and have been registered
+// with gob can be unwrapped and transported. This checks error types and, if
+// none match, wraps the error in a plugin.BasicError.
+func wrapError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	switch err.(type) {
+	case *plugin.BasicError,
+		logical.HTTPCodedError,
+		*logical.StatusBadRequest:
+		return err
+	}
+
+	return plugin.NewBasicError(err)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/serve.go b/vendor/github.com/hashicorp/vault/sdk/plugin/serve.go
new file mode 100644
index 00000000..b8cd3e58
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/plugin/serve.go
@@ -0,0 +1,92 @@
+package plugin
+
+import (
+	"crypto/tls"
+	"math"
+	"os"
+
+	"google.golang.org/grpc"
+
+	log "github.com/hashicorp/go-hclog"
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// BackendPluginName is the name of the plugin that can be
+// dispensed from the plugin server.
+const BackendPluginName = "backend"
+
+type TLSProviderFunc func() (*tls.Config, error)
+
+type ServeOpts struct {
+	BackendFactoryFunc logical.Factory
+	TLSProviderFunc    TLSProviderFunc
+	Logger             log.Logger
+}
+
+// Serve is a helper function used to serve a backend plugin. This
+// should be run on the plugin's main process.
+func Serve(opts *ServeOpts) error {
+	logger := opts.Logger
+	if logger == nil {
+		logger = log.New(&log.LoggerOptions{
+			Level:      log.Trace,
+			Output:     os.Stderr,
+			JSONFormat: true,
+		})
+	}
+
+	// pluginSets is the map of plugins we can dispense.
+	pluginSets := map[int]plugin.PluginSet{
+		// Version 3 used to support both protocols. We want to keep it around
+		// since it's possible old plugins built against this version will still
+		// work with gRPC. There is currently no difference between version 3
+		// and version 4.
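+		//
+		// As a purely illustrative sketch (myFactory is a hypothetical
+		// logical.Factory, not part of this file), a plugin binary would
+		// typically call Serve from its own main package:
+		//
+		//	func main() {
+		//		if err := plugin.Serve(&plugin.ServeOpts{
+		//			BackendFactoryFunc: myFactory,
+		//		}); err != nil {
+		//			os.Exit(1)
+		//		}
+		//	}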
+ 3: plugin.PluginSet{ + "backend": &GRPCBackendPlugin{ + Factory: opts.BackendFactoryFunc, + Logger: logger, + }, + }, + 4: plugin.PluginSet{ + "backend": &GRPCBackendPlugin{ + Factory: opts.BackendFactoryFunc, + Logger: logger, + }, + }, + } + + err := pluginutil.OptionallyEnableMlock() + if err != nil { + return err + } + + serveOpts := &plugin.ServeConfig{ + HandshakeConfig: handshakeConfig, + VersionedPlugins: pluginSets, + TLSProvider: opts.TLSProviderFunc, + Logger: logger, + + // A non-nil value here enables gRPC serving for this plugin... + GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { + opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32)) + opts = append(opts, grpc.MaxSendMsgSize(math.MaxInt32)) + return plugin.DefaultGRPCServer(opts) + }, + } + + plugin.Serve(serveOpts) + + return nil +} + +// handshakeConfigs are used to just do a basic handshake between +// a plugin and host. If the handshake fails, a user friendly error is shown. +// This prevents users from executing bad plugins or executing a plugin +// directory. It is a UX feature, not a security feature. +var handshakeConfig = plugin.HandshakeConfig{ + ProtocolVersion: 4, + MagicCookieKey: "VAULT_BACKEND_PLUGIN", + MagicCookieValue: "6669da05-b1c8-4f49-97d9-c8e5bed98e20", +} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/cgo.go b/vendor/github.com/hashicorp/vault/sdk/version/cgo.go new file mode 100644 index 00000000..2ed493a1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/version/cgo.go @@ -0,0 +1,7 @@ +// +build cgo + +package version + +func init() { + CgoEnabled = true +} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/version.go b/vendor/github.com/hashicorp/vault/sdk/version/version.go new file mode 100644 index 00000000..e0db36e8 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/version/version.go @@ -0,0 +1,74 @@ +package version + +import ( + "bytes" + "fmt" +) + +// VersionInfo +type VersionInfo struct { + Revision string + Version string + VersionPrerelease string + VersionMetadata string +} + +func GetVersion() *VersionInfo { + ver := Version + rel := VersionPrerelease + md := VersionMetadata + if GitDescribe != "" { + ver = GitDescribe + } + if GitDescribe == "" && rel == "" && VersionPrerelease != "" { + rel = "dev" + } + + return &VersionInfo{ + Revision: GitCommit, + Version: ver, + VersionPrerelease: rel, + VersionMetadata: md, + } +} + +func (c *VersionInfo) VersionNumber() string { + if Version == "unknown" && VersionPrerelease == "unknown" { + return "(version unknown)" + } + + version := fmt.Sprintf("%s", c.Version) + + if c.VersionPrerelease != "" { + version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease) + } + + if c.VersionMetadata != "" { + version = fmt.Sprintf("%s+%s", version, c.VersionMetadata) + } + + return version +} + +func (c *VersionInfo) FullVersionNumber(rev bool) string { + var versionString bytes.Buffer + + if Version == "unknown" && VersionPrerelease == "unknown" { + return "Vault (version unknown)" + } + + fmt.Fprintf(&versionString, "Vault v%s", c.Version) + if c.VersionPrerelease != "" { + fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) + } + + if c.VersionMetadata != "" { + fmt.Fprintf(&versionString, "+%s", c.VersionMetadata) + } + + if rev && c.Revision != "" { + fmt.Fprintf(&versionString, " (%s)", c.Revision) + } + + return versionString.String() +} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/version_base.go b/vendor/github.com/hashicorp/vault/sdk/version/version_base.go new file mode 
100644 index 00000000..5654ee18 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/version/version_base.go @@ -0,0 +1,14 @@ +package version + +var ( + // The git commit that was compiled. This will be filled in by the compiler. + GitCommit string + GitDescribe string + + // Whether cgo is enabled or not; set at build time + CgoEnabled bool + + Version = "1.3.3" + VersionPrerelease = "" + VersionMetadata = "" +) diff --git a/vendor/github.com/hashicorp/vault/shamir/shamir.go b/vendor/github.com/hashicorp/vault/shamir/shamir.go new file mode 100644 index 00000000..f2ba820f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/shamir/shamir.go @@ -0,0 +1,262 @@ +package shamir + +import ( + "crypto/rand" + "crypto/subtle" + "fmt" + mathrand "math/rand" + "time" + + "github.com/hashicorp/errwrap" +) + +const ( + // ShareOverhead is the byte size overhead of each share + // when using Split on a secret. This is caused by appending + // a one byte tag to the share. + ShareOverhead = 1 +) + +// polynomial represents a polynomial of arbitrary degree +type polynomial struct { + coefficients []uint8 +} + +// makePolynomial constructs a random polynomial of the given +// degree but with the provided intercept value. +func makePolynomial(intercept, degree uint8) (polynomial, error) { + // Create a wrapper + p := polynomial{ + coefficients: make([]byte, degree+1), + } + + // Ensure the intercept is set + p.coefficients[0] = intercept + + // Assign random coefficients to the polynomial + if _, err := rand.Read(p.coefficients[1:]); err != nil { + return p, err + } + + return p, nil +} + +// evaluate returns the value of the polynomial for the given x +func (p *polynomial) evaluate(x uint8) uint8 { + // Special case the origin + if x == 0 { + return p.coefficients[0] + } + + // Compute the polynomial value using Horner's method. + degree := len(p.coefficients) - 1 + out := p.coefficients[degree] + for i := degree - 1; i >= 0; i-- { + coeff := p.coefficients[i] + out = add(mult(out, x), coeff) + } + return out +} + +// interpolatePolynomial takes N sample points and returns +// the value at a given x using Lagrange interpolation.
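Before the implementation (which follows immediately), a minimal property check makes the idea concrete: interpolating k samples of a degree k-1 polynomial back at x=0 recovers the intercept, which is exactly how Combine, further down in this file, reconstructs each secret byte. This is an illustrative test sketch, not part of the vendored code; it would need to live in package shamir since it uses unexported identifiers:

```go
package shamir

import "testing"

// Sampling a random degree-2 polynomial at three points and interpolating
// the samples back at x=0 must recover the intercept byte.
func TestInterpolateRecoversIntercept(t *testing.T) {
	p, err := makePolynomial(42, 2) // intercept 42, degree 2
	if err != nil {
		t.Fatalf("makePolynomial: %v", err)
	}

	xs := []uint8{1, 2, 3}
	ys := make([]uint8, len(xs))
	for i, x := range xs {
		ys[i] = p.evaluate(x)
	}

	if got := interpolatePolynomial(xs, ys, 0); got != 42 {
		t.Fatalf("interpolated intercept = %d, want 42", got)
	}
}
```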
+func interpolatePolynomial(x_samples, y_samples []uint8, x uint8) uint8 { + limit := len(x_samples) + var result, basis uint8 + for i := 0; i < limit; i++ { + basis = 1 + for j := 0; j < limit; j++ { + if i == j { + continue + } + num := add(x, x_samples[j]) + denom := add(x_samples[i], x_samples[j]) + term := div(num, denom) + basis = mult(basis, term) + } + group := mult(y_samples[i], basis) + result = add(result, group) + } + return result +} + +// div divides two numbers in GF(2^8) +func div(a, b uint8) uint8 { + if b == 0 { + // leaks some timing information but we don't care anyways as this + // should never happen, hence the panic + panic("divide by zero") + } + + var goodVal, zero uint8 + log_a := logTable[a] + log_b := logTable[b] + diff := (int(log_a) - int(log_b)) % 255 + if diff < 0 { + diff += 255 + } + + ret := expTable[diff] + + // Ensure we return zero if a is zero but aren't subject to timing attacks + goodVal = ret + + if subtle.ConstantTimeByteEq(a, 0) == 1 { + ret = zero + } else { + ret = goodVal + } + + return ret +} + +// mult multiplies two numbers in GF(2^8) +func mult(a, b uint8) (out uint8) { + var goodVal, zero uint8 + log_a := logTable[a] + log_b := logTable[b] + sum := (int(log_a) + int(log_b)) % 255 + + ret := expTable[sum] + + // Ensure we return zero if either a or b are zero but aren't subject to + // timing attacks + goodVal = ret + + if subtle.ConstantTimeByteEq(a, 0) == 1 { + ret = zero + } else { + ret = goodVal + } + + if subtle.ConstantTimeByteEq(b, 0) == 1 { + ret = zero + } else { + // This operation does not do anything logically useful. It + // only ensures a constant number of assignments to thwart + // timing attacks. + goodVal = zero + } + + return ret +} + +// add combines two numbers in GF(2^8) +// This can also be used for subtraction since it is symmetric. +func add(a, b uint8) uint8 { + return a ^ b +} + +// Split takes an arbitrarily long secret and generates a `parts` +// number of shares, `threshold` of which are required to reconstruct +// the secret. The parts and threshold must be at least 2, and less +// than 256. The returned shares are each one byte longer than the secret +// as they attach a tag used to reconstruct the secret. +func Split(secret []byte, parts, threshold int) ([][]byte, error) { + // Sanity check the input + if parts < threshold { + return nil, fmt.Errorf("parts cannot be less than threshold") + } + if parts > 255 { + return nil, fmt.Errorf("parts cannot exceed 255") + } + if threshold < 2 { + return nil, fmt.Errorf("threshold must be at least 2") + } + if threshold > 255 { + return nil, fmt.Errorf("threshold cannot exceed 255") + } + if len(secret) == 0 { + return nil, fmt.Errorf("cannot split an empty secret") + } + + // Generate random list of x coordinates + mathrand.Seed(time.Now().UnixNano()) + xCoordinates := mathrand.Perm(255) + + // Allocate the output array, initialize the final byte + // of the output with the offset. The representation of each + // output is {y1, y2, .., yN, x}. + out := make([][]byte, parts) + for idx := range out { + out[idx] = make([]byte, len(secret)+1) + out[idx][len(secret)] = uint8(xCoordinates[idx]) + 1 + } + + // Construct a random polynomial for each byte of the secret. + // Because we are using a field of size 256, we can only represent + // a single byte as the intercept of the polynomial, so we must + // use a new polynomial for each byte. 
+ for idx, val := range secret { + p, err := makePolynomial(val, uint8(threshold-1)) + if err != nil { + return nil, errwrap.Wrapf("failed to generate polynomial: {{err}}", err) + } + + // Generate a `parts` number of (x,y) pairs + // We cheat by encoding the x value once as the final index, + // so that it only needs to be stored once. + for i := 0; i < parts; i++ { + x := uint8(xCoordinates[i]) + 1 + y := p.evaluate(x) + out[i][idx] = y + } + } + + // Return the encoded secrets + return out, nil +} + +// Combine is used to reverse a Split and reconstruct a secret +// once a `threshold` number of parts are available. +func Combine(parts [][]byte) ([]byte, error) { + // Verify enough parts provided + if len(parts) < 2 { + return nil, fmt.Errorf("less than two parts cannot be used to reconstruct the secret") + } + + // Verify the parts are all the same length + firstPartLen := len(parts[0]) + if firstPartLen < 2 { + return nil, fmt.Errorf("parts must be at least two bytes") + } + for i := 1; i < len(parts); i++ { + if len(parts[i]) != firstPartLen { + return nil, fmt.Errorf("all parts must be the same length") + } + } + + // Create a buffer to store the reconstructed secret + secret := make([]byte, firstPartLen-1) + + // Buffer to store the samples + x_samples := make([]uint8, len(parts)) + y_samples := make([]uint8, len(parts)) + + // Set the x value for each sample and ensure no x_sample values are the same, + // otherwise div() can be unhappy + checkMap := map[byte]bool{} + for i, part := range parts { + samp := part[firstPartLen-1] + if exists := checkMap[samp]; exists { + return nil, fmt.Errorf("duplicate part detected") + } + checkMap[samp] = true + x_samples[i] = samp + } + + // Reconstruct each byte + for idx := range secret { + // Set the y value for each sample + for i, part := range parts { + y_samples[i] = part[idx] + } + + // Interpolate the polynomial and compute the value at 0 + val := interpolatePolynomial(x_samples, y_samples, 0) + + // Evaluate the 0th value to get the intercept + secret[idx] = val + } + return secret, nil +} diff --git a/vendor/github.com/hashicorp/vault/shamir/tables.go b/vendor/github.com/hashicorp/vault/shamir/tables.go new file mode 100644 index 00000000..76c245e7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/shamir/tables.go @@ -0,0 +1,77 @@ +package shamir + +// Tables taken from http://www.samiam.org/galois.html +// They use 0xe5 (229) as the generator + +var ( + // logTable provides the log(X)/log(g) at each index X + logTable = [256]uint8{ + 0x00, 0xff, 0xc8, 0x08, 0x91, 0x10, 0xd0, 0x36, + 0x5a, 0x3e, 0xd8, 0x43, 0x99, 0x77, 0xfe, 0x18, + 0x23, 0x20, 0x07, 0x70, 0xa1, 0x6c, 0x0c, 0x7f, + 0x62, 0x8b, 0x40, 0x46, 0xc7, 0x4b, 0xe0, 0x0e, + 0xeb, 0x16, 0xe8, 0xad, 0xcf, 0xcd, 0x39, 0x53, + 0x6a, 0x27, 0x35, 0x93, 0xd4, 0x4e, 0x48, 0xc3, + 0x2b, 0x79, 0x54, 0x28, 0x09, 0x78, 0x0f, 0x21, + 0x90, 0x87, 0x14, 0x2a, 0xa9, 0x9c, 0xd6, 0x74, + 0xb4, 0x7c, 0xde, 0xed, 0xb1, 0x86, 0x76, 0xa4, + 0x98, 0xe2, 0x96, 0x8f, 0x02, 0x32, 0x1c, 0xc1, + 0x33, 0xee, 0xef, 0x81, 0xfd, 0x30, 0x5c, 0x13, + 0x9d, 0x29, 0x17, 0xc4, 0x11, 0x44, 0x8c, 0x80, + 0xf3, 0x73, 0x42, 0x1e, 0x1d, 0xb5, 0xf0, 0x12, + 0xd1, 0x5b, 0x41, 0xa2, 0xd7, 0x2c, 0xe9, 0xd5, + 0x59, 0xcb, 0x50, 0xa8, 0xdc, 0xfc, 0xf2, 0x56, + 0x72, 0xa6, 0x65, 0x2f, 0x9f, 0x9b, 0x3d, 0xba, + 0x7d, 0xc2, 0x45, 0x82, 0xa7, 0x57, 0xb6, 0xa3, + 0x7a, 0x75, 0x4f, 0xae, 0x3f, 0x37, 0x6d, 0x47, + 0x61, 0xbe, 0xab, 0xd3, 0x5f, 0xb0, 0x58, 0xaf, + 0xca, 0x5e, 0xfa, 0x85, 0xe4, 0x4d, 0x8a, 0x05, + 0xfb, 0x60, 
0xb7, 0x7b, 0xb8, 0x26, 0x4a, 0x67, + 0xc6, 0x1a, 0xf8, 0x69, 0x25, 0xb3, 0xdb, 0xbd, + 0x66, 0xdd, 0xf1, 0xd2, 0xdf, 0x03, 0x8d, 0x34, + 0xd9, 0x92, 0x0d, 0x63, 0x55, 0xaa, 0x49, 0xec, + 0xbc, 0x95, 0x3c, 0x84, 0x0b, 0xf5, 0xe6, 0xe7, + 0xe5, 0xac, 0x7e, 0x6e, 0xb9, 0xf9, 0xda, 0x8e, + 0x9a, 0xc9, 0x24, 0xe1, 0x0a, 0x15, 0x6b, 0x3a, + 0xa0, 0x51, 0xf4, 0xea, 0xb2, 0x97, 0x9e, 0x5d, + 0x22, 0x88, 0x94, 0xce, 0x19, 0x01, 0x71, 0x4c, + 0xa5, 0xe3, 0xc5, 0x31, 0xbb, 0xcc, 0x1f, 0x2d, + 0x3b, 0x52, 0x6f, 0xf6, 0x2e, 0x89, 0xf7, 0xc0, + 0x68, 0x1b, 0x64, 0x04, 0x06, 0xbf, 0x83, 0x38} + + // expTable provides the anti-log or exponentiation value + // for the equivalent index + expTable = [256]uint8{ + 0x01, 0xe5, 0x4c, 0xb5, 0xfb, 0x9f, 0xfc, 0x12, + 0x03, 0x34, 0xd4, 0xc4, 0x16, 0xba, 0x1f, 0x36, + 0x05, 0x5c, 0x67, 0x57, 0x3a, 0xd5, 0x21, 0x5a, + 0x0f, 0xe4, 0xa9, 0xf9, 0x4e, 0x64, 0x63, 0xee, + 0x11, 0x37, 0xe0, 0x10, 0xd2, 0xac, 0xa5, 0x29, + 0x33, 0x59, 0x3b, 0x30, 0x6d, 0xef, 0xf4, 0x7b, + 0x55, 0xeb, 0x4d, 0x50, 0xb7, 0x2a, 0x07, 0x8d, + 0xff, 0x26, 0xd7, 0xf0, 0xc2, 0x7e, 0x09, 0x8c, + 0x1a, 0x6a, 0x62, 0x0b, 0x5d, 0x82, 0x1b, 0x8f, + 0x2e, 0xbe, 0xa6, 0x1d, 0xe7, 0x9d, 0x2d, 0x8a, + 0x72, 0xd9, 0xf1, 0x27, 0x32, 0xbc, 0x77, 0x85, + 0x96, 0x70, 0x08, 0x69, 0x56, 0xdf, 0x99, 0x94, + 0xa1, 0x90, 0x18, 0xbb, 0xfa, 0x7a, 0xb0, 0xa7, + 0xf8, 0xab, 0x28, 0xd6, 0x15, 0x8e, 0xcb, 0xf2, + 0x13, 0xe6, 0x78, 0x61, 0x3f, 0x89, 0x46, 0x0d, + 0x35, 0x31, 0x88, 0xa3, 0x41, 0x80, 0xca, 0x17, + 0x5f, 0x53, 0x83, 0xfe, 0xc3, 0x9b, 0x45, 0x39, + 0xe1, 0xf5, 0x9e, 0x19, 0x5e, 0xb6, 0xcf, 0x4b, + 0x38, 0x04, 0xb9, 0x2b, 0xe2, 0xc1, 0x4a, 0xdd, + 0x48, 0x0c, 0xd0, 0x7d, 0x3d, 0x58, 0xde, 0x7c, + 0xd8, 0x14, 0x6b, 0x87, 0x47, 0xe8, 0x79, 0x84, + 0x73, 0x3c, 0xbd, 0x92, 0xc9, 0x23, 0x8b, 0x97, + 0x95, 0x44, 0xdc, 0xad, 0x40, 0x65, 0x86, 0xa2, + 0xa4, 0xcc, 0x7f, 0xec, 0xc0, 0xaf, 0x91, 0xfd, + 0xf7, 0x4f, 0x81, 0x2f, 0x5b, 0xea, 0xa8, 0x1c, + 0x02, 0xd1, 0x98, 0x71, 0xed, 0x25, 0xe3, 0x24, + 0x06, 0x68, 0xb3, 0x93, 0x2c, 0x6f, 0x3e, 0x6c, + 0x0a, 0xb8, 0xce, 0xae, 0x74, 0xb1, 0x42, 0xb4, + 0x1e, 0xd3, 0x49, 0xe9, 0x9c, 0xc8, 0xc6, 0xc7, + 0x22, 0x6e, 0xdb, 0x20, 0xbf, 0x43, 0x51, 0x52, + 0x66, 0xb2, 0x76, 0x60, 0xda, 0xc5, 0xf3, 0xf6, + 0xaa, 0xcd, 0x9a, 0xa0, 0x75, 0x54, 0x0e, 0x01} +) diff --git a/vendor/github.com/hashicorp/vault/vault/acl.go b/vendor/github.com/hashicorp/vault/vault/acl.go new file mode 100644 index 00000000..1313fa33 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/acl.go @@ -0,0 +1,732 @@ +package vault + +import ( + "context" + "fmt" + "reflect" + "sort" + "strings" + + "github.com/armon/go-radix" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/copystructure" +) + +// ACL is used to wrap a set of policies to provide +// an efficient interface for access control. +type ACL struct { + // exactRules contains the path policies that are exact + exactRules *radix.Tree + + // prefixRules contains the path policies that are a prefix + prefixRules *radix.Tree + + segmentWildcardPaths map[string]interface{} + + // root is enabled if the "root" named policy is present. 
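A quick standalone sketch of why the ACL keeps two radix trees (the struct definition resumes below): exact-path rules are served by Get, while prefix rules fall back to LongestPrefix, mirroring the lookup order used later in AllowOperation. It uses the same github.com/armon/go-radix package imported above; the paths and permission strings are invented for illustration:

```go
package main

import (
	"fmt"

	radix "github.com/armon/go-radix"
)

func main() {
	exactRules := radix.New()
	prefixRules := radix.New()

	exactRules.Insert("secret/app/config", "exact-perms")
	prefixRules.Insert("secret/app/", "prefix-perms")

	// An exact hit is checked first...
	if v, ok := exactRules.Get("secret/app/config"); ok {
		fmt.Println("exact:", v)
	}
	// ...otherwise the longest matching prefix rule applies.
	if p, v, ok := prefixRules.LongestPrefix("secret/app/creds"); ok {
		fmt.Printf("prefix %q: %v\n", p, v)
	}
}
```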
+ root bool + + // Stores policies that are actually RGPs for later fetching + rgpPolicies []*Policy +} + +type PolicyCheckOpts struct { + RootPrivsRequired bool + Unauth bool +} + +type AuthResults struct { + ACLResults *ACLResults + Allowed bool + RootPrivs bool + DeniedError bool + Error *multierror.Error +} + +type ACLResults struct { + Allowed bool + RootPrivs bool + IsRoot bool + MFAMethods []string + ControlGroup *ControlGroup + CapabilitiesBitmap uint32 +} + +// NewACL is used to construct a policy based ACL from a set of policies. +func NewACL(ctx context.Context, policies []*Policy) (*ACL, error) { + // Initialize + a := &ACL{ + exactRules: radix.New(), + prefixRules: radix.New(), + segmentWildcardPaths: make(map[string]interface{}, len(policies)), + root: false, + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + if ns == nil { + return nil, namespace.ErrNoNamespace + } + + // Inject each policy + for _, policy := range policies { + // Ignore a nil policy object + if policy == nil { + continue + } + + switch policy.Type { + case PolicyTypeACL: + case PolicyTypeRGP: + a.rgpPolicies = append(a.rgpPolicies, policy) + continue + default: + return nil, fmt.Errorf("unable to parse policy (wrong type)") + } + + // Check if this is root + if policy.Name == "root" { + if ns.ID != namespace.RootNamespaceID { + return nil, fmt.Errorf("root policy is only allowed in root namespace") + } + + if len(policies) != 1 { + return nil, fmt.Errorf("other policies present along with root") + } + a.root = true + } + + for _, pc := range policy.Paths { + var raw interface{} + var ok bool + var tree *radix.Tree + + switch { + case pc.HasSegmentWildcards: + raw, ok = a.segmentWildcardPaths[pc.Path] + default: + // Check which tree to use + tree = a.exactRules + if pc.IsPrefix { + tree = a.prefixRules + } + + // Check for an existing policy + raw, ok = tree.Get(pc.Path) + } + + if !ok { + clonedPerms, err := pc.Permissions.Clone() + if err != nil { + return nil, errwrap.Wrapf("error cloning ACL permissions: {{err}}", err) + } + switch { + case pc.HasSegmentWildcards: + a.segmentWildcardPaths[pc.Path] = clonedPerms + default: + tree.Insert(pc.Path, clonedPerms) + } + continue + } + + // these are the ones already in the tree + existingPerms := raw.(*ACLPermissions) + + switch { + case existingPerms.CapabilitiesBitmap&DenyCapabilityInt > 0: + // If we are explicitly denied in the existing capability set, + // don't save anything else + continue + + case pc.Permissions.CapabilitiesBitmap&DenyCapabilityInt > 0: + // If this new policy explicitly denies, only save the deny value + existingPerms.CapabilitiesBitmap = DenyCapabilityInt + existingPerms.AllowedParameters = nil + existingPerms.DeniedParameters = nil + goto INSERT + + default: + // Insert the capabilities in this new policy into the existing + // value + existingPerms.CapabilitiesBitmap = existingPerms.CapabilitiesBitmap | pc.Permissions.CapabilitiesBitmap + } + + // Note: In these stanzas, we're preferring minimum lifetimes. So + // we take the lesser of two specified max values, or we take the + // lesser of two specified min values, the idea being, allowing + // token lifetime to be minimum possible. + // + // If we have an existing max, and we either don't have a current + // max, or the current is greater than the previous, use the + // existing. 
+ if pc.Permissions.MaxWrappingTTL > 0 && + (existingPerms.MaxWrappingTTL == 0 || + pc.Permissions.MaxWrappingTTL < existingPerms.MaxWrappingTTL) { + existingPerms.MaxWrappingTTL = pc.Permissions.MaxWrappingTTL + } + // If we have an existing min, and we either don't have a current + // min, or the current is greater than the previous, use the + // existing + if pc.Permissions.MinWrappingTTL > 0 && + (existingPerms.MinWrappingTTL == 0 || + pc.Permissions.MinWrappingTTL < existingPerms.MinWrappingTTL) { + existingPerms.MinWrappingTTL = pc.Permissions.MinWrappingTTL + } + + if len(pc.Permissions.AllowedParameters) > 0 { + if existingPerms.AllowedParameters == nil { + clonedAllowed, err := copystructure.Copy(pc.Permissions.AllowedParameters) + if err != nil { + return nil, err + } + existingPerms.AllowedParameters = clonedAllowed.(map[string][]interface{}) + } else { + for key, value := range pc.Permissions.AllowedParameters { + pcValue, ok := existingPerms.AllowedParameters[key] + // If an empty array exist it should overwrite any other + // value. + if len(value) == 0 || (ok && len(pcValue) == 0) { + existingPerms.AllowedParameters[key] = []interface{}{} + } else { + // Merge the two maps, appending values on key conflict. + existingPerms.AllowedParameters[key] = append(value, existingPerms.AllowedParameters[key]...) + } + } + } + } + + if len(pc.Permissions.DeniedParameters) > 0 { + if existingPerms.DeniedParameters == nil { + clonedDenied, err := copystructure.Copy(pc.Permissions.DeniedParameters) + if err != nil { + return nil, err + } + existingPerms.DeniedParameters = clonedDenied.(map[string][]interface{}) + } else { + for key, value := range pc.Permissions.DeniedParameters { + pcValue, ok := existingPerms.DeniedParameters[key] + // If an empty array exist it should overwrite any other + // value. + if len(value) == 0 || (ok && len(pcValue) == 0) { + existingPerms.DeniedParameters[key] = []interface{}{} + } else { + // Merge the two maps, appending values on key conflict. + existingPerms.DeniedParameters[key] = append(value, existingPerms.DeniedParameters[key]...) 
+ } + } + } + } + + if len(pc.Permissions.RequiredParameters) > 0 { + if len(existingPerms.RequiredParameters) == 0 { + existingPerms.RequiredParameters = pc.Permissions.RequiredParameters + } else { + for _, v := range pc.Permissions.RequiredParameters { + if !strutil.StrListContains(existingPerms.RequiredParameters, v) { + existingPerms.RequiredParameters = append(existingPerms.RequiredParameters, v) + } + } + } + } + + if len(pc.Permissions.MFAMethods) > 0 { + if existingPerms.MFAMethods == nil { + existingPerms.MFAMethods = pc.Permissions.MFAMethods + } else { + for _, method := range pc.Permissions.MFAMethods { + existingPerms.MFAMethods = append(existingPerms.MFAMethods, method) + } + } + existingPerms.MFAMethods = strutil.RemoveDuplicates(existingPerms.MFAMethods, false) + } + + // No need to dedupe this list since any authorization can satisfy any factor + if pc.Permissions.ControlGroup != nil { + if len(pc.Permissions.ControlGroup.Factors) > 0 { + if existingPerms.ControlGroup == nil { + existingPerms.ControlGroup = pc.Permissions.ControlGroup + } else { + for _, authz := range pc.Permissions.ControlGroup.Factors { + existingPerms.ControlGroup.Factors = append(existingPerms.ControlGroup.Factors, authz) + } + } + } + } + + INSERT: + switch { + case pc.HasSegmentWildcards: + a.segmentWildcardPaths[pc.Path] = existingPerms + default: + tree.Insert(pc.Path, existingPerms) + } + } + } + return a, nil +} + +func (a *ACL) Capabilities(ctx context.Context, path string) (pathCapabilities []string) { + req := &logical.Request{ + Path: path, + // doesn't matter, but use List to trigger fallback behavior so we can + // model real behavior + Operation: logical.ListOperation, + } + + res := a.AllowOperation(ctx, req, true) + if res.IsRoot { + return []string{RootCapability} + } + + capabilities := res.CapabilitiesBitmap + + if capabilities&SudoCapabilityInt > 0 { + pathCapabilities = append(pathCapabilities, SudoCapability) + } + if capabilities&ReadCapabilityInt > 0 { + pathCapabilities = append(pathCapabilities, ReadCapability) + } + if capabilities&ListCapabilityInt > 0 { + pathCapabilities = append(pathCapabilities, ListCapability) + } + if capabilities&UpdateCapabilityInt > 0 { + pathCapabilities = append(pathCapabilities, UpdateCapability) + } + if capabilities&DeleteCapabilityInt > 0 { + pathCapabilities = append(pathCapabilities, DeleteCapability) + } + if capabilities&CreateCapabilityInt > 0 { + pathCapabilities = append(pathCapabilities, CreateCapability) + } + + // If "deny" is explicitly set or if the path has no capabilities at all, + // set the path capabilities to "deny" + if capabilities&DenyCapabilityInt > 0 || len(pathCapabilities) == 0 { + pathCapabilities = []string{DenyCapability} + } + return +} + +// AllowOperation is used to check if the given operation is permitted. 
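Before AllowOperation itself (which follows), here is a toy demonstration of the bitmap algebra the merge logic above relies on: merging policies ORs their capability bits together, and an explicit deny collapses the whole set. The constant values here are assumed for the sketch rather than taken from the policy code:

```go
package main

import "fmt"

const (
	DenyCapabilityInt uint32 = 1 << iota
	CreateCapabilityInt
	ReadCapabilityInt
	UpdateCapabilityInt
)

// merge mimics the NewACL behavior: deny on either side wins outright,
// otherwise capabilities accumulate via bitwise OR.
func merge(a, b uint32) uint32 {
	if a&DenyCapabilityInt > 0 || b&DenyCapabilityInt > 0 {
		return DenyCapabilityInt
	}
	return a | b
}

func main() {
	readOnly := ReadCapabilityInt
	writer := CreateCapabilityInt | UpdateCapabilityInt

	fmt.Printf("merged: %04b\n", merge(readOnly, writer))            // read+create+update
	fmt.Printf("denied: %04b\n", merge(readOnly, DenyCapabilityInt)) // deny only
}
```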
+func (a *ACL) AllowOperation(ctx context.Context, req *logical.Request, capCheckOnly bool) (ret *ACLResults) { + ret = new(ACLResults) + + // Fast-path root + if a.root { + ret.Allowed = true + ret.RootPrivs = true + ret.IsRoot = true + return + } + op := req.Operation + + // Help is always allowed + if op == logical.HelpOperation { + ret.Allowed = true + return + } + + var permissions *ACLPermissions + + ns, err := namespace.FromContext(ctx) + if err != nil { + return + } + path := ns.Path + req.Path + + // The request path should take care of this already but this is useful for + // tests and as defense in depth + for { + if len(path) > 0 && path[0] == '/' { + path = path[1:] + } else { + break + } + } + + // Find an exact matching rule, look for prefix if no match + var capabilities uint32 + raw, ok := a.exactRules.Get(path) + if ok { + permissions = raw.(*ACLPermissions) + capabilities = permissions.CapabilitiesBitmap + goto CHECK + } + if op == logical.ListOperation { + raw, ok = a.exactRules.Get(strings.TrimSuffix(path, "/")) + if ok { + permissions = raw.(*ACLPermissions) + capabilities = permissions.CapabilitiesBitmap + goto CHECK + } + } + + permissions = a.CheckAllowedFromNonExactPaths(path, false) + if permissions != nil { + capabilities = permissions.CapabilitiesBitmap + goto CHECK + } + + // No exact, prefix, or segment wildcard paths found, return without + // setting allowed + return + +CHECK: + // Check if the minimum permissions are met + // If "deny" has been explicitly set, only deny will be in the map, so we + // only need to check for the existence of other values + ret.RootPrivs = capabilities&SudoCapabilityInt > 0 + + // This is after the RootPrivs check so we can gate on it being from sudo + // rather than policy root + if capCheckOnly { + ret.CapabilitiesBitmap = capabilities + return ret + } + + ret.MFAMethods = permissions.MFAMethods + ret.ControlGroup = permissions.ControlGroup + + operationAllowed := false + switch op { + case logical.ReadOperation: + operationAllowed = capabilities&ReadCapabilityInt > 0 + case logical.ListOperation: + operationAllowed = capabilities&ListCapabilityInt > 0 + case logical.UpdateOperation: + operationAllowed = capabilities&UpdateCapabilityInt > 0 + case logical.DeleteOperation: + operationAllowed = capabilities&DeleteCapabilityInt > 0 + case logical.CreateOperation: + operationAllowed = capabilities&CreateCapabilityInt > 0 + + // These three re-use UpdateCapabilityInt since that's the most appropriate + // capability/operation mapping + case logical.RevokeOperation, logical.RenewOperation, logical.RollbackOperation: + operationAllowed = capabilities&UpdateCapabilityInt > 0 + + default: + return + } + + if !operationAllowed { + return + } + + if permissions.MaxWrappingTTL > 0 { + if req.WrapInfo == nil || req.WrapInfo.TTL > permissions.MaxWrappingTTL { + return + } + } + if permissions.MinWrappingTTL > 0 { + if req.WrapInfo == nil || req.WrapInfo.TTL < permissions.MinWrappingTTL { + return + } + } + // This situation can happen because of merging, even though in a single + // path statement we check on ingress + if permissions.MinWrappingTTL != 0 && + permissions.MaxWrappingTTL != 0 && + permissions.MaxWrappingTTL < permissions.MinWrappingTTL { + return + } + + // Only check parameter permissions for operations that can modify + // parameters. 
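The parameter gate in the code that follows boils down to: denied values (a "*" entry, a matching value, or an empty denied list) win over everything, and a non-empty allowed map acts as a whitelist. A condensed toy model of just the allowed/denied value rules, with the real code's glob matching simplified to string equality and required-parameter handling omitted:

```go
package main

import "fmt"

func paramAllowed(name, value string, allowed, denied map[string][]string) bool {
	if _, ok := denied["*"]; ok {
		return false // everything denied
	}
	if vals, ok := denied[name]; ok {
		if len(vals) == 0 {
			return false // empty denied list denies every value
		}
		for _, v := range vals {
			if v == value {
				return false
			}
		}
	}
	if len(allowed) == 0 {
		return true // no allowed list configured: allow
	}
	if _, ok := allowed["*"]; ok {
		return true
	}
	vals, ok := allowed[name]
	if !ok {
		return false // parameter not whitelisted
	}
	if len(vals) == 0 {
		return true // whitelisted with any value
	}
	for _, v := range vals {
		if v == value {
			return true
		}
	}
	return false
}

func main() {
	allowed := map[string][]string{"ttl": {"1h", "2h"}}
	denied := map[string][]string{"password": {}}

	fmt.Println(paramAllowed("ttl", "1h", allowed, denied))     // true
	fmt.Println(paramAllowed("ttl", "8h", allowed, denied))     // false
	fmt.Println(paramAllowed("password", "x", allowed, denied)) // false
}
```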
+ if op == logical.ReadOperation || op == logical.UpdateOperation || op == logical.CreateOperation { + for _, parameter := range permissions.RequiredParameters { + if _, ok := req.Data[strings.ToLower(parameter)]; !ok { + return + } + } + + // If there are no data fields, allow + if len(req.Data) == 0 { + ret.Allowed = true + return + } + + if len(permissions.DeniedParameters) == 0 { + goto ALLOWED_PARAMETERS + } + + // Check if all parameters have been denied + if _, ok := permissions.DeniedParameters["*"]; ok { + return + } + + for parameter, value := range req.Data { + // Check if parameter has been explicitly denied + if valueSlice, ok := permissions.DeniedParameters[strings.ToLower(parameter)]; ok { + // If the value exists in denied values slice, deny + if valueInParameterList(value, valueSlice) { + return + } + } + } + + ALLOWED_PARAMETERS: + // If we don't have any allowed parameters set, allow + if len(permissions.AllowedParameters) == 0 { + ret.Allowed = true + return + } + + _, allowedAll := permissions.AllowedParameters["*"] + if len(permissions.AllowedParameters) == 1 && allowedAll { + ret.Allowed = true + return + } + + for parameter, value := range req.Data { + valueSlice, ok := permissions.AllowedParameters[strings.ToLower(parameter)] + // Requested parameter is not in allowed list + if !ok && !allowedAll { + return + } + + // If the value doesn't exists in the allowed values slice, + // deny + if ok && !valueInParameterList(value, valueSlice) { + return + } + } + } + + ret.Allowed = true + return +} + +type wcPathDescr struct { + firstWCOrGlob int + wildcards int + isPrefix bool + wcPath string + perms *ACLPermissions +} + +// CheckAllowedFromNonExactPaths returns permissions corresponding to a +// matching path with wildcards/globs. If bareMount is true, the path should +// correspond to a mount prefix, and what is returned is either a non-nil set +// of permissions from some allowed path underneath the mount (for use in mount +// access checks), or nil indicating no non-deny permissions were found. +func (a *ACL) CheckAllowedFromNonExactPaths(path string, bareMount bool) *ACLPermissions { + wcPathDescrs := make([]wcPathDescr, 0, len(a.segmentWildcardPaths)+1) + + less := func(i, j int) bool { + // In the case of multiple matches, we use this priority order, + // which tries to most closely match longest-prefix: + // + // * First glob or wildcard position (prefer foo/a* over foo/+, + // foo/bar/+/baz over foo/+/bar/baz) + // * Whether it's a prefix (prefer foo/+/bar over foo/+/ba*, + // foo/+ over foo/*) + // * Number of wildcard segments (prefer foo/bar/+/baz over foo/+/+/baz) + // * Length check (prefer foo/+/bar/ba* over foo/+/bar/b*) + // * Lexicographical ordering (preferring less, arbitrarily) + // + // That final case (lexigraphical) should never really come up. It's more + // of a throwing-up-hands scenario akin to panic("should not be here") + // statements, but less panicky. 
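To make that priority order concrete, here is a toy ranking of four assumed patterns against a request for "secret/foo/bar"; the real comparator, which follows, adds the length and lexicographic tie-breakers omitted here:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

type descr struct {
	pattern       string
	firstWCOrGlob int
	wildcards     int
	isPrefix      bool
}

func main() {
	patterns := []string{"secret/+/bar", "secret/foo/*", "secret/foo/+", "secret/+/+"}

	ds := make([]descr, 0, len(patterns))
	for _, p := range patterns {
		ds = append(ds, descr{
			pattern:       p,
			firstWCOrGlob: strings.IndexAny(p, "+*"),
			wildcards:     strings.Count(p, "+"),
			isPrefix:      strings.HasSuffix(p, "*"),
		})
	}

	// Lower priority sorts first, mirroring the real `less`; the winning
	// pattern for the request ends up last.
	sort.Slice(ds, func(i, j int) bool {
		if ds[i].firstWCOrGlob != ds[j].firstWCOrGlob {
			return ds[i].firstWCOrGlob < ds[j].firstWCOrGlob
		}
		if ds[i].isPrefix != ds[j].isPrefix {
			return ds[i].isPrefix // globs rank below non-globs
		}
		return ds[i].wildcards > ds[j].wildcards
	})

	for _, d := range ds {
		fmt.Println(d.pattern)
	}
	// Prints: secret/+/+, secret/+/bar, secret/foo/*, secret/foo/+
	// so secret/foo/+ wins: latest wildcard position and not a glob.
}
```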
+ + pdi, pdj := wcPathDescrs[i], wcPathDescrs[j] + + // If the first wildcard (+) or glob (*) occurs earlier in pdi, + // pdi is lower priority + if pdi.firstWCOrGlob < pdj.firstWCOrGlob { + return true + } else if pdi.firstWCOrGlob > pdj.firstWCOrGlob { + return false + } + + // If pdi ends in * and pdj doesn't, pdi is lower priority + if pdi.isPrefix && !pdj.isPrefix { + return true + } else if !pdi.isPrefix && pdj.isPrefix { + return false + } + + // If pdi has more wc segs, pdi is lower priority + if pdi.wildcards > pdj.wildcards { + return true + } else if pdi.wildcards < pdj.wildcards { + return false + } + + // If pdi is shorter, it is lower priority + if len(pdi.wcPath) < len(pdj.wcPath) { + return true + } else if len(pdi.wcPath) > len(pdj.wcPath) { + return false + } + + // If pdi is smaller lexicographically, it is lower priority + if pdi.wcPath < pdj.wcPath { + return true + } else if pdi.wcPath > pdj.wcPath { + return false + } + return false + } + + // Find a prefix rule if any. + { + prefix, raw, ok := a.prefixRules.LongestPrefix(path) + if ok { + if len(a.segmentWildcardPaths) == 0 { + return raw.(*ACLPermissions) + } + wcPathDescrs = append(wcPathDescrs, wcPathDescr{ + firstWCOrGlob: len(prefix), + wcPath: prefix, + isPrefix: true, + perms: raw.(*ACLPermissions), + }) + } + } + + if len(a.segmentWildcardPaths) == 0 { + return nil + } + + pathParts := strings.Split(path, "/") + +SWCPATH: + for fullWCPath := range a.segmentWildcardPaths { + if fullWCPath == "" { + continue + } + pd := wcPathDescr{firstWCOrGlob: strings.Index(fullWCPath, "+")} + + currWCPath := fullWCPath + if currWCPath[len(currWCPath)-1] == '*' { + pd.isPrefix = true + currWCPath = currWCPath[0 : len(currWCPath)-1] + } + pd.wcPath = currWCPath + + splitCurrWCPath := strings.Split(currWCPath, "/") + + if !bareMount && len(pathParts) < len(splitCurrWCPath) { + // check if the path coming in is shorter; if so it can't match + continue + } + if !bareMount && !pd.isPrefix && len(splitCurrWCPath) != len(pathParts) { + // If it's not a prefix we expect the same number of segments + continue + } + + segments := make([]string, 0, len(splitCurrWCPath)) + for i, aclPart := range splitCurrWCPath { + switch { + case aclPart == "+": + pd.wildcards++ + segments = append(segments, pathParts[i]) + + case aclPart == pathParts[i]: + segments = append(segments, pathParts[i]) + + case pd.isPrefix && i == len(splitCurrWCPath)-1 && strings.HasPrefix(pathParts[i], aclPart): + segments = append(segments, pathParts[i:]...) + + case !bareMount: + // Found a mismatch, give up on this segmentWildcardPath + continue SWCPATH + } + + // -2 because we're always invoked with a trailing "/" in case bareMount. + if bareMount && i == len(pathParts)-2 { + joinedPath := strings.Join(segments, "/") + "/" + // Check the current joined path so far. If we find a prefix, + // check permissions. If they're defined but not deny, success. + if strings.HasPrefix(joinedPath, path) { + permissions := a.segmentWildcardPaths[fullWCPath].(*ACLPermissions) + if permissions.CapabilitiesBitmap&DenyCapabilityInt == 0 && permissions.CapabilitiesBitmap > 0 { + return permissions + } + } + continue SWCPATH + } + } + pd.perms = a.segmentWildcardPaths[fullWCPath].(*ACLPermissions) + wcPathDescrs = append(wcPathDescrs, pd) + } + + if bareMount || len(wcPathDescrs) == 0 { + return nil + } + + // We don't do this in the bare mount check because we don't care about + // priority, we only care about any capability at all. 
+ sort.Slice(wcPathDescrs, less) + + return wcPathDescrs[len(wcPathDescrs)-1].perms +} + +func (c *Core) performPolicyChecks(ctx context.Context, acl *ACL, te *logical.TokenEntry, req *logical.Request, inEntity *identity.Entity, opts *PolicyCheckOpts) *AuthResults { + ret := new(AuthResults) + + // First, perform normal ACL checks if requested. The only time no ACL + // should be applied is if we are only processing EGPs against a login + // path in which case opts.Unauth will be set. + if acl != nil && !opts.Unauth { + ret.ACLResults = acl.AllowOperation(ctx, req, false) + ret.RootPrivs = ret.ACLResults.RootPrivs + // Root is always allowed; skip Sentinel/MFA checks + if ret.ACLResults.IsRoot { + //logger.Warn("token is root, skipping checks") + ret.Allowed = true + return ret + } + if !ret.ACLResults.Allowed { + return ret + } + if !ret.RootPrivs && opts.RootPrivsRequired { + return ret + } + } + + c.performEntPolicyChecks(ctx, acl, te, req, inEntity, opts, ret) + + return ret +} + +func valueInParameterList(v interface{}, list []interface{}) bool { + // Empty list is equivalent to the item always existing in the list + if len(list) == 0 { + return true + } + + return valueInSlice(v, list) +} + +func valueInSlice(v interface{}, list []interface{}) bool { + for _, el := range list { + if el == nil || v == nil { + // It doesn't seem possible to set up a nil entry in the list, but it is possible + // to pass in a null entry in the API request being checked. Just in case, + // nil will match nil. + if el == v { + return true + } + } else if reflect.TypeOf(el).String() == "string" && reflect.TypeOf(v).String() == "string" { + item := el.(string) + val := v.(string) + + if strutil.GlobbedStringsMatch(item, val) { + return true + } + } else if reflect.DeepEqual(el, v) { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/vault/vault/acl_util.go b/vendor/github.com/hashicorp/vault/vault/acl_util.go new file mode 100644 index 00000000..b3ea126b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/acl_util.go @@ -0,0 +1,14 @@ +// +build !enterprise + +package vault + +import ( + "context" + + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/sdk/logical" +) + +func (c *Core) performEntPolicyChecks(ctx context.Context, acl *ACL, te *logical.TokenEntry, req *logical.Request, inEntity *identity.Entity, opts *PolicyCheckOpts, ret *AuthResults) { + ret.Allowed = true +} diff --git a/vendor/github.com/hashicorp/vault/vault/audit.go b/vendor/github.com/hashicorp/vault/vault/audit.go new file mode 100644 index 00000000..3c54789a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/audit.go @@ -0,0 +1,527 @@ +package vault + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "strings" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // coreAuditConfigPath is used to store the audit configuration. + // Audit configuration is protected within the Vault itself, which means it + // can only be viewed or modified after an unseal. 
+ coreAuditConfigPath = "core/audit" + + // coreLocalAuditConfigPath is used to store audit information for local + // (non-replicated) mounts + coreLocalAuditConfigPath = "core/local-audit" + + // auditBarrierPrefix is the prefix to the UUID used in the + // barrier view for the audit backends. + auditBarrierPrefix = "audit/" + + // auditTableType is the value we expect to find for the audit table and + // corresponding entries + auditTableType = "audit" +) + +var ( + // loadAuditFailed if loading audit tables encounters an error + errLoadAuditFailed = errors.New("failed to setup audit table") +) + +// enableAudit is used to enable a new audit backend +func (c *Core) enableAudit(ctx context.Context, entry *MountEntry, updateStorage bool) error { + // Ensure we end the path in a slash + if !strings.HasSuffix(entry.Path, "/") { + entry.Path += "/" + } + + // Ensure there is a name + if entry.Path == "/" { + return fmt.Errorf("backend path must be specified") + } + + // Update the audit table + c.auditLock.Lock() + defer c.auditLock.Unlock() + + // Look for matching name + for _, ent := range c.audit.Entries { + switch { + // Existing is sql/mysql/ new is sql/ or + // existing is sql/ and new is sql/mysql/ + case strings.HasPrefix(ent.Path, entry.Path): + fallthrough + case strings.HasPrefix(entry.Path, ent.Path): + return fmt.Errorf("path already in use") + } + } + + // Generate a new UUID and view + if entry.UUID == "" { + entryUUID, err := uuid.GenerateUUID() + if err != nil { + return err + } + entry.UUID = entryUUID + } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor("audit_" + entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + } + viewPath := entry.ViewPath() + view := NewBarrierView(c.barrier, viewPath) + addAuditPathChecker(c, entry, view, viewPath) + origViewReadOnlyErr := view.getReadOnlyErr() + + // Mark the view as read-only until the mounting is complete and + // ensure that it is reset after. This ensures that there will be no + // writes during the construction of the backend. 
+ view.setReadOnlyErr(logical.ErrSetupReadOnly) + defer view.setReadOnlyErr(origViewReadOnlyErr) + + // Lookup the new backend + backend, err := c.newAuditBackend(ctx, entry, view, entry.Options) + if err != nil { + return err + } + if backend == nil { + return fmt.Errorf("nil audit backend of type %q returned from factory", entry.Type) + } + + newTable := c.audit.shallowClone() + newTable.Entries = append(newTable.Entries, entry) + + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + entry.NamespaceID = ns.ID + entry.namespace = ns + + if updateStorage { + if err := c.persistAudit(ctx, newTable, entry.Local); err != nil { + return errors.New("failed to update audit table") + } + } + + c.audit = newTable + + // Register the backend + c.auditBroker.Register(entry.Path, backend, view, entry.Local) + if c.logger.IsInfo() { + c.logger.Info("enabled audit backend", "path", entry.Path, "type", entry.Type) + } + + return nil +} + +// disableAudit is used to disable an existing audit backend +func (c *Core) disableAudit(ctx context.Context, path string, updateStorage bool) (bool, error) { + // Ensure we end the path in a slash + if !strings.HasSuffix(path, "/") { + path += "/" + } + + // Ensure there is a name + if path == "/" { + return false, fmt.Errorf("backend path must be specified") + } + + // Remove the entry from the mount table + c.auditLock.Lock() + defer c.auditLock.Unlock() + + newTable := c.audit.shallowClone() + entry, err := newTable.remove(ctx, path) + if err != nil { + return false, err + } + + // Ensure there was a match + if entry == nil { + return false, fmt.Errorf("no matching backend") + } + + c.removeAuditReloadFunc(entry) + + // When unmounting all entries the JSON code will load back up from storage + // as a nil slice, which kills tests...just set it nil explicitly + if len(newTable.Entries) == 0 { + newTable.Entries = nil + } + + if updateStorage { + // Update the audit table + if err := c.persistAudit(ctx, newTable, entry.Local); err != nil { + return true, errors.New("failed to update audit table") + } + } + + c.audit = newTable + + // Unmount the backend + c.auditBroker.Deregister(path) + if c.logger.IsInfo() { + c.logger.Info("disabled audit backend", "path", path) + } + + removeAuditPathChecker(c, entry) + + return true, nil +} + +// loadAudits is invoked as part of postUnseal to load the audit table +func (c *Core) loadAudits(ctx context.Context) error { + auditTable := &MountTable{} + localAuditTable := &MountTable{} + + // Load the existing audit table + raw, err := c.barrier.Get(ctx, coreAuditConfigPath) + if err != nil { + c.logger.Error("failed to read audit table", "error", err) + return errLoadAuditFailed + } + rawLocal, err := c.barrier.Get(ctx, coreLocalAuditConfigPath) + if err != nil { + c.logger.Error("failed to read local audit table", "error", err) + return errLoadAuditFailed + } + + c.auditLock.Lock() + defer c.auditLock.Unlock() + + if raw != nil { + if err := jsonutil.DecodeJSON(raw.Value, auditTable); err != nil { + c.logger.Error("failed to decode audit table", "error", err) + return errLoadAuditFailed + } + c.audit = auditTable + } + + var needPersist bool + if c.audit == nil { + c.audit = defaultAuditTable() + needPersist = true + } + + if rawLocal != nil { + if err := jsonutil.DecodeJSON(rawLocal.Value, localAuditTable); err != nil { + c.logger.Error("failed to decode local audit table", "error", err) + return errLoadAuditFailed + } + if localAuditTable != nil && len(localAuditTable.Entries) > 0 { + c.audit.Entries = 
append(c.audit.Entries, localAuditTable.Entries...) + } + } + + // Upgrade to typed auth table + if c.audit.Type == "" { + c.audit.Type = auditTableType + needPersist = true + } + + // Upgrade to table-scoped entries + for _, entry := range c.audit.Entries { + if entry.Table == "" { + entry.Table = c.audit.Type + needPersist = true + } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor("audit_" + entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + needPersist = true + } + + if entry.NamespaceID == "" { + entry.NamespaceID = namespace.RootNamespaceID + needPersist = true + } + // Get the namespace from the namespace ID and load it in memory + ns, err := NamespaceByID(ctx, entry.NamespaceID, c) + if err != nil { + return err + } + if ns == nil { + return namespace.ErrNoNamespace + } + entry.namespace = ns + } + + if !needPersist || c.perfStandby { + return nil + } + + if err := c.persistAudit(ctx, c.audit, false); err != nil { + return errLoadAuditFailed + } + return nil +} + +// persistAudit is used to persist the audit table after modification +func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bool) error { + if table.Type != auditTableType { + c.logger.Error("given table to persist has wrong type", "actual_type", table.Type, "expected_type", auditTableType) + return fmt.Errorf("invalid table type given, not persisting") + } + + for _, entry := range table.Entries { + if entry.Table != table.Type { + c.logger.Error("given entry to persist in audit table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type) + return fmt.Errorf("invalid audit entry found, not persisting") + } + } + + nonLocalAudit := &MountTable{ + Type: auditTableType, + } + + localAudit := &MountTable{ + Type: auditTableType, + } + + for _, entry := range table.Entries { + if entry.Local { + localAudit.Entries = append(localAudit.Entries, entry) + } else { + nonLocalAudit.Entries = append(nonLocalAudit.Entries, entry) + } + } + + if !localOnly { + // Marshal the table + compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalAudit, nil) + if err != nil { + c.logger.Error("failed to encode and/or compress audit table", "error", err) + return err + } + + // Create an entry + entry := &logical.StorageEntry{ + Key: coreAuditConfigPath, + Value: compressedBytes, + } + + // Write to the physical backend + if err := c.barrier.Put(ctx, entry); err != nil { + c.logger.Error("failed to persist audit table", "error", err) + return err + } + } + + // Repeat with local audit + compressedBytes, err := jsonutil.EncodeJSONAndCompress(localAudit, nil) + if err != nil { + c.logger.Error("failed to encode and/or compress local audit table", "error", err) + return err + } + + entry := &logical.StorageEntry{ + Key: coreLocalAuditConfigPath, + Value: compressedBytes, + } + + if err := c.barrier.Put(ctx, entry); err != nil { + c.logger.Error("failed to persist local audit table", "error", err) + return err + } + + return nil +} + +// setupAudit is invoked after we've loaded the audit able to +// initialize the audit backends +func (c *Core) setupAudits(ctx context.Context) error { + brokerLogger := c.baseLogger.Named("audit") + c.AddLogger(brokerLogger) + broker := NewAuditBroker(brokerLogger) + + c.auditLock.Lock() + defer c.auditLock.Unlock() + + var successCount int + + for _, entry := range c.audit.Entries { + // Create a barrier view using the UUID + viewPath := entry.ViewPath() + view := 
NewBarrierView(c.barrier, viewPath) + addAuditPathChecker(c, entry, view, viewPath) + origViewReadOnlyErr := view.getReadOnlyErr() + + // Mark the view as read-only until the mounting is complete and + // ensure that it is reset after. This ensures that there will be no + // writes during the construction of the backend. + view.setReadOnlyErr(logical.ErrSetupReadOnly) + c.postUnsealFuncs = append(c.postUnsealFuncs, func() { + view.setReadOnlyErr(origViewReadOnlyErr) + }) + + // Initialize the backend + backend, err := c.newAuditBackend(ctx, entry, view, entry.Options) + if err != nil { + c.logger.Error("failed to create audit entry", "path", entry.Path, "error", err) + continue + } + if backend == nil { + c.logger.Error("created audit entry was nil", "path", entry.Path, "type", entry.Type) + continue + } + + // Mount the backend + broker.Register(entry.Path, backend, view, entry.Local) + + successCount++ + } + + if len(c.audit.Entries) > 0 && successCount == 0 { + return errLoadAuditFailed + } + + c.auditBroker = broker + return nil +} + +// teardownAudit is used before we seal the vault to reset the audit +// backends to their unloaded state. This is reversed by loadAudits. +func (c *Core) teardownAudits() error { + c.auditLock.Lock() + defer c.auditLock.Unlock() + + if c.audit != nil { + for _, entry := range c.audit.Entries { + c.removeAuditReloadFunc(entry) + removeAuditPathChecker(c, entry) + } + } + + c.audit = nil + c.auditBroker = nil + return nil +} + +// removeAuditReloadFunc removes the reload func from the working set. The +// audit lock needs to be held before calling this. +func (c *Core) removeAuditReloadFunc(entry *MountEntry) { + switch entry.Type { + case "file": + key := "audit_file|" + entry.Path + c.reloadFuncsLock.Lock() + + if c.logger.IsDebug() { + c.baseLogger.Named("audit").Debug("removing reload function", "path", entry.Path) + } + + delete(c.reloadFuncs, key) + + c.reloadFuncsLock.Unlock() + } +} + +// newAuditBackend is used to create and configure a new audit backend by name +func (c *Core) newAuditBackend(ctx context.Context, entry *MountEntry, view logical.Storage, conf map[string]string) (audit.Backend, error) { + f, ok := c.auditBackends[entry.Type] + if !ok { + return nil, fmt.Errorf("unknown backend type: %q", entry.Type) + } + saltConfig := &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", + Location: salt.DefaultLocation, + } + + be, err := f(ctx, &audit.BackendConfig{ + SaltView: view, + SaltConfig: saltConfig, + Config: conf, + }) + if err != nil { + return nil, err + } + if be == nil { + return nil, fmt.Errorf("nil backend returned from %q factory function", entry.Type) + } + + auditLogger := c.baseLogger.Named("audit") + c.AddLogger(auditLogger) + + switch entry.Type { + case "file": + key := "audit_file|" + entry.Path + + c.reloadFuncsLock.Lock() + + if auditLogger.IsDebug() { + auditLogger.Debug("adding reload function", "path", entry.Path) + if entry.Options != nil { + auditLogger.Debug("file backend options", "path", entry.Path, "file_path", entry.Options["file_path"]) + } + } + + c.reloadFuncs[key] = append(c.reloadFuncs[key], func(map[string]interface{}) error { + if auditLogger.IsInfo() { + auditLogger.Info("reloading file audit backend", "path", entry.Path) + } + return be.Reload(ctx) + }) + + c.reloadFuncsLock.Unlock() + case "socket": + if auditLogger.IsDebug() { + if entry.Options != nil { + auditLogger.Debug("socket backend options", "path", entry.Path, "address", entry.Options["address"], "socket type", 
entry.Options["socket_type"]) + } + } + case "syslog": + if auditLogger.IsDebug() { + if entry.Options != nil { + auditLogger.Debug("syslog backend options", "path", entry.Path, "facility", entry.Options["facility"], "tag", entry.Options["tag"]) + } + } + } + + return be, err +} + +// defaultAuditTable creates a default audit table +func defaultAuditTable() *MountTable { + table := &MountTable{ + Type: auditTableType, + } + return table +} + +type genericAuditor struct { + c *Core + mountType string + namespace *namespace.Namespace +} + +func (g genericAuditor) AuditRequest(ctx context.Context, input *logical.LogInput) error { + ctx = namespace.ContextWithNamespace(ctx, g.namespace) + logInput := *input + logInput.Type = g.mountType + "-request" + return g.c.auditBroker.LogRequest(ctx, &logInput, g.c.auditedHeaders) +} + +func (g genericAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error { + ctx = namespace.ContextWithNamespace(ctx, g.namespace) + logInput := *input + logInput.Type = g.mountType + "-response" + return g.c.auditBroker.LogResponse(ctx, &logInput, g.c.auditedHeaders) +} diff --git a/vendor/github.com/hashicorp/vault/vault/audit_broker.go b/vendor/github.com/hashicorp/vault/vault/audit_broker.go new file mode 100644 index 00000000..9440ec3f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/audit_broker.go @@ -0,0 +1,214 @@ +package vault + +import ( + "context" + "fmt" + "sync" + "time" + + metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/sdk/logical" +) + +type backendEntry struct { + backend audit.Backend + view *BarrierView + local bool +} + +// AuditBroker is used to provide a single ingest interface to auditable +// events given that multiple backends may be configured. 
+type AuditBroker struct { + sync.RWMutex + backends map[string]backendEntry + logger log.Logger +} + +// NewAuditBroker creates a new audit broker +func NewAuditBroker(log log.Logger) *AuditBroker { + b := &AuditBroker{ + backends: make(map[string]backendEntry), + logger: log, + } + return b +} + +// Register is used to add new audit backend to the broker +func (a *AuditBroker) Register(name string, b audit.Backend, v *BarrierView, local bool) { + a.Lock() + defer a.Unlock() + a.backends[name] = backendEntry{ + backend: b, + view: v, + local: local, + } +} + +// Deregister is used to remove an audit backend from the broker +func (a *AuditBroker) Deregister(name string) { + a.Lock() + defer a.Unlock() + delete(a.backends, name) +} + +// IsRegistered is used to check if a given audit backend is registered +func (a *AuditBroker) IsRegistered(name string) bool { + a.RLock() + defer a.RUnlock() + _, ok := a.backends[name] + return ok +} + +// IsLocal is used to check if a given audit backend is registered +func (a *AuditBroker) IsLocal(name string) (bool, error) { + a.RLock() + defer a.RUnlock() + be, ok := a.backends[name] + if ok { + return be.local, nil + } + return false, fmt.Errorf("unknown audit backend %q", name) +} + +// GetHash returns a hash using the salt of the given backend +func (a *AuditBroker) GetHash(ctx context.Context, name string, input string) (string, error) { + a.RLock() + defer a.RUnlock() + be, ok := a.backends[name] + if !ok { + return "", fmt.Errorf("unknown audit backend %q", name) + } + + return be.backend.GetHash(ctx, input) +} + +// LogRequest is used to ensure all the audit backends have an opportunity to +// log the given request and that *at least one* succeeds. +func (a *AuditBroker) LogRequest(ctx context.Context, in *logical.LogInput, headersConfig *AuditedHeadersConfig) (ret error) { + defer metrics.MeasureSince([]string{"audit", "log_request"}, time.Now()) + a.RLock() + defer a.RUnlock() + + var retErr *multierror.Error + + defer func() { + if r := recover(); r != nil { + a.logger.Error("panic during logging", "request_path", in.Request.Path, "error", r) + retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log")) + } + + ret = retErr.ErrorOrNil() + failure := float32(0.0) + if ret != nil { + failure = 1.0 + } + metrics.IncrCounter([]string{"audit", "log_request_failure"}, failure) + }() + + // All logged requests must have an identifier + //if req.ID == "" { + // a.logger.Error("missing identifier in request object", "request_path", req.Path) + // retErr = multierror.Append(retErr, fmt.Errorf("missing identifier in request object: %s", req.Path)) + // return + //} + + headers := in.Request.Headers + defer func() { + in.Request.Headers = headers + }() + + // Ensure at least one backend logs + anyLogged := false + for name, be := range a.backends { + in.Request.Headers = nil + transHeaders, thErr := headersConfig.ApplyConfig(ctx, headers, be.backend.GetHash) + if thErr != nil { + a.logger.Error("backend failed to include headers", "backend", name, "error", thErr) + continue + } + in.Request.Headers = transHeaders + + start := time.Now() + lrErr := be.backend.LogRequest(ctx, in) + metrics.MeasureSince([]string{"audit", name, "log_request"}, start) + if lrErr != nil { + a.logger.Error("backend failed to log request", "backend", name, "error", lrErr) + } else { + anyLogged = true + } + } + if !anyLogged && len(a.backends) > 0 { + retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the request")) + } + + 
return retErr.ErrorOrNil() +} + +// LogResponse is used to ensure all the audit backends have an opportunity to +// log the given response and that *at least one* succeeds. +func (a *AuditBroker) LogResponse(ctx context.Context, in *logical.LogInput, headersConfig *AuditedHeadersConfig) (ret error) { + defer metrics.MeasureSince([]string{"audit", "log_response"}, time.Now()) + a.RLock() + defer a.RUnlock() + + var retErr *multierror.Error + + defer func() { + if r := recover(); r != nil { + a.logger.Error("panic during logging", "request_path", in.Request.Path, "error", r) + retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log")) + } + + ret = retErr.ErrorOrNil() + + failure := float32(0.0) + if ret != nil { + failure = 1.0 + } + metrics.IncrCounter([]string{"audit", "log_response_failure"}, failure) + }() + + headers := in.Request.Headers + defer func() { + in.Request.Headers = headers + }() + + // Ensure at least one backend logs + anyLogged := false + for name, be := range a.backends { + in.Request.Headers = nil + transHeaders, thErr := headersConfig.ApplyConfig(ctx, headers, be.backend.GetHash) + if thErr != nil { + a.logger.Error("backend failed to include headers", "backend", name, "error", thErr) + continue + } + in.Request.Headers = transHeaders + + start := time.Now() + lrErr := be.backend.LogResponse(ctx, in) + metrics.MeasureSince([]string{"audit", name, "log_response"}, start) + if lrErr != nil { + a.logger.Error("backend failed to log response", "backend", name, "error", lrErr) + } else { + anyLogged = true + } + } + if !anyLogged && len(a.backends) > 0 { + retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the response")) + } + + return retErr.ErrorOrNil() +} + +func (a *AuditBroker) Invalidate(ctx context.Context, key string) { + // For now we ignore the key as this would only apply to salts. We just + // sort of brute force it on each one. + a.Lock() + defer a.Unlock() + for _, be := range a.backends { + be.backend.Invalidate(ctx) + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/audited_headers.go b/vendor/github.com/hashicorp/vault/vault/audited_headers.go new file mode 100644 index 00000000..7abe78cd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/audited_headers.go @@ -0,0 +1,162 @@ +package vault + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/logical" +) + +// N.B.: While we could use textproto to get the canonical mime header, HTTP/2 +// requires all headers to be converted to lower case, so we just do that. + +const ( + // Key used in the BarrierView to store and retrieve the header config + auditedHeadersEntry = "audited-headers" + // Path used to create a sub view off of BarrierView + auditedHeadersSubPath = "audited-headers-config/" +) + +type auditedHeaderSettings struct { + HMAC bool `json:"hmac"` +} + +// AuditedHeadersConfig is used by the Audit Broker to write only approved +// headers to the audit logs. It uses a BarrierView to persist the settings. 
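A minimal illustration of the scheme the config below implements: header names are compared case-insensitively by lower-casing, unapproved headers never reach the audit log, and approved ones are passed through either verbatim or hashed. The keyed hash here is a stand-in for the audit backend's salted HMAC, and the header names and salt are invented for the sketch (the config type follows):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// lower-cased header name -> should the value be HMAC'ed?
	settings := map[string]bool{
		"x-forwarded-for": false,
		"authorization":   true,
	}
	incoming := map[string][]string{
		"Authorization":   {"Bearer s.1234"},
		"X-Forwarded-For": {"10.0.0.1"},
		"User-Agent":      {"curl/7.64"},
	}

	mac := hmac.New(sha256.New, []byte("per-backend-salt")) // stand-in hash
	result := map[string][]string{}
	for name, vals := range incoming {
		lower := strings.ToLower(name)
		doHMAC, audited := settings[lower]
		if !audited {
			continue // unapproved headers are dropped entirely
		}
		out := make([]string, len(vals))
		for i, v := range vals {
			if doHMAC {
				mac.Reset()
				mac.Write([]byte(v))
				out[i] = "hmac-sha256:" + hex.EncodeToString(mac.Sum(nil))
			} else {
				out[i] = v
			}
		}
		result[lower] = out
	}
	fmt.Println(result) // user-agent dropped, authorization hashed
}
```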
+type AuditedHeadersConfig struct {
+	Headers map[string]*auditedHeaderSettings
+
+	view *BarrierView
+	sync.RWMutex
+}
+
+// add adds or overwrites a header in the config and updates the barrier view
+func (a *AuditedHeadersConfig) add(ctx context.Context, header string, hmac bool) error {
+	if header == "" {
+		return fmt.Errorf("header value cannot be empty")
+	}
+
+	// Grab a write lock
+	a.Lock()
+	defer a.Unlock()
+
+	if a.Headers == nil {
+		a.Headers = make(map[string]*auditedHeaderSettings, 1)
+	}
+
+	a.Headers[strings.ToLower(header)] = &auditedHeaderSettings{hmac}
+	entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.Headers)
+	if err != nil {
+		return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err)
+	}
+
+	if err := a.view.Put(ctx, entry); err != nil {
+		return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err)
+	}
+
+	return nil
+}
+
+// remove deletes a header out of the header config and updates the barrier view
+func (a *AuditedHeadersConfig) remove(ctx context.Context, header string) error {
+	if header == "" {
+		return fmt.Errorf("header value cannot be empty")
+	}
+
+	// Grab a write lock
+	a.Lock()
+	defer a.Unlock()
+
+	// Nothing to delete
+	if len(a.Headers) == 0 {
+		return nil
+	}
+
+	delete(a.Headers, strings.ToLower(header))
+	entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.Headers)
+	if err != nil {
+		return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err)
+	}
+
+	if err := a.view.Put(ctx, entry); err != nil {
+		return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err)
+	}
+
+	return nil
+}
+
+// ApplyConfig returns a map of approved headers and their values, either
+// HMAC'ed or plaintext
+func (a *AuditedHeadersConfig) ApplyConfig(ctx context.Context, headers map[string][]string, hashFunc func(context.Context, string) (string, error)) (result map[string][]string, retErr error) {
+	// Grab a read lock
+	a.RLock()
+	defer a.RUnlock()
+
+	// Make a copy of the incoming headers with everything lower so we can
+	// case-insensitively compare
+	lowerHeaders := make(map[string][]string, len(headers))
+	for k, v := range headers {
+		lowerHeaders[strings.ToLower(k)] = v
+	}
+
+	result = make(map[string][]string, len(a.Headers))
+	for key, settings := range a.Headers {
+		if val, ok := lowerHeaders[key]; ok {
+			// copy the header values so we don't overwrite them
+			hVals := make([]string, len(val))
+			copy(hVals, val)
+
+			// Optionally hmac the values
+			if settings.HMAC {
+				for i, el := range hVals {
+					hVal, err := hashFunc(ctx, el)
+					if err != nil {
+						return nil, err
+					}
+					hVals[i] = hVal
+				}
+			}
+
+			result[key] = hVals
+		}
+	}
+
+	return result, nil
+}
+
+// Initialize the headers config by loading from the barrier view
+func (c *Core) setupAuditedHeadersConfig(ctx context.Context) error {
+	// Create a sub-view
+	view := c.systemBarrierView.SubView(auditedHeadersSubPath)
+
+	// Create the config
+	out, err := view.Get(ctx, auditedHeadersEntry)
+	if err != nil {
+		return errwrap.Wrapf("failed to read config: {{err}}", err)
+	}
+
+	headers := make(map[string]*auditedHeaderSettings)
+	if out != nil {
+		err = out.DecodeJSON(&headers)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Ensure that we are able to case-insensitively access the headers;
+	// necessary for the upgrade case
+	lowerHeaders := make(map[string]*auditedHeaderSettings, len(headers))
+	for k, v := range headers {
+		lowerHeaders[strings.ToLower(k)] = v
+	}
+
+	c.auditedHeaders = &AuditedHeadersConfig{
+		Headers: lowerHeaders,
+		view:    view,
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/auth.go b/vendor/github.com/hashicorp/vault/vault/auth.go
new file mode 100644
index 00000000..3d4e169d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/auth.go
@@ -0,0 +1,857 @@
+package vault
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/builtin/plugin"
+	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/helper/strutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	// coreAuthConfigPath is used to store the auth configuration.
+	// Auth configuration is protected within the Vault itself, which means it
+	// can only be viewed or modified after an unseal.
+	coreAuthConfigPath = "core/auth"
+
+	// coreLocalAuthConfigPath is used to store credential configuration for
+	// local (non-replicated) mounts
+	coreLocalAuthConfigPath = "core/local-auth"
+
+	// credentialBarrierPrefix is the prefix to the UUID used in the
+	// barrier view for the credential backends.
+	credentialBarrierPrefix = "auth/"
+
+	// credentialRoutePrefix is the mount prefix used for the router
+	credentialRoutePrefix = "auth/"
+
+	// credentialTableType is the value we expect to find for the credential
+	// table and corresponding entries
+	credentialTableType = "auth"
+)
+
+var (
+	// errLoadAuthFailed is returned if loadCredentials encounters an error
+	errLoadAuthFailed = errors.New("failed to setup auth table")
+
+	// credentialAliases maps old backend names to new backend names, allowing us
+	// to move/rename backends but maintain backwards compatibility
+	credentialAliases = map[string]string{"aws-ec2": "aws"}
+)
+
+// enableCredential is used to enable a new credential backend
+func (c *Core) enableCredential(ctx context.Context, entry *MountEntry) error {
+	// Enable credential internally
+	if err := c.enableCredentialInternal(ctx, entry, MountTableUpdateStorage); err != nil {
+		return err
+	}
+
+	// Re-evaluate filtered paths
+	if err := runFilteredPathsEvaluation(ctx, c); err != nil {
+		c.logger.Error("failed to evaluate filtered paths", "error", err)
+
+		// We failed to evaluate filtered paths so we are undoing the mount operation
+		if disableCredentialErr := c.disableCredentialInternal(ctx, entry.Path, MountTableUpdateStorage); disableCredentialErr != nil {
+			c.logger.Error("failed to disable credential", "error", disableCredentialErr)
+		}
+		return err
+	}
+	return nil
+}
+
+// enableCredentialInternal is used to enable a new credential backend
+func (c *Core) enableCredentialInternal(ctx context.Context, entry *MountEntry, updateStorage bool) error {
+	// Ensure we end the path in a slash
+	if !strings.HasSuffix(entry.Path, "/") {
+		entry.Path += "/"
+	}
+
+	// Ensure there is a name
+	if entry.Path == "/" {
+		return fmt.Errorf("backend path must be specified")
+	}
+
+	c.authLock.Lock()
+	defer c.authLock.Unlock()
+
+	ns, err := namespace.FromContext(ctx)
+	if err != nil {
+		return err
+	}
+	entry.NamespaceID = ns.ID
+	entry.namespace = ns
+
+	// Populate cache
+	NamespaceByID(ctx, ns.ID, c)
+
+	// Basic check for matching names
+	for _, ent := range c.auth.Entries {
+		if ns.ID == ent.NamespaceID {
+			switch {
+			// Existing is oauth/github/ new is oauth/ or
+			// existing is oauth/ and new is oauth/github/
+			case strings.HasPrefix(ent.Path, entry.Path):
+				fallthrough
+			case 
strings.HasPrefix(entry.Path, ent.Path): + return logical.CodedError(409, fmt.Sprintf("path is already in use at %s", ent.Path)) + } + } + } + + // Ensure the token backend is a singleton + if entry.Type == "token" { + return fmt.Errorf("token credential backend cannot be instantiated") + } + + // Check for conflicts according to the router + if conflict := c.router.MountConflict(ctx, credentialRoutePrefix+entry.Path); conflict != "" { + return logical.CodedError(409, fmt.Sprintf("existing mount at %s", conflict)) + } + + // Generate a new UUID and view + if entry.UUID == "" { + entryUUID, err := uuid.GenerateUUID() + if err != nil { + return err + } + entry.UUID = entryUUID + } + if entry.BackendAwareUUID == "" { + bUUID, err := uuid.GenerateUUID() + if err != nil { + return err + } + entry.BackendAwareUUID = bUUID + } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor("auth_" + entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + } + // Sync values to the cache + entry.SyncCache() + + viewPath := entry.ViewPath() + view := NewBarrierView(c.barrier, viewPath) + + // Singleton mounts cannot be filtered on a per-secondary basis + // from replication + if strutil.StrListContains(singletonMounts, entry.Type) { + addFilterablePath(c, viewPath) + } + + nilMount, err := preprocessMount(c, entry, view) + if err != nil { + return err + } + origViewReadOnlyErr := view.getReadOnlyErr() + + // Mark the view as read-only until the mounting is complete and + // ensure that it is reset after. This ensures that there will be no + // writes during the construction of the backend. + view.setReadOnlyErr(logical.ErrSetupReadOnly) + defer view.setReadOnlyErr(origViewReadOnlyErr) + + var backend logical.Backend + // Create the new backend + sysView := c.mountEntrySysView(entry) + backend, err = c.newCredentialBackend(ctx, entry, sysView, view) + if err != nil { + return err + } + if backend == nil { + return fmt.Errorf("nil backend returned from %q factory", entry.Type) + } + + // Check for the correct backend type + backendType := backend.Type() + if backendType != logical.TypeCredential { + return fmt.Errorf("cannot mount %q of type %q as an auth backend", entry.Type, backendType) + } + + addPathCheckers(c, entry, backend, viewPath) + + // If the mount is filtered or we are on a DR secondary we don't want to + // keep the actual backend running, so we clean it up and set it to nil + // so the router does not have a pointer to the object. + if nilMount { + backend.Cleanup(ctx) + backend = nil + } + + // Update the auth table + newTable := c.auth.shallowClone() + newTable.Entries = append(newTable.Entries, entry) + if updateStorage { + if err := c.persistAuth(ctx, newTable, &entry.Local); err != nil { + if err == logical.ErrReadOnly && c.perfStandby { + return err + } + return errors.New("failed to update auth table") + } + } + + c.auth = newTable + + if err := c.router.Mount(backend, credentialRoutePrefix+entry.Path, entry, view); err != nil { + return err + } + + if !nilMount { + // restore the original readOnlyErr, so we can write to the view in + // Initialize() if necessary + view.setReadOnlyErr(origViewReadOnlyErr) + // initialize, using the core's active context. 
+		err := backend.Initialize(c.activeContext, &logical.InitializationRequest{Storage: view})
+		if err != nil {
+			return err
+		}
+	}
+
+	if c.logger.IsInfo() {
+		c.logger.Info("enabled credential backend", "path", entry.Path, "type", entry.Type)
+	}
+	return nil
+}
+
+// disableCredential is used to disable an existing credential backend
+func (c *Core) disableCredential(ctx context.Context, path string) error {
+	// Ensure we end the path in a slash
+	if !strings.HasSuffix(path, "/") {
+		path += "/"
+	}
+
+	// Ensure the token backend is not affected
+	if path == "token/" {
+		return fmt.Errorf("token credential backend cannot be disabled")
+	}
+
+	// Disable credential internally
+	if err := c.disableCredentialInternal(ctx, path, MountTableUpdateStorage); err != nil {
+		return err
+	}
+
+	// Re-evaluate filtered paths
+	if err := runFilteredPathsEvaluation(ctx, c); err != nil {
+		// Even if we failed to evaluate filtered paths, the unmount operation was still successful
+		c.logger.Error("failed to evaluate filtered paths", "error", err)
+	}
+	return nil
+}
+
+func (c *Core) disableCredentialInternal(ctx context.Context, path string, updateStorage bool) error {
+	path = credentialRoutePrefix + path
+
+	ns, err := namespace.FromContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	// Verify exact match of the route
+	match := c.router.MatchingMount(ctx, path)
+	if match == "" || ns.Path+path != match {
+		return fmt.Errorf("no matching mount")
+	}
+
+	// Store the view for this backend
+	view := c.router.MatchingStorageByAPIPath(ctx, path)
+	if view == nil {
+		return fmt.Errorf("no matching backend %q", path)
+	}
+
+	// Get the backend/mount entry for this path, used to remove ignored
+	// replication prefixes
+	backend := c.router.MatchingBackend(ctx, path)
+	entry := c.router.MatchingMountEntry(ctx, path)
+
+	// Mark the entry as tainted
+	if err := c.taintCredEntry(ctx, path, updateStorage); err != nil {
+		return err
+	}
+
+	// Taint the router path to prevent routing
+	if err := c.router.Taint(ctx, path); err != nil {
+		return err
+	}
+
+	if c.expiration != nil && backend != nil {
+		// Revoke credentials from this path
+		ns, err := namespace.FromContext(ctx)
+		if err != nil {
+			return err
+		}
+		revokeCtx := namespace.ContextWithNamespace(c.activeContext, ns)
+		if err := c.expiration.RevokePrefix(revokeCtx, path, true); err != nil {
+			return err
+		}
+	}
+
+	if backend != nil {
+		// Call cleanup function if it exists
+		backend.Cleanup(ctx)
+	}
+
+	viewPath := entry.ViewPath()
+	switch {
+	case !updateStorage:
+		// Don't attempt to clear data, replication will handle this
+	case c.IsDRSecondary():
+		// If we are a dr secondary we want to clear the view, but the provided
+		// view is marked as read only. We use the barrier here to get around
+		// it.
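+
+		// Editor's note: wrapping c.barrier in a fresh BarrierView below
+		// sidesteps the read-only error installed on the router's view, so
+		// the clear can proceed even on a DR secondary.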
+		if err := logical.ClearViewWithLogging(ctx, NewBarrierView(c.barrier, viewPath), c.logger.Named("auth.deletion").With("namespace", ns.ID, "path", path)); err != nil {
+			c.logger.Error("failed to clear view for path being unmounted", "error", err, "path", path)
+			return err
+		}
+
+	case entry.Local, !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary):
+		// Have writable storage, remove the whole thing
+		if err := logical.ClearViewWithLogging(ctx, view, c.logger.Named("auth.deletion").With("namespace", ns.ID, "path", path)); err != nil {
+			c.logger.Error("failed to clear view for path being unmounted", "error", err, "path", path)
+			return err
+		}
+
+	case !entry.Local && c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary):
+		if err := clearIgnoredPaths(ctx, c, backend, viewPath); err != nil {
+			return err
+		}
+	}
+
+	// Remove the mount table entry
+	if err := c.removeCredEntry(ctx, strings.TrimPrefix(path, credentialRoutePrefix), updateStorage); err != nil {
+		return err
+	}
+
+	// Unmount the backend
+	if err := c.router.Unmount(ctx, path); err != nil {
+		return err
+	}
+
+	removePathCheckers(c, entry, viewPath)
+
+	if c.logger.IsInfo() {
+		c.logger.Info("disabled credential backend", "path", path)
+	}
+
+	return nil
+}
+
+// removeCredEntry is used to remove an entry in the auth table
+func (c *Core) removeCredEntry(ctx context.Context, path string, updateStorage bool) error {
+	c.authLock.Lock()
+	defer c.authLock.Unlock()
+
+	// Remove the entry from the auth table
+	newTable := c.auth.shallowClone()
+	entry, err := newTable.remove(ctx, path)
+	if err != nil {
+		return err
+	}
+	if entry == nil {
+		c.logger.Error("nil entry found removing entry in auth table", "path", path)
+		return logical.CodedError(500, "failed to remove entry in auth table")
+	}
+
+	if updateStorage {
+		// Update the auth table
+		if err := c.persistAuth(ctx, newTable, &entry.Local); err != nil {
+			if err == logical.ErrReadOnly && c.perfStandby {
+				return err
+			}
+
+			return errors.New("failed to update auth table")
+		}
+	}
+
+	c.auth = newTable
+
+	return nil
+}
+
+// remountCredEntryForceInternal takes a copy of the mount entry for the path and fully
+// unmounts and remounts the backend to pick up any changes, such as filtered
+// paths. This should only be used internally.
+func (c *Core) remountCredEntryForceInternal(ctx context.Context, path string, updateStorage bool) error { + fullPath := credentialRoutePrefix + path + me := c.router.MatchingMountEntry(ctx, fullPath) + if me == nil { + return fmt.Errorf("cannot find mount for path %q", path) + } + + me, err := me.Clone() + if err != nil { + return err + } + + if err := c.disableCredentialInternal(ctx, path, updateStorage); err != nil { + return err + } + + // Enable credential internally + if err := c.enableCredentialInternal(ctx, me, updateStorage); err != nil { + return err + } + + // Re-evaluate filtered paths + if err := runFilteredPathsEvaluation(ctx, c); err != nil { + c.logger.Error("failed to evaluate filtered paths", "error", err) + return err + } + return nil +} + +// taintCredEntry is used to mark an entry in the auth table as tainted +func (c *Core) taintCredEntry(ctx context.Context, path string, updateStorage bool) error { + c.authLock.Lock() + defer c.authLock.Unlock() + + // Taint the entry from the auth table + // We do this on the original since setting the taint operates + // on the entries which a shallow clone shares anyways + entry, err := c.auth.setTaint(ctx, strings.TrimPrefix(path, credentialRoutePrefix), true) + if err != nil { + return err + } + + // Ensure there was a match + if entry == nil { + return fmt.Errorf("no matching backend") + } + + if updateStorage { + // Update the auth table + if err := c.persistAuth(ctx, c.auth, &entry.Local); err != nil { + if err == logical.ErrReadOnly && c.perfStandby { + return err + } + return errors.New("failed to update auth table") + } + } + + return nil +} + +// loadCredentials is invoked as part of postUnseal to load the auth table +func (c *Core) loadCredentials(ctx context.Context) error { + // Load the existing mount table + raw, err := c.barrier.Get(ctx, coreAuthConfigPath) + if err != nil { + c.logger.Error("failed to read auth table", "error", err) + return errLoadAuthFailed + } + rawLocal, err := c.barrier.Get(ctx, coreLocalAuthConfigPath) + if err != nil { + c.logger.Error("failed to read local auth table", "error", err) + return errLoadAuthFailed + } + + c.authLock.Lock() + defer c.authLock.Unlock() + + if raw != nil { + authTable, err := c.decodeMountTable(ctx, raw.Value) + if err != nil { + c.logger.Error("failed to decompress and/or decode the auth table", "error", err) + return err + } + c.auth = authTable + } + + var needPersist bool + if c.auth == nil { + c.auth = c.defaultAuthTable() + needPersist = true + } + + if rawLocal != nil { + localAuthTable, err := c.decodeMountTable(ctx, rawLocal.Value) + if err != nil { + c.logger.Error("failed to decompress and/or decode the local mount table", "error", err) + return err + } + if localAuthTable != nil && len(localAuthTable.Entries) > 0 { + c.auth.Entries = append(c.auth.Entries, localAuthTable.Entries...) 
+ } + } + + // Upgrade to typed auth table + if c.auth.Type == "" { + c.auth.Type = credentialTableType + needPersist = true + } + + // Upgrade to table-scoped entries + for _, entry := range c.auth.Entries { + if entry.Table == "" { + entry.Table = c.auth.Type + needPersist = true + } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor("auth_" + entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + needPersist = true + } + if entry.BackendAwareUUID == "" { + bUUID, err := uuid.GenerateUUID() + if err != nil { + return err + } + entry.BackendAwareUUID = bUUID + needPersist = true + } + + if entry.NamespaceID == "" { + entry.NamespaceID = namespace.RootNamespaceID + needPersist = true + } + ns, err := NamespaceByID(ctx, entry.NamespaceID, c) + if err != nil { + return err + } + if ns == nil { + return namespace.ErrNoNamespace + } + entry.namespace = ns + + // Sync values to the cache + entry.SyncCache() + } + + if !needPersist { + return nil + } + + if err := c.persistAuth(ctx, c.auth, nil); err != nil { + c.logger.Error("failed to persist auth table", "error", err) + return errLoadAuthFailed + } + + return nil +} + +// persistAuth is used to persist the auth table after modification +func (c *Core) persistAuth(ctx context.Context, table *MountTable, local *bool) error { + if table.Type != credentialTableType { + c.logger.Error("given table to persist has wrong type", "actual_type", table.Type, "expected_type", credentialTableType) + return fmt.Errorf("invalid table type given, not persisting") + } + + for _, entry := range table.Entries { + if entry.Table != table.Type { + c.logger.Error("given entry to persist in auth table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type) + return fmt.Errorf("invalid auth entry found, not persisting") + } + } + + nonLocalAuth := &MountTable{ + Type: credentialTableType, + } + + localAuth := &MountTable{ + Type: credentialTableType, + } + + for _, entry := range table.Entries { + if entry.Local { + localAuth.Entries = append(localAuth.Entries, entry) + } else { + nonLocalAuth.Entries = append(nonLocalAuth.Entries, entry) + } + } + + writeTable := func(mt *MountTable, path string) error { + // Encode the mount table into JSON and compress it (lzw). 
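+		// Editor's note (hedged): passing nil for the compression config
+		// selects jsonutil's default LZW scheme; decodeMountTable on the
+		// load path is expected to detect and reverse this.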
+ compressedBytes, err := jsonutil.EncodeJSONAndCompress(mt, nil) + if err != nil { + c.logger.Error("failed to encode or compress auth mount table", "error", err) + return err + } + + // Create an entry + entry := &logical.StorageEntry{ + Key: path, + Value: compressedBytes, + } + + // Write to the physical backend + if err := c.barrier.Put(ctx, entry); err != nil { + c.logger.Error("failed to persist auth mount table", "error", err) + return err + } + return nil + } + + var err error + switch { + case local == nil: + // Write non-local mounts + err := writeTable(nonLocalAuth, coreAuthConfigPath) + if err != nil { + return err + } + + // Write local mounts + err = writeTable(localAuth, coreLocalAuthConfigPath) + if err != nil { + return err + } + case *local: + err = writeTable(localAuth, coreLocalAuthConfigPath) + default: + err = writeTable(nonLocalAuth, coreAuthConfigPath) + } + + return err +} + +// setupCredentials is invoked after we've loaded the auth table to +// initialize the credential backends and setup the router +func (c *Core) setupCredentials(ctx context.Context) error { + var persistNeeded bool + + c.authLock.Lock() + defer c.authLock.Unlock() + + for _, entry := range c.auth.sortEntriesByPathDepth().Entries { + var backend logical.Backend + + // Create a barrier view using the UUID + viewPath := entry.ViewPath() + + // Singleton mounts cannot be filtered on a per-secondary basis + // from replication + if strutil.StrListContains(singletonMounts, entry.Type) { + addFilterablePath(c, viewPath) + } + + view := NewBarrierView(c.barrier, viewPath) + + // Determining the replicated state of the mount + nilMount, err := preprocessMount(c, entry, view) + if err != nil { + return err + } + origViewReadOnlyErr := view.getReadOnlyErr() + + // Mark the view as read-only until the mounting is complete and + // ensure that it is reset after. This ensures that there will be no + // writes during the construction of the backend. + view.setReadOnlyErr(logical.ErrSetupReadOnly) + if strutil.StrListContains(singletonMounts, entry.Type) { + defer view.setReadOnlyErr(origViewReadOnlyErr) + } else { + c.postUnsealFuncs = append(c.postUnsealFuncs, func() { + view.setReadOnlyErr(origViewReadOnlyErr) + }) + } + + // Initialize the backend + sysView := c.mountEntrySysView(entry) + + backend, err = c.newCredentialBackend(ctx, entry, sysView, view) + if err != nil { + c.logger.Error("failed to create credential entry", "path", entry.Path, "error", err) + if !c.builtinRegistry.Contains(entry.Type, consts.PluginTypeCredential) { + // If we encounter an error instantiating the backend due to an error, + // skip backend initialization but register the entry to the mount table + // to preserve storage and path. + c.logger.Warn("skipping plugin-based credential entry", "path", entry.Path) + goto ROUTER_MOUNT + } + return errLoadAuthFailed + } + if backend == nil { + return fmt.Errorf("nil backend returned from %q factory", entry.Type) + } + + { + // Check for the correct backend type + backendType := backend.Type() + if backendType != logical.TypeCredential { + return fmt.Errorf("cannot mount %q of type %q as an auth backend", entry.Type, backendType) + } + + addPathCheckers(c, entry, backend, viewPath) + } + + // If the mount is filtered or we are on a DR secondary we don't want to + // keep the actual backend running, so we clean it up and set it to nil + // so the router does not have a pointer to the object. 
+ if nilMount { + backend.Cleanup(ctx) + backend = nil + } + + ROUTER_MOUNT: + // Mount the backend + path := credentialRoutePrefix + entry.Path + err = c.router.Mount(backend, path, entry, view) + if err != nil { + c.logger.Error("failed to mount auth entry", "path", entry.Path, "error", err) + return errLoadAuthFailed + } + + if c.logger.IsInfo() { + c.logger.Info("successfully enabled credential backend", "type", entry.Type, "path", entry.Path) + } + + // Ensure the path is tainted if set in the mount table + if entry.Tainted { + c.router.Taint(ctx, path) + } + + // Check if this is the token store + if entry.Type == "token" { + c.tokenStore = backend.(*TokenStore) + + // At some point when this isn't beta we may persist this but for + // now always set it on mount + entry.Config.TokenType = logical.TokenTypeDefaultService + + // this is loaded *after* the normal mounts, including cubbyhole + c.router.tokenStoreSaltFunc = c.tokenStore.Salt + if !c.IsDRSecondary() { + c.tokenStore.cubbyholeBackend = c.router.MatchingBackend(ctx, cubbyholeMountPath).(*CubbyholeBackend) + } + } + + // Populate cache + NamespaceByID(ctx, entry.NamespaceID, c) + + // Initialize + if !nilMount { + // Bind locally + localEntry := entry + c.postUnsealFuncs = append(c.postUnsealFuncs, func() { + if backend == nil { + c.logger.Error("skipping initialization on nil backend", "path", localEntry.Path) + return + } + + err := backend.Initialize(ctx, &logical.InitializationRequest{Storage: view}) + if err != nil { + c.logger.Error("failed to initialize auth entry", "path", localEntry.Path, "error", err) + } + }) + } + } + + if persistNeeded { + // persist non-local auth + return c.persistAuth(ctx, c.auth, nil) + } + + return nil +} + +// teardownCredentials is used before we seal the vault to reset the credential +// backends to their unloaded state. This is reversed by loadCredentials. 
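+//
+// Rough lifecycle symmetry, as an editor's sketch:
+//
+//	postUnseal: loadCredentials -> setupCredentials
+//	preSeal:    teardownCredentials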
+func (c *Core) teardownCredentials(ctx context.Context) error { + c.authLock.Lock() + defer c.authLock.Unlock() + + if c.auth != nil { + authTable := c.auth.shallowClone() + for _, e := range authTable.Entries { + backend := c.router.MatchingBackend(namespace.ContextWithNamespace(ctx, e.namespace), credentialRoutePrefix+e.Path) + if backend != nil { + backend.Cleanup(ctx) + } + + viewPath := e.ViewPath() + removePathCheckers(c, e, viewPath) + } + } + + c.auth = nil + c.tokenStore = nil + return nil +} + +// newCredentialBackend is used to create and configure a new credential backend by name +func (c *Core) newCredentialBackend(ctx context.Context, entry *MountEntry, sysView logical.SystemView, view logical.Storage) (logical.Backend, error) { + t := entry.Type + if alias, ok := credentialAliases[t]; ok { + t = alias + } + + f, ok := c.credentialBackends[t] + if !ok { + f = plugin.Factory + } + + // Set up conf to pass in plugin_name + conf := make(map[string]string, len(entry.Options)+1) + for k, v := range entry.Options { + conf[k] = v + } + + switch { + case entry.Type == "plugin": + conf["plugin_name"] = entry.Config.PluginName + default: + conf["plugin_name"] = t + } + + conf["plugin_type"] = consts.PluginTypeCredential.String() + + authLogger := c.baseLogger.Named(fmt.Sprintf("auth.%s.%s", t, entry.Accessor)) + c.AddLogger(authLogger) + config := &logical.BackendConfig{ + StorageView: view, + Logger: authLogger, + Config: conf, + System: sysView, + BackendUUID: entry.BackendAwareUUID, + } + + b, err := f(ctx, config) + if err != nil { + return nil, err + } + + return b, nil +} + +// defaultAuthTable creates a default auth table +func (c *Core) defaultAuthTable() *MountTable { + table := &MountTable{ + Type: credentialTableType, + } + tokenUUID, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Sprintf("could not generate UUID for default auth table token entry: %v", err)) + } + tokenAccessor, err := c.generateMountAccessor("auth_token") + if err != nil { + panic(fmt.Sprintf("could not generate accessor for default auth table token entry: %v", err)) + } + tokenBackendUUID, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Sprintf("could not create identity backend UUID: %v", err)) + } + tokenAuth := &MountEntry{ + Table: credentialTableType, + Path: "token/", + Type: "token", + Description: "token based credentials", + UUID: tokenUUID, + Accessor: tokenAccessor, + BackendAwareUUID: tokenBackendUUID, + } + table.Entries = append(table.Entries, tokenAuth) + return table +} diff --git a/vendor/github.com/hashicorp/vault/vault/barrier.go b/vendor/github.com/hashicorp/vault/vault/barrier.go new file mode 100644 index 00000000..92491f76 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/barrier.go @@ -0,0 +1,176 @@ +package vault + +import ( + "context" + "errors" + "io" + "time" + + "github.com/hashicorp/vault/sdk/logical" +) + +var ( + // ErrBarrierSealed is returned if an operation is performed on + // a sealed barrier. No operation is expected to succeed before unsealing + ErrBarrierSealed = errors.New("Vault is sealed") + + // ErrBarrierAlreadyInit is returned if the barrier is already + // initialized. This prevents a re-initialization. + ErrBarrierAlreadyInit = errors.New("Vault is already initialized") + + // ErrBarrierNotInit is returned if a non-initialized barrier + // is attempted to be unsealed. 
+ ErrBarrierNotInit = errors.New("Vault is not initialized") + + // ErrBarrierInvalidKey is returned if the Unseal key is invalid + ErrBarrierInvalidKey = errors.New("Unseal failed, invalid key") +) + +const ( + // barrierInitPath is the path used to store our init sentinel file + barrierInitPath = "barrier/init" + + // keyringPath is the location of the keyring data. This is encrypted + // by the master key. + keyringPath = "core/keyring" + keyringPrefix = "core/" + + // keyringUpgradePrefix is the path used to store keyring update entries. + // When running in HA mode, the active instance will install the new key + // and re-write the keyring. For standby instances, they need an upgrade + // path from key N to N+1. They cannot just use the master key because + // in the event of a rekey, that master key can no longer decrypt the keyring. + // When key N+1 is installed, we create an entry at "prefix/N" which uses + // encryption key N to provide the N+1 key. The standby instances scan + // for this periodically and refresh their keyring. The upgrade keys + // are deleted after a few minutes, but this provides enough time for the + // standby instances to upgrade without causing any disruption. + keyringUpgradePrefix = "core/upgrade/" + + // masterKeyPath is the location of the master key. This is encrypted + // by the latest key in the keyring. This is only used by standby instances + // to handle the case of a rekey. If the active instance does a rekey, + // the standby instances can no longer reload the keyring since they + // have the old master key. This key can be decrypted if you have the + // keyring to discover the new master key. The new master key is then + // used to reload the keyring itself. + masterKeyPath = "core/master" + + // shamirKekPath is used with Shamir in v1.3+ to store a copy of the + // unseal key behind the barrier. As with masterKeyPath this is primarily + // used by standbys to handle rekeys. It also comes into play when restoring + // raft snapshots. + shamirKekPath = "core/shamir-kek" +) + +// SecurityBarrier is a critical component of Vault. It is used to wrap +// an untrusted physical backend and provide a single point of encryption, +// decryption and checksum verification. The goal is to ensure that any +// data written to the barrier is confidential and that integrity is preserved. +// As a real-world analogy, this is the steel and concrete wrapper around +// a Vault. The barrier should only be Unlockable given its key. +type SecurityBarrier interface { + // Initialized checks if the barrier has been initialized + // and has a master key set. + Initialized(ctx context.Context) (bool, error) + + // Initialize works only if the barrier has not been initialized + // and makes use of the given master key. When sealKey is provided + // it's because we're using a new-style Shamir seal, and masterKey + // is to be stored using sealKey to encrypt it. + Initialize(ctx context.Context, masterKey []byte, sealKey []byte, random io.Reader) error + + // GenerateKey is used to generate a new key + GenerateKey(io.Reader) ([]byte, error) + + // KeyLength is used to sanity check a key + KeyLength() (int, int) + + // Sealed checks if the barrier has been unlocked yet. The Barrier + // is not expected to be able to perform any CRUD until it is unsealed. + Sealed() (bool, error) + + // Unseal is used to provide the master key which permits the barrier + // to be unsealed. If the key is not correct, the barrier remains sealed. 
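+	//
+	// A typical lifecycle, sketched by the editor (error handling elided;
+	// not an authoritative usage contract):
+	//
+	//	b, _ := NewAESGCMBarrier(backend)
+	//	_ = b.Initialize(ctx, masterKey, nil, rand.Reader)
+	//	_ = b.Unseal(ctx, masterKey)
+	//	_ = b.Put(ctx, &logical.StorageEntry{Key: "foo", Value: []byte("bar")})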
+	Unseal(ctx context.Context, key []byte) error
+
+	// VerifyMaster is used to check if the given key matches the master key
+	VerifyMaster(key []byte) error
+
+	// SetMasterKey is used to directly set a new master key. This is used in
+	// replicated scenarios due to the chicken and egg problem of reloading the
+	// keyring from disk before we have the master key to decrypt it.
+	SetMasterKey(key []byte) error
+
+	// ReloadKeyring is used to re-read the underlying keyring.
+	// This is used for HA deployments to ensure the latest keyring
+	// is present in the leader.
+	ReloadKeyring(ctx context.Context) error
+
+	// ReloadMasterKey is used to re-read the underlying master key.
+	// This is used for HA deployments to ensure the latest master key
+	// is available for keyring reloading.
+	ReloadMasterKey(ctx context.Context) error
+
+	// Seal is used to re-seal the barrier. This requires the barrier to
+	// be unsealed again to perform any further operations.
+	Seal() error
+
+	// Rotate is used to create a new encryption key. All future writes
+	// should use the new key, while old values should still be decryptable.
+	Rotate(ctx context.Context, reader io.Reader) (uint32, error)
+
+	// CreateUpgrade creates an upgrade path key to the given term from the previous term
+	CreateUpgrade(ctx context.Context, term uint32) error
+
+	// DestroyUpgrade destroys the upgrade path key to the given term
+	DestroyUpgrade(ctx context.Context, term uint32) error
+
+	// CheckUpgrade looks for an upgrade to the current term and installs it
+	CheckUpgrade(ctx context.Context) (bool, uint32, error)
+
+	// ActiveKeyInfo is used to inform details about the active key
+	ActiveKeyInfo() (*KeyInfo, error)
+
+	// Rekey is used to change the master key used to protect the keyring
+	Rekey(context.Context, []byte) error
+
+	// For replication we must send over the keyring, so this must be available
+	Keyring() (*Keyring, error)
+
+	// SecurityBarrier must provide the storage APIs
+	logical.Storage
+
+	// SecurityBarrier must provide the encryption APIs
+	BarrierEncryptor
+}
+
+// BarrierStorage is the storage only interface required for a Barrier.
+type BarrierStorage interface {
+	// Put is used to insert or update an entry
+	Put(ctx context.Context, entry *logical.StorageEntry) error
+
+	// Get is used to fetch an entry
+	Get(ctx context.Context, key string) (*logical.StorageEntry, error)
+
+	// Delete is used to permanently delete an entry
+	Delete(ctx context.Context, key string) error
+
+	// List is used to list all the keys under a given
+	// prefix, up to the next prefix.
+	List(ctx context.Context, prefix string) ([]string, error)
+}
+
+// BarrierEncryptor is the in-memory only interface that does not actually
+// use the underlying barrier. It is used for lower level modules like the
+// Write-Ahead-Log and Merkle index to allow them to use the barrier.
+type BarrierEncryptor interface { + Encrypt(ctx context.Context, key string, plaintext []byte) ([]byte, error) + Decrypt(ctx context.Context, key string, ciphertext []byte) ([]byte, error) +} + +// KeyInfo is used to convey information about the encryption key +type KeyInfo struct { + Term int + InstallTime time.Time +} diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_access.go b/vendor/github.com/hashicorp/vault/vault/barrier_access.go new file mode 100644 index 00000000..84e6e747 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/barrier_access.go @@ -0,0 +1,24 @@ +package vault + +import "context" + +// BarrierEncryptorAccess is a wrapper around BarrierEncryptor that allows Core +// to expose its barrier encrypt/decrypt operations through BarrierEncryptorAccess() +// while restricting the ability to modify Core.barrier itself. +type BarrierEncryptorAccess struct { + barrierEncryptor BarrierEncryptor +} + +var _ BarrierEncryptor = (*BarrierEncryptorAccess)(nil) + +func NewBarrierEncryptorAccess(barrierEncryptor BarrierEncryptor) *BarrierEncryptorAccess { + return &BarrierEncryptorAccess{barrierEncryptor: barrierEncryptor} +} + +func (b *BarrierEncryptorAccess) Encrypt(ctx context.Context, key string, plaintext []byte) ([]byte, error) { + return b.barrierEncryptor.Encrypt(ctx, key, plaintext) +} + +func (b *BarrierEncryptorAccess) Decrypt(ctx context.Context, key string, ciphertext []byte) ([]byte, error) { + return b.barrierEncryptor.Decrypt(ctx, key, ciphertext) +} diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go new file mode 100644 index 00000000..fa2fc22a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go @@ -0,0 +1,1033 @@ +package vault + +import ( + "context" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "go.uber.org/atomic" +) + +const ( + // initialKeyTerm is the hard coded initial key term. This is + // used only for values that are not encrypted with the keyring. + initialKeyTerm = 1 + + // termSize the number of bytes used for the key term. + termSize = 4 +) + +// Versions of the AESGCM storage methodology +const ( + AESGCMVersion1 = 0x1 + AESGCMVersion2 = 0x2 +) + +// barrierInit is the JSON encoded value stored +type barrierInit struct { + Version int // Version is the current format version + Key []byte // Key is the primary encryption key +} + +// Validate AESGCMBarrier satisfies SecurityBarrier interface +var _ SecurityBarrier = &AESGCMBarrier{} + +// AESGCMBarrier is a SecurityBarrier implementation that uses the AES +// cipher core and the Galois Counter Mode block mode. It defaults to +// the golang NONCE default value of 12 and a key size of 256 +// bit. AES-GCM is high performance, and provides both confidentiality +// and integrity. +type AESGCMBarrier struct { + backend physical.Backend + + l sync.RWMutex + sealed bool + + // keyring is used to maintain all of the encryption keys, including + // the active key used for encryption, but also prior keys to allow + // decryption of keys encrypted under previous terms. 
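+	//
+	// Editor's note: every value written through the barrier is prefixed
+	// with the 4-byte term of the key that encrypted it, which is how old
+	// terms kept here are matched to old ciphertexts on read.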
+ keyring *Keyring + + // cache is used to reduce the number of AEAD constructions we do + cache map[uint32]cipher.AEAD + cacheLock sync.RWMutex + + // currentAESGCMVersionByte is prefixed to a message to allow for + // future versioning of barrier implementations. It's var instead + // of const to allow for testing + currentAESGCMVersionByte byte + + initialized atomic.Bool +} + +// NewAESGCMBarrier is used to construct a new barrier that uses +// the provided physical backend for storage. +func NewAESGCMBarrier(physical physical.Backend) (*AESGCMBarrier, error) { + b := &AESGCMBarrier{ + backend: physical, + sealed: true, + cache: make(map[uint32]cipher.AEAD), + currentAESGCMVersionByte: byte(AESGCMVersion2), + } + return b, nil +} + +// Initialized checks if the barrier has been initialized +// and has a master key set. +func (b *AESGCMBarrier) Initialized(ctx context.Context) (bool, error) { + if b.initialized.Load() { + return true, nil + } + + // Read the keyring file + keys, err := b.backend.List(ctx, keyringPrefix) + if err != nil { + return false, errwrap.Wrapf("failed to check for initialization: {{err}}", err) + } + if strutil.StrListContains(keys, "keyring") { + b.initialized.Store(true) + return true, nil + } + + // Fallback, check for the old sentinel file + out, err := b.backend.Get(ctx, barrierInitPath) + if err != nil { + return false, errwrap.Wrapf("failed to check for initialization: {{err}}", err) + } + b.initialized.Store(out != nil) + return out != nil, nil +} + +// Initialize works only if the barrier has not been initialized +// and makes use of the given master key. +func (b *AESGCMBarrier) Initialize(ctx context.Context, key, sealKey []byte, reader io.Reader) error { + // Verify the key size + min, max := b.KeyLength() + if len(key) < min || len(key) > max { + return fmt.Errorf("key size must be %d or %d", min, max) + } + + // Check if already initialized + if alreadyInit, err := b.Initialized(ctx); err != nil { + return err + } else if alreadyInit { + return ErrBarrierAlreadyInit + } + + // Generate encryption key + encrypt, err := b.GenerateKey(reader) + if err != nil { + return errwrap.Wrapf("failed to generate encryption key: {{err}}", err) + } + + // Create a new keyring, install the keys + keyring := NewKeyring() + keyring = keyring.SetMasterKey(key) + keyring, err = keyring.AddKey(&Key{ + Term: 1, + Version: 1, + Value: encrypt, + }) + if err != nil { + return errwrap.Wrapf("failed to create keyring: {{err}}", err) + } + + err = b.persistKeyring(ctx, keyring) + if err != nil { + return err + } + + if len(sealKey) > 0 { + primary, err := b.aeadFromKey(encrypt) + if err != nil { + return err + } + + err = b.putInternal(ctx, 1, primary, &logical.StorageEntry{ + Key: shamirKekPath, + Value: sealKey, + }) + if err != nil { + return errwrap.Wrapf("failed to store new seal key: {{err}}", err) + } + } + + return nil +} + +// persistKeyring is used to write out the keyring using the +// master key to encrypt it. 
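+//
+// Per the body below, two entries are written: the full keyring at
+// core/keyring (encrypted with the master key) and a copy of the master
+// key at core/master (encrypted with the active term key) for standbys.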
+func (b *AESGCMBarrier) persistKeyring(ctx context.Context, keyring *Keyring) error { + // Create the keyring entry + keyringBuf, err := keyring.Serialize() + defer memzero(keyringBuf) + if err != nil { + return errwrap.Wrapf("failed to serialize keyring: {{err}}", err) + } + + // Create the AES-GCM + gcm, err := b.aeadFromKey(keyring.MasterKey()) + if err != nil { + return err + } + + // Encrypt the barrier init value + value, err := b.encrypt(keyringPath, initialKeyTerm, gcm, keyringBuf) + if err != nil { + return err + } + + // Create the keyring physical entry + pe := &physical.Entry{ + Key: keyringPath, + Value: value, + } + if err := b.backend.Put(ctx, pe); err != nil { + return errwrap.Wrapf("failed to persist keyring: {{err}}", err) + } + + // Serialize the master key value + key := &Key{ + Term: 1, + Version: 1, + Value: keyring.MasterKey(), + } + keyBuf, err := key.Serialize() + defer memzero(keyBuf) + if err != nil { + return errwrap.Wrapf("failed to serialize master key: {{err}}", err) + } + + // Encrypt the master key + activeKey := keyring.ActiveKey() + aead, err := b.aeadFromKey(activeKey.Value) + if err != nil { + return err + } + value, err = b.encrypt(masterKeyPath, activeKey.Term, aead, keyBuf) + if err != nil { + return err + } + + // Update the masterKeyPath for standby instances + pe = &physical.Entry{ + Key: masterKeyPath, + Value: value, + } + if err := b.backend.Put(ctx, pe); err != nil { + return errwrap.Wrapf("failed to persist master key: {{err}}", err) + } + return nil +} + +// GenerateKey is used to generate a new key +func (b *AESGCMBarrier) GenerateKey(reader io.Reader) ([]byte, error) { + // Generate a 256bit key + buf := make([]byte, 2*aes.BlockSize) + _, err := reader.Read(buf) + + return buf, err +} + +// KeyLength is used to sanity check a key +func (b *AESGCMBarrier) KeyLength() (int, int) { + return aes.BlockSize, 2 * aes.BlockSize +} + +// Sealed checks if the barrier has been unlocked yet. The Barrier +// is not expected to be able to perform any CRUD until it is unsealed. +func (b *AESGCMBarrier) Sealed() (bool, error) { + b.l.RLock() + sealed := b.sealed + b.l.RUnlock() + return sealed, nil +} + +// VerifyMaster is used to check if the given key matches the master key +func (b *AESGCMBarrier) VerifyMaster(key []byte) error { + b.l.RLock() + defer b.l.RUnlock() + if b.sealed { + return ErrBarrierSealed + } + if subtle.ConstantTimeCompare(key, b.keyring.MasterKey()) != 1 { + return ErrBarrierInvalidKey + } + return nil +} + +// ReloadKeyring is used to re-read the underlying keyring. +// This is used for HA deployments to ensure the latest keyring +// is present in the leader. +func (b *AESGCMBarrier) ReloadKeyring(ctx context.Context) error { + b.l.Lock() + defer b.l.Unlock() + + // Create the AES-GCM + gcm, err := b.aeadFromKey(b.keyring.MasterKey()) + if err != nil { + return err + } + + // Read in the keyring + out, err := b.backend.Get(ctx, keyringPath) + if err != nil { + return errwrap.Wrapf("failed to check for keyring: {{err}}", err) + } + + // Ensure that the keyring exists. This should never happen, + // and indicates something really bad has happened. 
+ if out == nil { + return errors.New("keyring unexpectedly missing") + } + + // Verify the term is always just one + term := binary.BigEndian.Uint32(out.Value[:4]) + if term != initialKeyTerm { + return errors.New("term mis-match") + } + + // Decrypt the barrier init key + plain, err := b.decrypt(keyringPath, gcm, out.Value) + defer memzero(plain) + if err != nil { + if strings.Contains(err.Error(), "message authentication failed") { + return ErrBarrierInvalidKey + } + return err + } + + // Recover the keyring + keyring, err := DeserializeKeyring(plain) + if err != nil { + return errwrap.Wrapf("keyring deserialization failed: {{err}}", err) + } + + // Setup the keyring and finish + b.cache = make(map[uint32]cipher.AEAD) + b.keyring = keyring + return nil +} + +// ReloadMasterKey is used to re-read the underlying masterkey. +// This is used for HA deployments to ensure the latest master key +// is available for keyring reloading. +func (b *AESGCMBarrier) ReloadMasterKey(ctx context.Context) error { + // Read the masterKeyPath upgrade + out, err := b.Get(ctx, masterKeyPath) + if err != nil { + return errwrap.Wrapf("failed to read master key path: {{err}}", err) + } + + // The masterKeyPath could be missing (backwards incompatible), + // we can ignore this and attempt to make progress with the current + // master key. + if out == nil { + return nil + } + + // Grab write lock and refetch + b.l.Lock() + defer b.l.Unlock() + + out, err = b.lockSwitchedGet(ctx, masterKeyPath, false) + if err != nil { + return errwrap.Wrapf("failed to read master key path: {{err}}", err) + } + + if out == nil { + return nil + } + + // Deserialize the master key + key, err := DeserializeKey(out.Value) + memzero(out.Value) + if err != nil { + return errwrap.Wrapf("failed to deserialize key: {{err}}", err) + } + + // Check if the master key is the same + if subtle.ConstantTimeCompare(b.keyring.MasterKey(), key.Value) == 1 { + return nil + } + + // Update the master key + oldKeyring := b.keyring + b.keyring = b.keyring.SetMasterKey(key.Value) + oldKeyring.Zeroize(false) + return nil +} + +// Unseal is used to provide the master key which permits the barrier +// to be unsealed. If the key is not correct, the barrier remains sealed. 
+func (b *AESGCMBarrier) Unseal(ctx context.Context, key []byte) error { + b.l.Lock() + defer b.l.Unlock() + + // Do nothing if already unsealed + if !b.sealed { + return nil + } + + // Create the AES-GCM + gcm, err := b.aeadFromKey(key) + if err != nil { + return err + } + + // Read in the keyring + out, err := b.backend.Get(ctx, keyringPath) + if err != nil { + return errwrap.Wrapf("failed to check for keyring: {{err}}", err) + } + if out != nil { + // Verify the term is always just one + term := binary.BigEndian.Uint32(out.Value[:4]) + if term != initialKeyTerm { + return errors.New("term mis-match") + } + + // Decrypt the barrier init key + plain, err := b.decrypt(keyringPath, gcm, out.Value) + defer memzero(plain) + if err != nil { + if strings.Contains(err.Error(), "message authentication failed") { + return ErrBarrierInvalidKey + } + return err + } + + // Recover the keyring + keyring, err := DeserializeKeyring(plain) + if err != nil { + return errwrap.Wrapf("keyring deserialization failed: {{err}}", err) + } + + // Setup the keyring and finish + b.keyring = keyring + b.sealed = false + return nil + } + + // Read the barrier initialization key + out, err = b.backend.Get(ctx, barrierInitPath) + if err != nil { + return errwrap.Wrapf("failed to check for initialization: {{err}}", err) + } + if out == nil { + return ErrBarrierNotInit + } + + // Verify the term is always just one + term := binary.BigEndian.Uint32(out.Value[:4]) + if term != initialKeyTerm { + return errors.New("term mis-match") + } + + // Decrypt the barrier init key + plain, err := b.decrypt(barrierInitPath, gcm, out.Value) + if err != nil { + if strings.Contains(err.Error(), "message authentication failed") { + return ErrBarrierInvalidKey + } + return err + } + defer memzero(plain) + + // Unmarshal the barrier init + var init barrierInit + if err := jsonutil.DecodeJSON(plain, &init); err != nil { + return fmt.Errorf("failed to unmarshal barrier init file") + } + + // Setup a new keyring, this is for backwards compatibility + keyringNew := NewKeyring() + keyring := keyringNew.SetMasterKey(key) + + // AddKey reuses the master, so we are only zeroizing after this call + defer keyringNew.Zeroize(false) + + keyring, err = keyring.AddKey(&Key{ + Term: 1, + Version: 1, + Value: init.Key, + }) + if err != nil { + return errwrap.Wrapf("failed to create keyring: {{err}}", err) + } + if err := b.persistKeyring(ctx, keyring); err != nil { + return err + } + + // Delete the old barrier entry + if err := b.backend.Delete(ctx, barrierInitPath); err != nil { + return errwrap.Wrapf("failed to delete barrier init file: {{err}}", err) + } + + // Set the vault as unsealed + b.keyring = keyring + b.sealed = false + return nil +} + +// Seal is used to re-seal the barrier. This requires the barrier to +// be unsealed again to perform any further operations. +func (b *AESGCMBarrier) Seal() error { + b.l.Lock() + defer b.l.Unlock() + + // Remove the primary key, and seal the vault + b.cache = make(map[uint32]cipher.AEAD) + b.keyring.Zeroize(true) + b.keyring = nil + b.sealed = true + return nil +} + +// Rotate is used to create a new encryption key. All future writes +// should use the new key, while old values should still be decryptable. 
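+//
+// An editor's sketch of a rotation flow (names from this file; error
+// handling elided):
+//
+//	newTerm, _ := b.Rotate(ctx, rand.Reader)
+//	_ = b.CreateUpgrade(ctx, newTerm) // give HA standbys an upgrade path
+//	// ... later, once standbys have caught up:
+//	_ = b.DestroyUpgrade(ctx, newTerm)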
+func (b *AESGCMBarrier) Rotate(ctx context.Context, reader io.Reader) (uint32, error) { + b.l.Lock() + defer b.l.Unlock() + if b.sealed { + return 0, ErrBarrierSealed + } + + // Generate a new key + encrypt, err := b.GenerateKey(reader) + if err != nil { + return 0, errwrap.Wrapf("failed to generate encryption key: {{err}}", err) + } + + // Get the next term + term := b.keyring.ActiveTerm() + newTerm := term + 1 + + // Add a new encryption key + newKeyring, err := b.keyring.AddKey(&Key{ + Term: newTerm, + Version: 1, + Value: encrypt, + }) + if err != nil { + return 0, errwrap.Wrapf("failed to add new encryption key: {{err}}", err) + } + + // Persist the new keyring + if err := b.persistKeyring(ctx, newKeyring); err != nil { + return 0, err + } + + // Swap the keyrings + b.keyring = newKeyring + return newTerm, nil +} + +// CreateUpgrade creates an upgrade path key to the given term from the previous term +func (b *AESGCMBarrier) CreateUpgrade(ctx context.Context, term uint32) error { + b.l.RLock() + if b.sealed { + b.l.RUnlock() + return ErrBarrierSealed + } + + // Get the key for this term + termKey := b.keyring.TermKey(term) + buf, err := termKey.Serialize() + defer memzero(buf) + if err != nil { + b.l.RUnlock() + return err + } + + // Get the AEAD for the previous term + prevTerm := term - 1 + primary, err := b.aeadForTerm(prevTerm) + if err != nil { + b.l.RUnlock() + return err + } + + key := fmt.Sprintf("%s%d", keyringUpgradePrefix, prevTerm) + value, err := b.encrypt(key, prevTerm, primary, buf) + b.l.RUnlock() + if err != nil { + return err + } + // Create upgrade key + pe := &physical.Entry{ + Key: key, + Value: value, + } + return b.backend.Put(ctx, pe) +} + +// DestroyUpgrade destroys the upgrade path key to the given term +func (b *AESGCMBarrier) DestroyUpgrade(ctx context.Context, term uint32) error { + path := fmt.Sprintf("%s%d", keyringUpgradePrefix, term-1) + return b.Delete(ctx, path) +} + +// CheckUpgrade looks for an upgrade to the current term and installs it +func (b *AESGCMBarrier) CheckUpgrade(ctx context.Context) (bool, uint32, error) { + b.l.RLock() + if b.sealed { + b.l.RUnlock() + return false, 0, ErrBarrierSealed + } + + // Get the current term + activeTerm := b.keyring.ActiveTerm() + + // Check for an upgrade key + upgrade := fmt.Sprintf("%s%d", keyringUpgradePrefix, activeTerm) + entry, err := b.lockSwitchedGet(ctx, upgrade, false) + if err != nil { + b.l.RUnlock() + return false, 0, err + } + + // Nothing to do if no upgrade + if entry == nil { + b.l.RUnlock() + return false, 0, nil + } + + // Upgrade from read lock to write lock + b.l.RUnlock() + b.l.Lock() + defer b.l.Unlock() + + // Validate base cases and refetch values again + + if b.sealed { + return false, 0, ErrBarrierSealed + } + + activeTerm = b.keyring.ActiveTerm() + + upgrade = fmt.Sprintf("%s%d", keyringUpgradePrefix, activeTerm) + entry, err = b.lockSwitchedGet(ctx, upgrade, false) + if err != nil { + return false, 0, err + } + + if entry == nil { + return false, 0, nil + } + + // Deserialize the key + key, err := DeserializeKey(entry.Value) + memzero(entry.Value) + if err != nil { + return false, 0, err + } + + // Update the keyring + newKeyring, err := b.keyring.AddKey(key) + if err != nil { + return false, 0, errwrap.Wrapf("failed to add new encryption key: {{err}}", err) + } + b.keyring = newKeyring + + // Done! 
+ return true, key.Term, nil +} + +// ActiveKeyInfo is used to inform details about the active key +func (b *AESGCMBarrier) ActiveKeyInfo() (*KeyInfo, error) { + b.l.RLock() + defer b.l.RUnlock() + if b.sealed { + return nil, ErrBarrierSealed + } + + // Determine the key install time + term := b.keyring.ActiveTerm() + key := b.keyring.TermKey(term) + + // Return the key info + info := &KeyInfo{ + Term: int(term), + InstallTime: key.InstallTime, + } + return info, nil +} + +// Rekey is used to change the master key used to protect the keyring +func (b *AESGCMBarrier) Rekey(ctx context.Context, key []byte) error { + b.l.Lock() + defer b.l.Unlock() + + newKeyring, err := b.updateMasterKeyCommon(key) + if err != nil { + return err + } + + // Persist the new keyring + if err := b.persistKeyring(ctx, newKeyring); err != nil { + return err + } + + // Swap the keyrings + oldKeyring := b.keyring + b.keyring = newKeyring + oldKeyring.Zeroize(false) + return nil +} + +// SetMasterKey updates the keyring's in-memory master key but does not persist +// anything to storage +func (b *AESGCMBarrier) SetMasterKey(key []byte) error { + b.l.Lock() + defer b.l.Unlock() + + newKeyring, err := b.updateMasterKeyCommon(key) + if err != nil { + return err + } + + // Swap the keyrings + oldKeyring := b.keyring + b.keyring = newKeyring + oldKeyring.Zeroize(false) + return nil +} + +// Performs common tasks related to updating the master key; note that the lock +// must be held before calling this function +func (b *AESGCMBarrier) updateMasterKeyCommon(key []byte) (*Keyring, error) { + if b.sealed { + return nil, ErrBarrierSealed + } + + // Verify the key size + min, max := b.KeyLength() + if len(key) < min || len(key) > max { + return nil, fmt.Errorf("key size must be %d or %d", min, max) + } + + return b.keyring.SetMasterKey(key), nil +} + +// Put is used to insert or update an entry +func (b *AESGCMBarrier) Put(ctx context.Context, entry *logical.StorageEntry) error { + defer metrics.MeasureSince([]string{"barrier", "put"}, time.Now()) + b.l.RLock() + if b.sealed { + b.l.RUnlock() + return ErrBarrierSealed + } + + term := b.keyring.ActiveTerm() + primary, err := b.aeadForTerm(term) + b.l.RUnlock() + if err != nil { + return err + } + + return b.putInternal(ctx, term, primary, entry) +} + +func (b *AESGCMBarrier) putInternal(ctx context.Context, term uint32, primary cipher.AEAD, entry *logical.StorageEntry) error { + value, err := b.encrypt(entry.Key, term, primary, entry.Value) + if err != nil { + return err + } + pe := &physical.Entry{ + Key: entry.Key, + Value: value, + SealWrap: entry.SealWrap, + } + return b.backend.Put(ctx, pe) +} + +// Get is used to fetch an entry +func (b *AESGCMBarrier) Get(ctx context.Context, key string) (*logical.StorageEntry, error) { + return b.lockSwitchedGet(ctx, key, true) +} + +func (b *AESGCMBarrier) lockSwitchedGet(ctx context.Context, key string, getLock bool) (*logical.StorageEntry, error) { + defer metrics.MeasureSince([]string{"barrier", "get"}, time.Now()) + if getLock { + b.l.RLock() + } + if b.sealed { + if getLock { + b.l.RUnlock() + } + return nil, ErrBarrierSealed + } + + // Read the key from the backend + pe, err := b.backend.Get(ctx, key) + if err != nil { + if getLock { + b.l.RUnlock() + } + return nil, err + } else if pe == nil { + if getLock { + b.l.RUnlock() + } + return nil, nil + } + + if len(pe.Value) < 4 { + if getLock { + b.l.RUnlock() + } + return nil, errors.New("invalid value") + } + + // Verify the term + term := binary.BigEndian.Uint32(pe.Value[:4]) + 
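+	// Editor's note: on-disk layout, as produced by encrypt() below:
+	//
+	//	[ term (4 bytes) | version (1 byte) | nonce | ciphertext + GCM tag ]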
+	// Get the GCM by term
+	// Doing this lookup first is expensive, but a term that doesn't
+	// match is not the normal case
+	gcm, err := b.aeadForTerm(term)
+	if getLock {
+		b.l.RUnlock()
+	}
+	if err != nil {
+		return nil, err
+	}
+	if gcm == nil {
+		return nil, fmt.Errorf("no decryption key available for term %d", term)
+	}
+
+	// Decrypt the ciphertext
+	plain, err := b.decrypt(key, gcm, pe.Value)
+	if err != nil {
+		return nil, errwrap.Wrapf("decryption failed: {{err}}", err)
+	}
+
+	// Wrap in a logical entry
+	entry := &logical.StorageEntry{
+		Key:      key,
+		Value:    plain,
+		SealWrap: pe.SealWrap,
+	}
+	return entry, nil
+}
+
+// Delete is used to permanently delete an entry
+func (b *AESGCMBarrier) Delete(ctx context.Context, key string) error {
+	defer metrics.MeasureSince([]string{"barrier", "delete"}, time.Now())
+	b.l.RLock()
+	sealed := b.sealed
+	b.l.RUnlock()
+	if sealed {
+		return ErrBarrierSealed
+	}
+
+	return b.backend.Delete(ctx, key)
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (b *AESGCMBarrier) List(ctx context.Context, prefix string) ([]string, error) {
+	defer metrics.MeasureSince([]string{"barrier", "list"}, time.Now())
+	b.l.RLock()
+	sealed := b.sealed
+	b.l.RUnlock()
+	if sealed {
+		return nil, ErrBarrierSealed
+	}
+
+	return b.backend.List(ctx, prefix)
+}
+
+// aeadForTerm returns the AES-GCM AEAD for the given term
+func (b *AESGCMBarrier) aeadForTerm(term uint32) (cipher.AEAD, error) {
+	// Check for the keyring
+	keyring := b.keyring
+	if keyring == nil {
+		return nil, nil
+	}
+
+	// Check the cache for the aead
+	b.cacheLock.RLock()
+	aead, ok := b.cache[term]
+	b.cacheLock.RUnlock()
+	if ok {
+		return aead, nil
+	}
+
+	// Read the underlying key
+	key := keyring.TermKey(term)
+	if key == nil {
+		return nil, nil
+	}
+
+	// Create a new aead
+	aead, err := b.aeadFromKey(key.Value)
+	if err != nil {
+		return nil, err
+	}
+
+	// Update the cache
+	b.cacheLock.Lock()
+	b.cache[term] = aead
+	b.cacheLock.Unlock()
+	return aead, nil
+}
+
+// aeadFromKey returns an AES-GCM AEAD using the given key.
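+// Key lengths of 16, 24, or 32 bytes select AES-128, AES-192, or AES-256;
+// aes.NewCipher rejects any other length.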
+func (b *AESGCMBarrier) aeadFromKey(key []byte) (cipher.AEAD, error) {
+	// Create the AES cipher
+	aesCipher, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to create cipher: {{err}}", err)
+	}
+
+	// Create the GCM mode AEAD
+	gcm, err := cipher.NewGCM(aesCipher)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize GCM mode")
+	}
+	return gcm, nil
+}
+
+// encrypt is used to encrypt a value
+func (b *AESGCMBarrier) encrypt(path string, term uint32, gcm cipher.AEAD, plain []byte) ([]byte, error) {
+	// Allocate the output buffer with room for the term, version byte,
+	// nonce, GCM tag and the plaintext
+	capacity := termSize + 1 + gcm.NonceSize() + gcm.Overhead() + len(plain)
+	size := termSize + 1 + gcm.NonceSize()
+	out := make([]byte, size, capacity)
+
+	// Set the key term
+	binary.BigEndian.PutUint32(out[:4], term)
+
+	// Set the version byte
+	out[4] = b.currentAESGCMVersionByte
+
+	// Generate a random nonce
+	nonce := out[5 : 5+gcm.NonceSize()]
+	n, err := rand.Read(nonce)
+	if err != nil {
+		return nil, err
+	}
+	if n != len(nonce) {
+		return nil, errors.New("unable to read enough random bytes to fill gcm nonce")
+	}
+
+	// Seal the output
+	switch b.currentAESGCMVersionByte {
+	case AESGCMVersion1:
+		out = gcm.Seal(out, nonce, plain, nil)
+	case AESGCMVersion2:
+		aad := []byte(nil)
+		if path != "" {
+			aad = []byte(path)
+		}
+		out = gcm.Seal(out, nonce, plain, aad)
+	default:
+		panic("Unknown AESGCM version")
+	}
+
+	return out, nil
+}
+
+// decrypt is used to decrypt a value using the given AEAD
+func (b *AESGCMBarrier) decrypt(path string, gcm cipher.AEAD, ciphertext []byte) ([]byte, error) {
+	// Capture the parts
+	nonce := ciphertext[5 : 5+gcm.NonceSize()]
+	raw := ciphertext[5+gcm.NonceSize():]
+	out := make([]byte, 0, len(raw)-gcm.NonceSize())
+
+	// Attempt to open
+	switch ciphertext[4] {
+	case AESGCMVersion1:
+		return gcm.Open(out, nonce, raw, nil)
+	case AESGCMVersion2:
+		aad := []byte(nil)
+		if path != "" {
+			aad = []byte(path)
+		}
+		return gcm.Open(out, nonce, raw, aad)
+	default:
+		return nil, fmt.Errorf("version byte mismatch")
+	}
+}
+
+// Encrypt is used to encrypt in-memory for the BarrierEncryptor interface
+func (b *AESGCMBarrier) Encrypt(ctx context.Context, key string, plaintext []byte) ([]byte, error) {
+	b.l.RLock()
+	if b.sealed {
+		b.l.RUnlock()
+		return nil, ErrBarrierSealed
+	}
+
+	term := b.keyring.ActiveTerm()
+	primary, err := b.aeadForTerm(term)
+	b.l.RUnlock()
+	if err != nil {
+		return nil, err
+	}
+
+	ciphertext, err := b.encrypt(key, term, primary, plaintext)
+	if err != nil {
+		return nil, err
+	}
+	return ciphertext, nil
+}
+
+// Decrypt is used to decrypt in-memory for the BarrierEncryptor interface
+func (b *AESGCMBarrier) Decrypt(ctx context.Context, key string, ciphertext []byte) ([]byte, error) {
+	b.l.RLock()
+	if b.sealed {
+		b.l.RUnlock()
+		return nil, ErrBarrierSealed
+	}
+
+	// Verify the term
+	term := binary.BigEndian.Uint32(ciphertext[:4])
+
+	// Get the GCM by term
+	// Doing this lookup first is expensive, but a term that doesn't
+	// match is not the normal case
+	gcm, err := b.aeadForTerm(term)
+	b.l.RUnlock()
+	if err != nil {
+		return nil, err
+	}
+	if gcm == nil {
+		return nil, fmt.Errorf("no decryption key available for term %d", term)
+	}
+
+	// Decrypt the ciphertext
+	plain, err := b.decrypt(key, gcm, ciphertext)
+	if err != nil {
+		return nil, errwrap.Wrapf("decryption failed: {{err}}", err)
+	}
+
+	return plain, nil
+}
+
+func (b *AESGCMBarrier) Keyring() (*Keyring, error) {
+	b.l.RLock()
+	defer b.l.RUnlock()
+	if b.sealed {
+		return nil, ErrBarrierSealed
+	}
+
+	return b.keyring.Clone(), nil
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_view.go b/vendor/github.com/hashicorp/vault/vault/barrier_view.go
new file mode 100644
index 00000000..7fa0e7fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/barrier_view.go
@@ -0,0 +1,100 @@
+package vault
+
+import (
+	"context"
+	"errors"
+	"sync"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// BarrierView wraps a SecurityBarrier and ensures all access is automatically
+// prefixed. This is used to prevent anyone with access to the view from
+// accessing any data in the durable storage outside of their prefix.
+// Conceptually this is like a "chroot" into the barrier.
+//
+// BarrierView implements logical.Storage so it can be passed in as the
+// durable storage mechanism for logical views.
+type BarrierView struct {
+	storage         *logical.StorageView
+	readOnlyErr     error
+	readOnlyErrLock sync.RWMutex
+	iCheck          interface{}
+}
+
+// NewBarrierView takes an underlying security barrier and returns
+// a view of it that can only operate with the given prefix.
+func NewBarrierView(barrier logical.Storage, prefix string) *BarrierView {
+	return &BarrierView{
+		storage: logical.NewStorageView(barrier, prefix),
+	}
+}
+
+func (v *BarrierView) setICheck(iCheck interface{}) {
+	v.iCheck = iCheck
+}
+
+func (v *BarrierView) setReadOnlyErr(readOnlyErr error) {
+	v.readOnlyErrLock.Lock()
+	defer v.readOnlyErrLock.Unlock()
+	v.readOnlyErr = readOnlyErr
+}
+
+func (v *BarrierView) getReadOnlyErr() error {
+	v.readOnlyErrLock.RLock()
+	defer v.readOnlyErrLock.RUnlock()
+	return v.readOnlyErr
+}
+
+func (v *BarrierView) Prefix() string {
+	return v.storage.Prefix()
+}
+
+func (v *BarrierView) List(ctx context.Context, prefix string) ([]string, error) {
+	return v.storage.List(ctx, prefix)
+}
+
+func (v *BarrierView) Get(ctx context.Context, key string) (*logical.StorageEntry, error) {
+	return v.storage.Get(ctx, key)
+}
+
+// Put differs from List/Get because it checks read-only errors
+func (v *BarrierView) Put(ctx context.Context, entry *logical.StorageEntry) error {
+	if entry == nil {
+		return errors.New("cannot write nil entry")
+	}
+
+	expandedKey := v.storage.ExpandKey(entry.Key)
+
+	roErr := v.getReadOnlyErr()
+	if roErr != nil {
+		if runICheck(v, expandedKey, roErr) {
+			return roErr
+		}
+	}
+
+	return v.storage.Put(ctx, entry)
+}
+
+// logical.Storage impl.
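+// Delete mirrors Put: it consults the read-only error before mutating
+// storage.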
+func (v *BarrierView) Delete(ctx context.Context, key string) error { + expandedKey := v.storage.ExpandKey(key) + + roErr := v.getReadOnlyErr() + if roErr != nil { + if runICheck(v, expandedKey, roErr) { + return roErr + } + } + + return v.storage.Delete(ctx, key) +} + +// SubView constructs a nested sub-view using the given prefix +func (v *BarrierView) SubView(prefix string) *BarrierView { + return &BarrierView{ + storage: v.storage.SubView(prefix), + readOnlyErr: v.getReadOnlyErr(), + iCheck: v.iCheck, + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_view_util.go b/vendor/github.com/hashicorp/vault/vault/barrier_view_util.go new file mode 100644 index 00000000..f7c63405 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/barrier_view_util.go @@ -0,0 +1,5 @@ +// +build !enterprise + +package vault + +func runICheck(v *BarrierView, expandedKey string, roErr error) bool { return true } diff --git a/vendor/github.com/hashicorp/vault/vault/capabilities.go b/vendor/github.com/hashicorp/vault/vault/capabilities.go new file mode 100644 index 00000000..51b79f51 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/capabilities.go @@ -0,0 +1,76 @@ +package vault + +import ( + "context" + "sort" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" +) + +// Capabilities is used to fetch the capabilities of the given token on the +// given path +func (c *Core) Capabilities(ctx context.Context, token, path string) ([]string, error) { + if path == "" { + return nil, &logical.StatusBadRequest{Err: "missing path"} + } + + if token == "" { + return nil, &logical.StatusBadRequest{Err: "missing token"} + } + + te, err := c.tokenStore.Lookup(ctx, token) + if err != nil { + return nil, err + } + if te == nil { + return nil, &logical.StatusBadRequest{Err: "invalid token"} + } + + tokenNS, err := NamespaceByID(ctx, te.NamespaceID, c) + if err != nil { + return nil, err + } + if tokenNS == nil { + return nil, namespace.ErrNoNamespace + } + + var policyCount int + policyNames := make(map[string][]string) + policyNames[tokenNS.ID] = te.Policies + policyCount += len(te.Policies) + + entity, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, tokenNS, te.EntityID) + if err != nil { + return nil, err + } + if entity != nil && entity.Disabled { + c.logger.Warn("permission denied as the entity on the token is disabled") + return nil, logical.ErrPermissionDenied + } + if te.EntityID != "" && entity == nil { + c.logger.Warn("permission denied as the entity on the token is invalid") + return nil, logical.ErrPermissionDenied + } + + for nsID, nsPolicies := range identityPolicies { + policyNames[nsID] = append(policyNames[nsID], nsPolicies...) + policyCount += len(nsPolicies) + } + + if policyCount == 0 { + return []string{DenyCapability}, nil + } + + // Construct the corresponding ACL object. ACL construction should be + // performed on the token's namespace. 
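+	// policyNames now holds the token's direct policies plus any
+	// identity-derived policies, keyed by namespace ID.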
+	tokenCtx := namespace.ContextWithNamespace(ctx, tokenNS)
+	acl, err := c.policyStore.ACL(tokenCtx, entity, policyNames)
+	if err != nil {
+		return nil, err
+	}
+
+	capabilities := acl.Capabilities(ctx, path)
+	sort.Strings(capabilities)
+	return capabilities, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/cluster.go b/vendor/github.com/hashicorp/vault/vault/cluster.go
new file mode 100644
index 00000000..9ba85f6a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/cluster.go
@@ -0,0 +1,391 @@
+package vault
+
+import (
+	"context"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/big"
+	mathrand "math/rand"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/errwrap"
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/vault/cluster"
+)
+
+const (
+	// Storage path where the local cluster name and identifier are stored
+	coreLocalClusterInfoPath = "core/cluster/local/info"
+
+	corePrivateKeyTypeP521    = "p521"
+	corePrivateKeyTypeED25519 = "ed25519"
+
+	// Internal so as not to log a trace message
+	IntNoForwardingHeaderName = "X-Vault-Internal-No-Request-Forwarding"
+)
+
+var (
+	ErrCannotForward          = errors.New("cannot forward request; no connection or address not known")
+	ErrCannotForwardLocalOnly = errors.New("cannot forward local-only request")
+)
+
+type ClusterLeaderParams struct {
+	LeaderUUID         string
+	LeaderRedirectAddr string
+	LeaderClusterAddr  string
+}
+
+// Structure representing the storage entry that holds cluster information
+type Cluster struct {
+	// Name of the cluster
+	Name string `json:"name" structs:"name" mapstructure:"name"`
+
+	// Identifier of the cluster
+	ID string `json:"id" structs:"id" mapstructure:"id"`
+}
+
+// Cluster fetches the details of the local cluster. This method errors out
+// when Vault is sealed.
+func (c *Core) Cluster(ctx context.Context) (*Cluster, error) {
+	var cluster Cluster
+
+	// Fetch the storage entry. This call fails when Vault is sealed.
+	entry, err := c.barrier.Get(ctx, coreLocalClusterInfoPath)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return &cluster, nil
+	}
+
+	// Decode the cluster information
+	if err = jsonutil.DecodeJSON(entry.Value, &cluster); err != nil {
+		return nil, errwrap.Wrapf("failed to decode cluster details: {{err}}", err)
+	}
+
+	// A name set in the config file overrides the stored name
+	if c.clusterName != "" {
+		cluster.Name = c.clusterName
+	}
+
+	return &cluster, nil
+}
+
+// This sets our local cluster cert and private key based on the advertisement.
+// It also ensures the cert is in our local cluster cert pool.
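+// The advertisement is produced by the active node; setupCluster (below)
+// shows how the P-521 key and self-signed certificate it carries are created.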
+func (c *Core) loadLocalClusterTLS(adv activeAdvertisement) (retErr error) { + defer func() { + if retErr != nil { + c.localClusterCert.Store(([]byte)(nil)) + c.localClusterParsedCert.Store((*x509.Certificate)(nil)) + c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil)) + + c.requestForwardingConnectionLock.Lock() + c.clearForwardingClients() + c.requestForwardingConnectionLock.Unlock() + } + }() + + switch { + case adv.ClusterAddr == "": + // Clustering disabled on the server, don't try to look for params + return nil + + case adv.ClusterKeyParams == nil: + c.logger.Error("no key params found loading local cluster TLS information") + return fmt.Errorf("no local cluster key params found") + + case adv.ClusterKeyParams.X == nil, adv.ClusterKeyParams.Y == nil, adv.ClusterKeyParams.D == nil: + c.logger.Error("failed to parse local cluster key due to missing params") + return fmt.Errorf("failed to parse local cluster key") + + case adv.ClusterKeyParams.Type != corePrivateKeyTypeP521: + c.logger.Error("unknown local cluster key type", "key_type", adv.ClusterKeyParams.Type) + return fmt.Errorf("failed to find valid local cluster key type") + + case adv.ClusterCert == nil || len(adv.ClusterCert) == 0: + c.logger.Error("no local cluster cert found") + return fmt.Errorf("no local cluster cert found") + + } + + c.localClusterPrivateKey.Store(&ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: elliptic.P521(), + X: adv.ClusterKeyParams.X, + Y: adv.ClusterKeyParams.Y, + }, + D: adv.ClusterKeyParams.D, + }) + + locCert := make([]byte, len(adv.ClusterCert)) + copy(locCert, adv.ClusterCert) + c.localClusterCert.Store(locCert) + + cert, err := x509.ParseCertificate(adv.ClusterCert) + if err != nil { + c.logger.Error("failed parsing local cluster certificate", "error", err) + return errwrap.Wrapf("error parsing local cluster certificate: {{err}}", err) + } + + c.localClusterParsedCert.Store(cert) + + return nil +} + +// setupCluster creates storage entries for holding Vault cluster information. +// Entries will be created only if they are not already present. If clusterName +// is not supplied, this method will auto-generate it. 
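+// When HA is enabled, it also generates the cluster's P-521 private key and
+// self-signed CA certificate if they are not already held in memory.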
+func (c *Core) setupCluster(ctx context.Context) error {
+	// Prevent data races with the TLS parameters
+	c.clusterParamsLock.Lock()
+	defer c.clusterParamsLock.Unlock()
+
+	// Check if storage index is already present or not
+	cluster, err := c.Cluster(ctx)
+	if err != nil {
+		c.logger.Error("failed to get cluster details", "error", err)
+		return err
+	}
+
+	var modified bool
+
+	if cluster == nil {
+		cluster = &Cluster{}
+	}
+
+	if cluster.Name == "" {
+		// If cluster name is not supplied, generate one
+		if c.clusterName == "" {
+			c.logger.Debug("cluster name not found/set, generating new")
+			clusterNameBytes, err := uuid.GenerateRandomBytes(4)
+			if err != nil {
+				c.logger.Error("failed to generate cluster name", "error", err)
+				return err
+			}
+
+			c.clusterName = fmt.Sprintf("vault-cluster-%08x", clusterNameBytes)
+		}
+
+		cluster.Name = c.clusterName
+		if c.logger.IsDebug() {
+			c.logger.Debug("cluster name set", "name", cluster.Name)
+		}
+		modified = true
+	}
+
+	if cluster.ID == "" {
+		c.logger.Debug("cluster ID not found, generating new")
+		// Generate a clusterID
+		cluster.ID, err = uuid.GenerateUUID()
+		if err != nil {
+			c.logger.Error("failed to generate cluster identifier", "error", err)
+			return err
+		}
+		if c.logger.IsDebug() {
+			c.logger.Debug("cluster ID set", "id", cluster.ID)
+		}
+		modified = true
+	}
+
+	// If we're using HA, generate server-to-server parameters
+	if c.ha != nil {
+		// Create a private key
+		if c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey) == nil {
+			c.logger.Debug("generating cluster private key")
+			key, err := ecdsa.GenerateKey(elliptic.P521(), c.secureRandomReader)
+			if err != nil {
+				c.logger.Error("failed to generate local cluster key", "error", err)
+				return err
+			}
+
+			c.localClusterPrivateKey.Store(key)
+		}
+
+		// Create a certificate
+		if c.localClusterCert.Load().([]byte) == nil {
+			c.logger.Debug("generating local cluster certificate")
+
+			host, err := uuid.GenerateUUID()
+			if err != nil {
+				return err
+			}
+			host = fmt.Sprintf("fw-%s", host)
+			template := &x509.Certificate{
+				Subject: pkix.Name{
+					CommonName: host,
+				},
+				DNSNames: []string{host},
+				ExtKeyUsage: []x509.ExtKeyUsage{
+					x509.ExtKeyUsageServerAuth,
+					x509.ExtKeyUsageClientAuth,
+				},
+				KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
+				SerialNumber: big.NewInt(mathrand.Int63()),
+				NotBefore:    time.Now().Add(-30 * time.Second),
+				// 30 years of single-active uptime ought to be enough for anybody
+				NotAfter:              time.Now().Add(262980 * time.Hour),
+				BasicConstraintsValid: true,
+				IsCA:                  true,
+			}
+
+			certBytes, err := x509.CreateCertificate(rand.Reader, template, template, c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey).Public(), c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey))
+			if err != nil {
+				c.logger.Error("error generating self-signed cert", "error", err)
+				return errwrap.Wrapf("unable to generate local cluster certificate: {{err}}", err)
+			}
+
+			parsedCert, err := x509.ParseCertificate(certBytes)
+			if err != nil {
+				c.logger.Error("error parsing self-signed cert", "error", err)
+				return errwrap.Wrapf("error parsing generated certificate: {{err}}", err)
+			}
+
+			c.localClusterCert.Store(certBytes)
+			c.localClusterParsedCert.Store(parsedCert)
+		}
+	}
+
+	if modified {
+		// Encode the cluster information as a JSON string
+		rawCluster, err := json.Marshal(cluster)
+		if err != nil {
+			c.logger.Error("failed to encode cluster details", "error", err)
+			return err
+		}
+
+		// Store it
+		err = c.barrier.Put(ctx, &logical.StorageEntry{
+			Key:   coreLocalClusterInfoPath,
+			Value: rawCluster,
+		})
+		if err != nil {
+			c.logger.Error("failed to store cluster details", "error", err)
+			return err
+		}
+	}
+
+	return nil
+}
+
+// startClusterListener starts cluster request listeners during unseal. It
+// is assumed that the state lock is held while this is run. Right now this
+// only starts cluster listeners. Once the listener is started handlers/clients
+// can start being registered to it.
+func (c *Core) startClusterListener(ctx context.Context) error {
+	if c.ClusterAddr() == "" {
+		c.logger.Info("clustering disabled, not starting listeners")
+		return nil
+	}
+
+	if c.getClusterListener() != nil {
+		c.logger.Warn("cluster listener is already started")
+		return nil
+	}
+
+	if c.clusterListenerAddrs == nil || len(c.clusterListenerAddrs) == 0 {
+		c.logger.Warn("clustering enabled but no addresses to listen on")
+		return fmt.Errorf("cluster addresses not found")
+	}
+
+	c.logger.Debug("starting cluster listeners")
+
+	c.clusterListener.Store(cluster.NewListener(c.clusterListenerAddrs, c.clusterCipherSuites, c.logger.Named("cluster-listener")))
+
+	err := c.getClusterListener().Run(ctx)
+	if err != nil {
+		return err
+	}
+	if strings.HasSuffix(c.ClusterAddr(), ":0") {
+		// If we listened on port 0, record the port the OS gave us.
+		c.clusterAddr.Store(fmt.Sprintf("https://%s", c.getClusterListener().Addrs()[0]))
+	}
+	return nil
+}
+
+func (c *Core) ClusterAddr() string {
+	return c.clusterAddr.Load().(string)
+}
+
+func (c *Core) getClusterListener() *cluster.Listener {
+	cl := c.clusterListener.Load()
+	if cl == nil {
+		return nil
+	}
+	return cl.(*cluster.Listener)
+}
+
+// stopClusterListener stops any existing listeners during seal. It is
+// assumed that the state lock is held while this is run.
+func (c *Core) stopClusterListener() {
+	clusterListener := c.getClusterListener()
+	if clusterListener == nil {
+		c.logger.Debug("clustering disabled, not stopping listeners")
+		return
+	}
+
+	c.logger.Info("stopping cluster listeners")
+
+	clusterListener.Stop()
+	var nilCL *cluster.Listener
+	c.clusterListener.Store(nilCL)
+
+	c.logger.Info("cluster listeners successfully shut down")
+}
+
+func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) {
+	c.clusterListenerAddrs = addrs
+	if c.ClusterAddr() == "" && len(addrs) == 1 {
+		c.clusterAddr.Store(fmt.Sprintf("https://%s", addrs[0].String()))
+	}
+}
+
+func (c *Core) SetClusterHandler(handler http.Handler) {
+	c.clusterHandler = handler
+}
+
+// getGRPCDialer is used to return a dialer that has the correct TLS
+// configuration. Otherwise gRPC tries to be helpful and stomps all over our
+// NextProtos.
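+// The returned function matches the dialer signature that grpc.WithDialer
+// expects.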
+func (c *Core) getGRPCDialer(ctx context.Context, alpnProto, serverName string, caCert *x509.Certificate) func(string, time.Duration) (net.Conn, error) {
+	return func(addr string, timeout time.Duration) (net.Conn, error) {
+		clusterListener := c.getClusterListener()
+		if clusterListener == nil {
+			return nil, errors.New("clustering disabled")
+		}
+
+		tlsConfig, err := clusterListener.TLSConfig(ctx)
+		if err != nil {
+			c.logger.Error("failed to get tls configuration", "error", err)
+			return nil, err
+		}
+		if serverName != "" {
+			tlsConfig.ServerName = serverName
+		}
+		if caCert != nil {
+			pool := x509.NewCertPool()
+			pool.AddCert(caCert)
+			tlsConfig.RootCAs = pool
+			tlsConfig.ClientCAs = pool
+		}
+		c.logger.Debug("creating rpc dialer", "host", tlsConfig.ServerName)
+
+		tlsConfig.NextProtos = []string{alpnProto}
+		dialer := &net.Dialer{
+			Timeout: timeout,
+		}
+		return tls.DialWithDialer(dialer, "tcp", addr, tlsConfig)
+	}
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/cluster/cluster.go b/vendor/github.com/hashicorp/vault/vault/cluster/cluster.go
new file mode 100644
index 00000000..2e6de3df
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/cluster/cluster.go
@@ -0,0 +1,367 @@
+package cluster
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"golang.org/x/net/http2"
+)
+
+var (
+	// Making this a package var allows tests to modify it
+	HeartbeatInterval = 5 * time.Second
+)
+
+const (
+	ListenerAcceptDeadline = 500 * time.Millisecond
+)
+
+// Client is used to look up a client certificate.
+type Client interface {
+	ClientLookup(context.Context, *tls.CertificateRequestInfo) (*tls.Certificate, error)
+}
+
+// Handler exposes functions for looking up TLS configuration and handing
+// off a connection for a cluster listener application.
+type Handler interface {
+	ServerLookup(context.Context, *tls.ClientHelloInfo) (*tls.Certificate, error)
+	CALookup(context.Context) ([]*x509.Certificate, error)
+
+	// Handoff is used to pass the connection lifetime off to
+	// the handler
+	Handoff(context.Context, *sync.WaitGroup, chan struct{}, *tls.Conn) error
+	Stop() error
+}
+
+type ClusterHook interface {
+	AddClient(alpn string, client Client)
+	RemoveClient(alpn string)
+	AddHandler(alpn string, handler Handler)
+	StopHandler(alpn string)
+	TLSConfig(ctx context.Context) (*tls.Config, error)
+	Addr() net.Addr
+}
+
+// Listener is the source of truth for cluster handlers and connection
+// clients. It dynamically builds the cluster TLS information. It's also
+// responsible for starting TCP listeners and accepting new cluster connections.
+type Listener struct {
+	handlers   map[string]Handler
+	clients    map[string]Client
+	shutdown   *uint32
+	shutdownWg *sync.WaitGroup
+	server     *http2.Server
+
+	listenerAddrs []*net.TCPAddr
+	cipherSuites  []uint16
+	logger        log.Logger
+	l             sync.RWMutex
+}
+
+func NewListener(addrs []*net.TCPAddr, cipherSuites []uint16, logger log.Logger) *Listener {
+	// Create the HTTP/2 server that will be shared by both RPC and regular
+	// duties. Doing it this way instead of listening via the server and gRPC
+	// allows us to re-use the same port via ALPN. We can just tell the server
+	// to serve a given conn and which handler to use.
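+	// Each TLS client advertises an ALPN protocol name; Run hands an
+	// accepted connection to whichever registered Handler matches the
+	// negotiated name.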
+	h2Server := &http2.Server{
+		// Our forwarding connections heartbeat regularly, so we want anything
+		// else to go away/get cleaned up pretty rapidly
+		IdleTimeout: 5 * HeartbeatInterval,
+	}
+
+	return &Listener{
+		handlers:   make(map[string]Handler),
+		clients:    make(map[string]Client),
+		shutdown:   new(uint32),
+		shutdownWg: &sync.WaitGroup{},
+		server:     h2Server,
+
+		listenerAddrs: addrs,
+		cipherSuites:  cipherSuites,
+		logger:        logger,
+	}
+}
+
+// TODO: This probably isn't correct
+func (cl *Listener) Addr() net.Addr {
+	return cl.listenerAddrs[0]
+}
+
+func (cl *Listener) Addrs() []*net.TCPAddr {
+	return cl.listenerAddrs
+}
+
+// AddClient adds a new client for an ALPN name
+func (cl *Listener) AddClient(alpn string, client Client) {
+	cl.l.Lock()
+	cl.clients[alpn] = client
+	cl.l.Unlock()
+}
+
+// RemoveClient removes the client for the specified ALPN name
+func (cl *Listener) RemoveClient(alpn string) {
+	cl.l.Lock()
+	delete(cl.clients, alpn)
+	cl.l.Unlock()
+}
+
+// AddHandler registers a new cluster handler for the provided ALPN name.
+func (cl *Listener) AddHandler(alpn string, handler Handler) {
+	cl.l.Lock()
+	cl.handlers[alpn] = handler
+	cl.l.Unlock()
+}
+
+// StopHandler stops the cluster handler for the provided ALPN name; it also
+// calls Stop on the handler.
+func (cl *Listener) StopHandler(alpn string) {
+	cl.l.Lock()
+	handler, ok := cl.handlers[alpn]
+	delete(cl.handlers, alpn)
+	cl.l.Unlock()
+	if ok {
+		handler.Stop()
+	}
+}
+
+// Handler returns the handler for the provided ALPN name
+func (cl *Listener) Handler(alpn string) (Handler, bool) {
+	cl.l.RLock()
+	handler, ok := cl.handlers[alpn]
+	cl.l.RUnlock()
+	return handler, ok
+}
+
+// Server returns the http2 server that the cluster listener is using
+func (cl *Listener) Server() *http2.Server {
+	return cl.server
+}
+
+// TLSConfig returns a tls config object that uses dynamic lookups to correctly
+// authenticate registered handlers/clients
+func (cl *Listener) TLSConfig(ctx context.Context) (*tls.Config, error) {
+	serverLookup := func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+		cl.logger.Debug("performing server cert lookup")
+
+		cl.l.RLock()
+		defer cl.l.RUnlock()
+		for _, v := range clientHello.SupportedProtos {
+			if handler, ok := cl.handlers[v]; ok {
+				return handler.ServerLookup(ctx, clientHello)
+			}
+		}
+
+		cl.logger.Warn("no TLS certs found for ALPN", "ALPN", clientHello.SupportedProtos)
+		return nil, errors.New("unsupported protocol")
+	}
+
+	clientLookup := func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+		cl.logger.Debug("performing client cert lookup")
+
+		cl.l.RLock()
+		defer cl.l.RUnlock()
+		for _, client := range cl.clients {
+			cert, err := client.ClientLookup(ctx, requestInfo)
+			if err == nil && cert != nil {
+				return cert, nil
+			}
+		}
+
+		cl.logger.Warn("no client information found")
+		return nil, errors.New("no client cert found")
+	}
+
+	serverConfigLookup := func(clientHello *tls.ClientHelloInfo) (*tls.Config, error) {
+		caPool := x509.NewCertPool()
+
+		ret := &tls.Config{
+			ClientAuth:           tls.RequireAndVerifyClientCert,
+			GetCertificate:       serverLookup,
+			GetClientCertificate: clientLookup,
+			MinVersion:           tls.VersionTLS12,
+			RootCAs:              caPool,
+			ClientCAs:            caPool,
+			NextProtos:           clientHello.SupportedProtos,
+			CipherSuites:         cl.cipherSuites,
+		}
+
+		cl.l.RLock()
+		defer cl.l.RUnlock()
+		for _, v := range clientHello.SupportedProtos {
+			if handler, ok := cl.handlers[v]; ok {
+				caList, err := handler.CALookup(ctx)
+				if err != nil {
+					return nil, err
+				}
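+
+				// caPool backs both RootCAs and ClientCAs above, so the CAs
+				// this handler reports verify both sides of the mutually
+				// authenticated connection.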
+				for _, ca := range caList {
+					caPool.AddCert(ca)
+				}
+				return ret, nil
+			}
+		}
+
+		cl.logger.Warn("no TLS config found for ALPN", "ALPN", clientHello.SupportedProtos)
+		return nil, errors.New("unsupported protocol")
+	}
+
+	return &tls.Config{
+		ClientAuth:           tls.RequireAndVerifyClientCert,
+		GetCertificate:       serverLookup,
+		GetClientCertificate: clientLookup,
+		GetConfigForClient:   serverConfigLookup,
+		MinVersion:           tls.VersionTLS12,
+		CipherSuites:         cl.cipherSuites,
+	}, nil
+}
+
+// Run starts the TCP listeners and will accept connections until stop is
+// called. This function blocks so should be called in a goroutine.
+func (cl *Listener) Run(ctx context.Context) error {
+	// Get our TLS config
+	tlsConfig, err := cl.TLSConfig(ctx)
+	if err != nil {
+		cl.logger.Error("failed to get tls configuration when starting cluster listener", "error", err)
+		return err
+	}
+
+	// The server supports all of the possible protos
+	tlsConfig.NextProtos = []string{"h2", consts.RequestForwardingALPN, consts.PerfStandbyALPN, consts.PerformanceReplicationALPN, consts.DRReplicationALPN}
+
+	for i, laddr := range cl.listenerAddrs {
+		// closeCh is used to shut down the spawned goroutines once this
+		// function returns
+		closeCh := make(chan struct{})
+
+		if cl.logger.IsInfo() {
+			cl.logger.Info("starting listener", "listener_address", laddr)
+		}
+
+		// Create a TCP listener. We do this separately and specifically
+		// with TCP so that we can set deadlines.
+		tcpLn, err := net.ListenTCP("tcp", laddr)
+		if err != nil {
+			cl.logger.Error("error starting listener", "error", err)
+			continue
+		}
+		if laddr.String() != tcpLn.Addr().String() {
+			// If we listened on port 0, record the port the OS gave us.
+			cl.listenerAddrs[i] = tcpLn.Addr().(*net.TCPAddr)
+		}
+
+		// Wrap the listener with TLS
+		tlsLn := tls.NewListener(tcpLn, tlsConfig)
+
+		if cl.logger.IsInfo() {
+			cl.logger.Info("serving cluster requests", "cluster_listen_address", tlsLn.Addr())
+		}
+
+		cl.shutdownWg.Add(1)
+		// Start our listening loop
+		go func(closeCh chan struct{}, tlsLn net.Listener) {
+			defer func() {
+				cl.shutdownWg.Done()
+				tlsLn.Close()
+				close(closeCh)
+			}()
+
+			for {
+				if atomic.LoadUint32(cl.shutdown) > 0 {
+					return
+				}
+
+				// Set the deadline for the accept call. If it passes we'll get
+				// an error, causing us to check the condition at the top
+				// again.
+				tcpLn.SetDeadline(time.Now().Add(ListenerAcceptDeadline))
+
+				// Accept the connection
+				conn, err := tlsLn.Accept()
+				if err != nil {
+					if err, ok := err.(net.Error); ok && !err.Timeout() {
+						cl.logger.Debug("non-timeout error accepting on cluster port", "error", err)
+					}
+					if conn != nil {
+						conn.Close()
+					}
+					continue
+				}
+				if conn == nil {
+					continue
+				}
+
+				// Type assert to TLS connection and handshake to populate the
+				// connection state
+				tlsConn := conn.(*tls.Conn)
+
+				// Set a deadline for the handshake. This will cause clients
+				// that don't successfully auth to be kicked out quickly.
+				// Cluster connections should be reliable so being marginally
+				// aggressive here is fine.
+				err = tlsConn.SetDeadline(time.Now().Add(30 * time.Second))
+				if err != nil {
+					if cl.logger.IsDebug() {
+						cl.logger.Debug("error setting deadline for cluster connection", "error", err)
+					}
+					tlsConn.Close()
+					continue
+				}
+
+				err = tlsConn.Handshake()
+				if err != nil {
+					if cl.logger.IsDebug() {
+						cl.logger.Debug("error handshaking cluster connection", "error", err)
+					}
+					tlsConn.Close()
+					continue
+				}
+
+				// Now, set it back to unlimited
+				err = tlsConn.SetDeadline(time.Time{})
+				if err != nil {
+					if cl.logger.IsDebug() {
+						cl.logger.Debug("error setting deadline for cluster connection", "error", err)
+					}
+					tlsConn.Close()
+					continue
+				}
+
+				cl.l.RLock()
+				handler, ok := cl.handlers[tlsConn.ConnectionState().NegotiatedProtocol]
+				cl.l.RUnlock()
+				if !ok {
+					cl.logger.Debug("unknown negotiated protocol on cluster port")
+					tlsConn.Close()
+					continue
+				}
+
+				if err := handler.Handoff(ctx, cl.shutdownWg, closeCh, tlsConn); err != nil {
+					cl.logger.Error("error handling cluster connection", "error", err)
+					continue
+				}
+			}
+		}(closeCh, tlsLn)
+	}
+
+	return nil
+}
+
+// Stop stops the cluster listener
+func (cl *Listener) Stop() {
+	// Set the shutdown flag. This will cause the listeners to shut down
+	// within ListenerAcceptDeadline
+	atomic.StoreUint32(cl.shutdown, 1)
+	cl.logger.Info("forwarding rpc listeners stopped")
+
+	// Wait for them all to shut down
+	cl.shutdownWg.Wait()
+	cl.logger.Info("rpc listeners successfully shut down")
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/core.go b/vendor/github.com/hashicorp/vault/vault/core.go
new file mode 100644
index 00000000..cb792084
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/core.go
@@ -0,0 +1,2185 @@
+package vault
+
+import (
+	"context"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"crypto/subtle"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"path/filepath"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	metrics "github.com/armon/go-metrics"
+	"github.com/hashicorp/errwrap"
+	log "github.com/hashicorp/go-hclog"
+	multierror "github.com/hashicorp/go-multierror"
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/audit"
+	"github.com/hashicorp/vault/command/server"
+	"github.com/hashicorp/vault/helper/metricsutil"
+	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/helper/reload"
+	"github.com/hashicorp/vault/physical/raft"
+	"github.com/hashicorp/vault/sdk/helper/certutil"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/helper/mlock"
+	"github.com/hashicorp/vault/sdk/helper/strutil"
+	"github.com/hashicorp/vault/sdk/helper/tlsutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/hashicorp/vault/shamir"
+	"github.com/hashicorp/vault/vault/cluster"
+	"github.com/hashicorp/vault/vault/seal"
+	shamirseal "github.com/hashicorp/vault/vault/seal/shamir"
+	cache "github.com/patrickmn/go-cache"
+	"google.golang.org/grpc"
+)
+
+const (
+	// CoreLockPath is the path used to acquire a coordinating lock
+	// for a highly-available deploy.
+	CoreLockPath = "core/lock"
+
+	// The poison pill is used as a check during certain scenarios to indicate
+	// to standby nodes that they should seal
+	poisonPillPath = "core/poison-pill"
+
+	// coreLeaderPrefix is the prefix used for the UUID that contains
+	// the currently elected leader.
+	coreLeaderPrefix = "core/leader/"
+
+	// knownPrimaryAddrsPrefix is used to store last-known cluster address
+	// information for primaries
+	knownPrimaryAddrsPrefix = "core/primary-addrs/"
+
+	// coreKeyringCanaryPath is used as a canary to indicate to replicated
+	// clusters that they need to perform a rekey operation synchronously; this
+	// isn't keyring-canary to avoid ignoring it when ignoring core/keyring
+	coreKeyringCanaryPath = "core/canary-keyring"
+)
+
+var (
+	// ErrAlreadyInit is returned if the core is already
+	// initialized. This prevents a re-initialization.
+	ErrAlreadyInit = errors.New("Vault is already initialized")
+
+	// ErrNotInit is returned on an attempt to unseal a barrier that has
+	// not been initialized.
+	ErrNotInit = errors.New("Vault is not initialized")
+
+	// ErrInternalError is returned when we don't want to leak
+	// any information about an internal error
+	ErrInternalError = errors.New("internal error")
+
+	// ErrHANotEnabled is returned if the operation only makes sense
+	// in an HA setting
+	ErrHANotEnabled = errors.New("Vault is not configured for highly-available mode")
+
+	// manualStepDownSleepPeriod is how long to sleep after a user-initiated
+	// step down of the active node, to prevent instantly regrabbing the lock.
+	// It's a var rather than a const so that tests can manipulate it.
+	manualStepDownSleepPeriod = 10 * time.Second
+
+	// Functions only in the Enterprise version
+	enterprisePostUnseal         = enterprisePostUnsealImpl
+	enterprisePreSeal            = enterprisePreSealImpl
+	enterpriseSetupFilteredPaths = enterpriseSetupFilteredPathsImpl
+	startReplication             = startReplicationImpl
+	stopReplication              = stopReplicationImpl
+	LastWAL                      = lastWALImpl
+	LastPerformanceWAL           = lastPerformanceWALImpl
+	PerformanceMerkleRoot        = merkleRootImpl
+	DRMerkleRoot                 = merkleRootImpl
+	LastRemoteWAL                = lastRemoteWALImpl
+	WaitUntilWALShipped          = waitUntilWALShippedImpl
+)
+
+// NonFatalError is an error that can be returned during NewCore that should be
+// displayed but not cause a program exit
+type NonFatalError struct {
+	Err error
+}
+
+func (e *NonFatalError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+
+func (e *NonFatalError) Error() string {
+	return e.Err.Error()
+}
+
+// NewNonFatalError returns a new non-fatal error.
+func NewNonFatalError(err error) *NonFatalError {
+	return &NonFatalError{Err: err}
+}
+
+// IsFatalError returns true if the given error is a fatal error.
+func IsFatalError(err error) bool {
+	return !errwrap.ContainsType(err, new(NonFatalError))
+}
+
+// ErrInvalidKey is returned if there is a user-based error with a provided
+// unseal key. This will be shown to the user, so should not contain
+// information that is sensitive.
+type ErrInvalidKey struct {
+	Reason string
+}
+
+func (e *ErrInvalidKey) Error() string {
+	return fmt.Sprintf("invalid key: %v", e.Reason)
+}
+
+type RegisterAuthFunc func(context.Context, time.Duration, string, *logical.Auth) error
+
+type activeAdvertisement struct {
+	RedirectAddr     string                     `json:"redirect_addr"`
+	ClusterAddr      string                     `json:"cluster_addr,omitempty"`
+	ClusterCert      []byte                     `json:"cluster_cert,omitempty"`
+	ClusterKeyParams *certutil.ClusterKeyParams `json:"cluster_key_params,omitempty"`
+}
+
+type unlockInformation struct {
+	Parts [][]byte
+	Nonce string
+}
+
+type raftInformation struct {
+	challenge           *physical.EncryptedBlobInfo
+	leaderClient        *api.Client
+	leaderBarrierConfig *SealConfig
+	nonVoter            bool
+}
+
+// Core is used as the central manager of Vault activity. It is the primary point of
+// interface for API handlers and is responsible for managing the logical and physical
+// backends, router, security barrier, and audit trails.
+type Core struct {
+	entCore
+
+	// The registry of builtin plugins is passed in here as an interface because
+	// if it's used directly, it results in import cycles.
+	builtinRegistry BuiltinRegistry
+
+	// N.B.: This is used to populate a dev token down replication, as
+	// otherwise, after replication is started, a dev would have to go through
+	// the generate-root process simply to talk to the new follower cluster.
+	devToken string
+
+	// HABackend may be available depending on the physical backend
+	ha physical.HABackend
+
+	// storageType is the storage type set in the storage configuration
+	storageType string
+
+	// redirectAddr is the address we advertise as leader if held
+	redirectAddr string
+
+	// clusterAddr is the address we use for clustering
+	clusterAddr *atomic.Value
+
+	// physical backend is the un-trusted backend with durable data
+	physical physical.Backend
+
+	// underlyingPhysical will always point to the underlying backend
+	// implementation. This is an un-trusted backend with durable data
+	underlyingPhysical physical.Backend
+
+	// seal is our seal, for seal configuration information
+	seal Seal
+
+	// raftInfo will contain information required for this node to join as a
+	// peer to an existing raft cluster
+	raftInfo *raftInformation
+
+	// migrationSeal is the seal to use during a migration operation. It is the
+	// seal we're migrating *from*.
+	migrationSeal Seal
+	sealMigrated  *uint32
+
+	// unwrapSeal is the seal to use on Enterprise to unwrap values wrapped
+	// with the previous seal.
+	unwrapSeal Seal
+
+	// barrier is the security barrier wrapping the physical backend
+	barrier SecurityBarrier
+
+	// router is responsible for managing the mount points for logical backends.
+	router *Router
+
+	// logicalBackends is the mapping of backends to use for this core
+	logicalBackends map[string]logical.Factory
+
+	// credentialBackends is the mapping of backends to use for this core
+	credentialBackends map[string]logical.Factory
+
+	// auditBackends is the mapping of backends to use for this core
+	auditBackends map[string]audit.Factory
+
+	// stateLock protects mutable state
+	stateLock sync.RWMutex
+	sealed    *uint32
+
+	standby              bool
+	perfStandby          bool
+	standbyDoneCh        chan struct{}
+	standbyStopCh        chan struct{}
+	manualStepDownCh     chan struct{}
+	keepHALockOnStepDown *uint32
+	heldHALock           physical.Lock
+
+	// unlockInfo has the keys provided to Unseal until the threshold number of parts is available, as well as the operation nonce
+	unlockInfo *unlockInformation
+
+	// generateRootProgress holds the shares until we reach enough
+	// to verify the master key
+	generateRootConfig   *GenerateRootConfig
+	generateRootProgress [][]byte
+	generateRootLock     sync.Mutex
+
+	// These variables hold the config and shares we have until we reach
+	// enough to verify the appropriate master key. Note that the same lock is
+	// used; this isn't time-critical so this shouldn't be a problem.
+	barrierRekeyConfig  *SealConfig
+	recoveryRekeyConfig *SealConfig
+	rekeyLock           sync.RWMutex
+
+	// mounts is loaded after unseal since it is a protected
+	// configuration
+	mounts *MountTable
+
+	// mountsLock is used to ensure that the mounts table does not
+	// change underneath a calling function
+	mountsLock sync.RWMutex
+
+	// auth is loaded after unseal since it is a protected
+	// configuration
+	auth *MountTable
+
+	// authLock is used to ensure that the auth table does not
+	// change underneath a calling function
+	authLock sync.RWMutex
+
+	// audit is loaded after unseal since it is a protected
+	// configuration
+	audit *MountTable
+
+	// auditLock is used to ensure that the audit table does not
+	// change underneath a calling function
+	auditLock sync.RWMutex
+
+	// auditBroker is used to ingest the audit events and fan
+	// out into the configured audit backends
+	auditBroker *AuditBroker
+
+	// auditedHeaders is used to configure which http headers
+	// can be output in the audit logs
+	auditedHeaders *AuditedHeadersConfig
+
+	// systemBackend is the backend which is used to manage internal operations
+	systemBackend *SystemBackend
+
+	// cubbyholeBackend is the backend which manages the per-token storage
+	cubbyholeBackend *CubbyholeBackend
+
+	// systemBarrierView is the barrier view for the system backend
+	systemBarrierView *BarrierView
+
+	// expiration manager is used for managing LeaseIDs,
+	// renewal, expiration and revocation
+	expiration *ExpirationManager
+
+	// rollback manager is used to run rollbacks periodically
+	rollback *RollbackManager
+
+	// policy store is used to manage named ACL policies
+	policyStore *PolicyStore
+
+	// token store is used to manage authentication tokens
+	tokenStore *TokenStore
+
+	// identityStore is used to manage client entities
+	identityStore *IdentityStore
+
+	// metricsCh is used to stop the metrics streaming
+	metricsCh chan struct{}
+
+	// metricsMutex is used to prevent a race condition between
+	// metrics emission and sealing leading to a nil pointer
+	metricsMutex sync.Mutex
+
+	defaultLeaseTTL time.Duration
+	maxLeaseTTL     time.Duration
+
+	// baseLogger is used to avoid ResetNamed as it strips useful prefixes in,
+	// e.g., testing
+	baseLogger log.Logger
+	logger     log.Logger
+
+	// cachingDisabled indicates whether caches are disabled
+	cachingDisabled bool
+	// Cache stores the actual cache; we always have this but may bypass it if
+	// disabled
+	physicalCache physical.ToggleablePurgemonster
+
+	// reloadFuncs is a map containing reload functions
+	reloadFuncs map[string][]reload.ReloadFunc
+
+	// reloadFuncsLock controls access to the funcs
+	reloadFuncsLock sync.RWMutex
+
+	// wrappingJWTKey is the key used for generating JWTs containing response
+	// wrapping information
+	wrappingJWTKey *ecdsa.PrivateKey
+
+	//
+	// Cluster information
+	//
+	// Name
+	clusterName string
+	// Specific cipher suites to use for clustering, if any
+	clusterCipherSuites []uint16
+	// Used to modify cluster parameters
+	clusterParamsLock sync.RWMutex
+	// The private key stored in the barrier used for establishing
+	// mutually-authenticated connections between Vault cluster members
+	localClusterPrivateKey *atomic.Value
+	// The local cluster cert
+	localClusterCert *atomic.Value
+	// The parsed form of the local cluster cert
+	localClusterParsedCert *atomic.Value
+	// The TCP addresses we should use for clustering
+	clusterListenerAddrs []*net.TCPAddr
+	// The handler to use for request forwarding
+	clusterHandler http.Handler
+	// Write lock used to ensure that we don't have multiple connections adjust
+	// this value at the same time
+	requestForwardingConnectionLock sync.RWMutex
+	// Lock for the leader values, ensuring we don't run the parts of Leader()
+	// that change things concurrently
+	leaderParamsLock sync.RWMutex
+	// Current cluster leader values
+	clusterLeaderParams *atomic.Value
+	// Info on cluster members
+	clusterPeerClusterAddrsCache *cache.Cache
+	// The context for the client
+	rpcClientConnContext context.Context
+	// The function for canceling the client connection
+	rpcClientConnCancelFunc context.CancelFunc
+	// The grpc ClientConn for RPC calls
+	rpcClientConn *grpc.ClientConn
+	// The grpc forwarding client
+	rpcForwardingClient *forwardingClient
+	// The UUID used to hold the leader lock. Only set on active node
+	leaderUUID string
+
+	// CORS Information
+	corsConfig *CORSConfig
+
+	// The active set of upstream cluster addresses; stored via the Echo
+	// mechanism, loaded by the balancer
+	atomicPrimaryClusterAddrs *atomic.Value
+
+	atomicPrimaryFailoverAddrs *atomic.Value
+
+	// replicationState keeps the current replication state cached for quick
+	// lookup; activeNodeReplicationState stores the active value on standbys
+	replicationState           *uint32
+	activeNodeReplicationState *uint32
+
+	// uiConfig contains UI configuration
+	uiConfig *UIConfig
+
+	// rawEnabled indicates whether the Raw endpoint is enabled
+	rawEnabled bool
+
+	// pluginDirectory is the location vault will look for plugin binaries
+	pluginDirectory string
+
+	// pluginCatalog is used to manage plugin configurations
+	pluginCatalog *PluginCatalog
+
+	enableMlock bool
+
+	// This can be used to trigger operations to stop running when Vault is
+	// going to be shut down, stepped down, or sealed
+	activeContext           context.Context
+	activeContextCancelFunc *atomic.Value
+
+	// Stores the sealunwrapper for downgrade needs
+	sealUnwrapper physical.Backend
+
+	// unsealWithStoredKeysLock is a mutex that prevents multiple processes from
+	// unsealing with stored keys at the same time.
+	unsealWithStoredKeysLock sync.Mutex
+
+	// Stores any funcs that should be run on successful postUnseal
+	postUnsealFuncs []func()
+
+	// Stores any funcs that should be run on successful barrier unseal in
+	// recovery mode
+	postRecoveryUnsealFuncs []func() error
+
+	// replicationFailure is used to mark when replication has entered an
+	// unrecoverable failure.
+	replicationFailure *uint32
+
+	// disablePerfStandby is used to tell a standby not to attempt to become a
+	// perf standby
+	disablePerfStandby bool
+
+	licensingStopCh chan struct{}
+
+	// Stores loggers so we can reset the level
+	allLoggers     []log.Logger
+	allLoggersLock sync.RWMutex
+
+	// Can be toggled atomically to cause the core to never try to become
+	// active, or give up active as soon as it gets it
+	neverBecomeActive *uint32
+
+	// loadCaseSensitiveIdentityStore enforces the loading of identity store
+	// artifacts in a case sensitive manner. To be used only in testing.
+	loadCaseSensitiveIdentityStore bool
+
+	// clusterListener starts up and manages connections on the cluster ports
+	clusterListener *atomic.Value
+
+	// Telemetry objects
+	metricsHelper *metricsutil.MetricsHelper
+
+	// Stores request counters
+	counters counters
+
+	// Stores the raft applied index for standby nodes
+	raftFollowerStates *raftFollowerStates
+	// Stop channel for raft TLS rotations
+	raftTLSRotationStopCh chan struct{}
+	// Stores the pending peers we are waiting to give answers
+	pendingRaftPeers map[string][]byte
+
+	// rawConfig stores the config as-is from the provided server configuration.
+	rawConfig *server.Config
+
+	coreNumber int
+
+	// secureRandomReader is the reader used for CSP operations
+	secureRandomReader io.Reader
+
+	recoveryMode bool
+}
+
+// CoreConfig is used to parameterize a core
+type CoreConfig struct {
+	DevToken string `json:"dev_token" structs:"dev_token" mapstructure:"dev_token"`
+
+	BuiltinRegistry BuiltinRegistry `json:"builtin_registry" structs:"builtin_registry" mapstructure:"builtin_registry"`
+
+	LogicalBackends map[string]logical.Factory `json:"logical_backends" structs:"logical_backends" mapstructure:"logical_backends"`
+
+	CredentialBackends map[string]logical.Factory `json:"credential_backends" structs:"credential_backends" mapstructure:"credential_backends"`
+
+	AuditBackends map[string]audit.Factory `json:"audit_backends" structs:"audit_backends" mapstructure:"audit_backends"`
+
+	Physical physical.Backend `json:"physical" structs:"physical" mapstructure:"physical"`
+
+	StorageType string `json:"storage_type" structs:"storage_type" mapstructure:"storage_type"`
+
+	// May be nil, which disables HA operations
+	HAPhysical physical.HABackend `json:"ha_physical" structs:"ha_physical" mapstructure:"ha_physical"`
+
+	Seal Seal `json:"seal" structs:"seal" mapstructure:"seal"`
+
+	SecureRandomReader io.Reader `json:"secure_random_reader" structs:"secure_random_reader" mapstructure:"secure_random_reader"`
+
+	Logger log.Logger `json:"logger" structs:"logger" mapstructure:"logger"`
+
+	// Disables the LRU cache on the physical backend
+	DisableCache bool `json:"disable_cache" structs:"disable_cache" mapstructure:"disable_cache"`
+
+	// Disables mlock syscall
+	DisableMlock bool `json:"disable_mlock" structs:"disable_mlock" mapstructure:"disable_mlock"`
+
+	// Custom cache size for the LRU cache on the physical backend, or zero for default
+	CacheSize int `json:"cache_size" structs:"cache_size" mapstructure:"cache_size"`
+
+	// Set as the leader address for HA
+	RedirectAddr string `json:"redirect_addr" structs:"redirect_addr" mapstructure:"redirect_addr"`
+
+	// Set as the cluster address for HA
+	ClusterAddr string `json:"cluster_addr" structs:"cluster_addr" mapstructure:"cluster_addr"`
+
+	DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+
+	MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+
+	ClusterName string `json:"cluster_name" structs:"cluster_name" mapstructure:"cluster_name"`
+
+	ClusterCipherSuites string `json:"cluster_cipher_suites" structs:"cluster_cipher_suites" mapstructure:"cluster_cipher_suites"`
+
+	EnableUI bool `json:"ui" structs:"ui" mapstructure:"ui"`
+
+	// Enable the raw endpoint
+	EnableRaw bool `json:"enable_raw" structs:"enable_raw" mapstructure:"enable_raw"`
+
+	PluginDirectory string `json:"plugin_directory" structs:"plugin_directory" mapstructure:"plugin_directory"`
+
+	DisableSealWrap bool `json:"disable_sealwrap" structs:"disable_sealwrap" mapstructure:"disable_sealwrap"`
+
+	RawConfig *server.Config
+
+	ReloadFuncs     *map[string][]reload.ReloadFunc
+	ReloadFuncsLock *sync.RWMutex
+
+	// Licensing
+	LicensingConfig *LicensingConfig
+	// Don't set this unless in dev mode, ideally only when using inmem
+	DevLicenseDuration time.Duration
+
+	DisablePerformanceStandby bool
+	DisableIndexing           bool
+	DisableKeyEncodingChecks  bool
+
+	AllLoggers []log.Logger
+
+	// Telemetry objects
+	MetricsHelper *metricsutil.MetricsHelper
+
+	CounterSyncInterval time.Duration
+
+	RecoveryMode bool
+}
+
+func (c *CoreConfig) Clone() *CoreConfig {
+	return &CoreConfig{
+		DevToken:                  c.DevToken,
+		LogicalBackends:           c.LogicalBackends,
+		CredentialBackends:        c.CredentialBackends,
+		AuditBackends:             c.AuditBackends,
+		Physical:                  c.Physical,
+		HAPhysical:                c.HAPhysical,
+		Seal:                      c.Seal,
+		Logger:                    c.Logger,
+		DisableCache:              c.DisableCache,
+		DisableMlock:              c.DisableMlock,
+		CacheSize:                 c.CacheSize,
+		StorageType:               c.StorageType,
+		RedirectAddr:              c.RedirectAddr,
+		ClusterAddr:               c.ClusterAddr,
+		DefaultLeaseTTL:           c.DefaultLeaseTTL,
+		MaxLeaseTTL:               c.MaxLeaseTTL,
+		ClusterName:               c.ClusterName,
+		ClusterCipherSuites:       c.ClusterCipherSuites,
+		EnableUI:                  c.EnableUI,
+		EnableRaw:                 c.EnableRaw,
+		PluginDirectory:           c.PluginDirectory,
+		DisableSealWrap:           c.DisableSealWrap,
+		ReloadFuncs:               c.ReloadFuncs,
+		ReloadFuncsLock:           c.ReloadFuncsLock,
+		LicensingConfig:           c.LicensingConfig,
+		DevLicenseDuration:        c.DevLicenseDuration,
+		DisablePerformanceStandby: c.DisablePerformanceStandby,
+		DisableIndexing:           c.DisableIndexing,
+		AllLoggers:                c.AllLoggers,
+		CounterSyncInterval:       c.CounterSyncInterval,
+	}
+}
+
+// NewCore is used to construct a new core
+func NewCore(conf *CoreConfig) (*Core, error) {
+	if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
+		if conf.RedirectAddr == "" {
+			return nil, fmt.Errorf("missing API address, please set in configuration or via environment")
+		}
+	}
+
+	if conf.DefaultLeaseTTL == 0 {
+		conf.DefaultLeaseTTL = defaultLeaseTTL
+	}
+	if conf.MaxLeaseTTL == 0 {
+		conf.MaxLeaseTTL = maxLeaseTTL
+	}
+	if conf.DefaultLeaseTTL > conf.MaxLeaseTTL {
+		return nil, fmt.Errorf("cannot have DefaultLeaseTTL larger than MaxLeaseTTL")
+	}
+
+	// Validate the advertise addr if it's given to us
+	if conf.RedirectAddr != "" {
+		u, err := url.Parse(conf.RedirectAddr)
+		if err != nil {
+			return nil, errwrap.Wrapf("redirect address is not valid url: {{err}}", err)
+		}
+
+		if u.Scheme == "" {
+			return nil, fmt.Errorf("redirect address must include scheme (ex.
'http')") + } + } + + // Make a default logger if not provided + if conf.Logger == nil { + conf.Logger = logging.NewVaultLogger(log.Trace) + } + + // Instantiate a non-nil raw config if none is provided + if conf.RawConfig == nil { + conf.RawConfig = new(server.Config) + } + + syncInterval := conf.CounterSyncInterval + if syncInterval.Nanoseconds() == 0 { + syncInterval = 30 * time.Second + } + + // secureRandomReader cannot be nil + if conf.SecureRandomReader == nil { + conf.SecureRandomReader = rand.Reader + } + + // Setup the core + c := &Core{ + entCore: entCore{}, + devToken: conf.DevToken, + physical: conf.Physical, + underlyingPhysical: conf.Physical, + storageType: conf.StorageType, + redirectAddr: conf.RedirectAddr, + clusterAddr: new(atomic.Value), + clusterListener: new(atomic.Value), + seal: conf.Seal, + router: NewRouter(), + sealed: new(uint32), + sealMigrated: new(uint32), + standby: true, + baseLogger: conf.Logger, + logger: conf.Logger.Named("core"), + defaultLeaseTTL: conf.DefaultLeaseTTL, + maxLeaseTTL: conf.MaxLeaseTTL, + cachingDisabled: conf.DisableCache, + clusterName: conf.ClusterName, + clusterPeerClusterAddrsCache: cache.New(3*cluster.HeartbeatInterval, time.Second), + enableMlock: !conf.DisableMlock, + rawEnabled: conf.EnableRaw, + replicationState: new(uint32), + atomicPrimaryClusterAddrs: new(atomic.Value), + atomicPrimaryFailoverAddrs: new(atomic.Value), + localClusterPrivateKey: new(atomic.Value), + localClusterCert: new(atomic.Value), + localClusterParsedCert: new(atomic.Value), + activeNodeReplicationState: new(uint32), + keepHALockOnStepDown: new(uint32), + replicationFailure: new(uint32), + disablePerfStandby: true, + activeContextCancelFunc: new(atomic.Value), + allLoggers: conf.AllLoggers, + builtinRegistry: conf.BuiltinRegistry, + neverBecomeActive: new(uint32), + clusterLeaderParams: new(atomic.Value), + metricsHelper: conf.MetricsHelper, + secureRandomReader: conf.SecureRandomReader, + rawConfig: conf.RawConfig, + counters: counters{ + requests: new(uint64), + syncInterval: syncInterval, + }, + recoveryMode: conf.RecoveryMode, + } + + atomic.StoreUint32(c.sealed, 1) + c.allLoggers = append(c.allLoggers, c.logger) + + c.router.logger = c.logger.Named("router") + c.allLoggers = append(c.allLoggers, c.router.logger) + + atomic.StoreUint32(c.replicationState, uint32(consts.ReplicationDRDisabled|consts.ReplicationPerformanceDisabled)) + c.localClusterCert.Store(([]byte)(nil)) + c.localClusterParsedCert.Store((*x509.Certificate)(nil)) + c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil)) + + c.clusterLeaderParams.Store((*ClusterLeaderParams)(nil)) + c.clusterAddr.Store(conf.ClusterAddr) + c.activeContextCancelFunc.Store((context.CancelFunc)(nil)) + + switch conf.ClusterCipherSuites { + case "tls12": + // Do nothing, let Go use the default + + case "": + // Add in forward compatible TLS 1.3 suites, followed by handpicked 1.2 suites + c.clusterCipherSuites = []uint16{ + // 1.3 + tls.TLS_AES_128_GCM_SHA256, + tls.TLS_AES_256_GCM_SHA384, + tls.TLS_CHACHA20_POLY1305_SHA256, + // 1.2 + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + } + + default: + suites, err := tlsutil.ParseCiphers(conf.ClusterCipherSuites) + if err != nil { + return nil, errwrap.Wrapf("error parsing cluster cipher suites: {{err}}", err) + } + c.clusterCipherSuites = suites + } + + // Load CORS config and provide a value for the core field. 
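+	// Enabled is stored as a *uint32 so it can be toggled and checked
+	// atomically; its zero value means CORS is disabled until configured.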
+ c.corsConfig = &CORSConfig{ + core: c, + Enabled: new(uint32), + } + + if c.seal == nil { + c.seal = NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("shamir"))) + } + c.seal.SetCore(c) + + if err := coreInit(c, conf); err != nil { + return nil, err + } + + if !conf.DisableMlock { + // Ensure our memory usage is locked into physical RAM + if err := mlock.LockMemory(); err != nil { + return nil, fmt.Errorf( + "Failed to lock memory: %v\n\n"+ + "This usually means that the mlock syscall is not available.\n"+ + "Vault uses mlock to prevent memory from being swapped to\n"+ + "disk. This requires root privileges as well as a machine\n"+ + "that supports mlock. Please enable mlock on your system or\n"+ + "disable Vault from using it. To disable Vault from using it,\n"+ + "set the `disable_mlock` configuration option in your configuration\n"+ + "file.", + err) + } + } + + var err error + + // Construct a new AES-GCM barrier + c.barrier, err = NewAESGCMBarrier(c.physical) + if err != nil { + return nil, errwrap.Wrapf("barrier setup failed: {{err}}", err) + } + + // We create the funcs here, then populate the given config with it so that + // the caller can share state + conf.ReloadFuncsLock = &c.reloadFuncsLock + c.reloadFuncsLock.Lock() + c.reloadFuncs = make(map[string][]reload.ReloadFunc) + c.reloadFuncsLock.Unlock() + conf.ReloadFuncs = &c.reloadFuncs + + // All the things happening below this are not required in + // recovery mode + if c.recoveryMode { + return c, nil + } + + if conf.PluginDirectory != "" { + c.pluginDirectory, err = filepath.Abs(conf.PluginDirectory) + if err != nil { + return nil, errwrap.Wrapf("core setup failed, could not verify plugin directory: {{err}}", err) + } + } + + createSecondaries(c, conf) + + if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() { + c.ha = conf.HAPhysical + } + + logicalBackends := make(map[string]logical.Factory) + for k, f := range conf.LogicalBackends { + logicalBackends[k] = f + } + _, ok := logicalBackends["kv"] + if !ok { + logicalBackends["kv"] = PassthroughBackendFactory + } + + logicalBackends["cubbyhole"] = CubbyholeBackendFactory + logicalBackends[systemMountType] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + sysBackendLogger := conf.Logger.Named("system") + c.AddLogger(sysBackendLogger) + b := NewSystemBackend(c, sysBackendLogger) + if err := b.Setup(ctx, config); err != nil { + return nil, err + } + return b, nil + } + logicalBackends["identity"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + identityLogger := conf.Logger.Named("identity") + c.AddLogger(identityLogger) + return NewIdentityStore(ctx, c, config, identityLogger) + } + addExtraLogicalBackends(c, logicalBackends) + c.logicalBackends = logicalBackends + + credentialBackends := make(map[string]logical.Factory) + for k, f := range conf.CredentialBackends { + credentialBackends[k] = f + } + credentialBackends["token"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + tsLogger := conf.Logger.Named("token") + c.AddLogger(tsLogger) + return NewTokenStore(ctx, tsLogger, c, config) + } + addExtraCredentialBackends(c, credentialBackends) + c.credentialBackends = credentialBackends + + auditBackends := make(map[string]audit.Factory) + for k, f := range conf.AuditBackends { + auditBackends[k] = f + } + c.auditBackends = auditBackends + + uiStoragePrefix := systemBarrierPrefix + "ui" + c.uiConfig = NewUIConfig(conf.EnableUI, physical.NewView(c.physical, 
uiStoragePrefix), NewBarrierView(c.barrier, uiStoragePrefix))
+
+	c.clusterListener.Store((*cluster.Listener)(nil))
+
+	return c, nil
+}
+
+// Shutdown is invoked when the Vault instance is about to be terminated. It
+// should not be accessible as part of an API call as it will cause an availability
+// problem. It is only used to gracefully quit in the case of HA so that failover
+// happens as quickly as possible.
+func (c *Core) Shutdown() error {
+	c.logger.Debug("shutdown called")
+	return c.sealInternal()
+}
+
+// CORSConfig returns the current CORS configuration
+func (c *Core) CORSConfig() *CORSConfig {
+	return c.corsConfig
+}
+
+func (c *Core) GetContext() (context.Context, context.CancelFunc) {
+	c.stateLock.RLock()
+	defer c.stateLock.RUnlock()
+
+	return context.WithCancel(namespace.RootContext(c.activeContext))
+}
+
+// Sealed checks if the Vault is currently sealed
+func (c *Core) Sealed() bool {
+	return atomic.LoadUint32(c.sealed) == 1
+}
+
+// SecretProgress returns the number of keys provided so far
+func (c *Core) SecretProgress() (int, string) {
+	c.stateLock.RLock()
+	defer c.stateLock.RUnlock()
+	switch c.unlockInfo {
+	case nil:
+		return 0, ""
+	default:
+		return len(c.unlockInfo.Parts), c.unlockInfo.Nonce
+	}
+}
+
+// ResetUnsealProcess removes the current unlock parts from memory, to reset
+// the unsealing process
+func (c *Core) ResetUnsealProcess() {
+	c.stateLock.Lock()
+	defer c.stateLock.Unlock()
+	c.unlockInfo = nil
+}
+
+// Unseal is used to provide one of the key parts to unseal the Vault.
+//
+// The key given as a parameter will automatically be zeroed after
+// this method is done with it. If you want to keep the key around, a copy
+// should be made.
+func (c *Core) Unseal(key []byte) (bool, error) {
+	return c.unseal(key, false)
+}
+
+func (c *Core) UnsealWithRecoveryKeys(key []byte) (bool, error) {
+	return c.unseal(key, true)
+}
+
+func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) {
+	defer metrics.MeasureSince([]string{"core", "unseal"}, time.Now())
+
+	c.stateLock.Lock()
+	defer c.stateLock.Unlock()
+
+	c.logger.Debug("unseal key supplied")
+
+	ctx := context.Background()
+
+	// Explicitly check for init status. This also checks if the seal
+	// configuration is valid (i.e. non-nil).
+	init, err := c.Initialized(ctx)
+	if err != nil {
+		return false, err
+	}
+	if !init && !c.isRaftUnseal() {
+		return false, ErrNotInit
+	}
+
+	// Verify the key length
+	min, max := c.barrier.KeyLength()
+	max += shamir.ShareOverhead
+	if len(key) < min {
+		return false, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
+	}
+	if len(key) > max {
+		return false, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
+	}
+
+	// Check if already unsealed
+	if !c.Sealed() {
+		return true, nil
+	}
+
+	sealToUse := c.seal
+	if c.migrationSeal != nil {
+		c.logger.Info("unsealing using migration seal")
+		sealToUse = c.migrationSeal
+	}
+
+	// unsealPart returns either a master key (legacy shamir) or an unseal
+	// key (new-style shamir).
+	masterKey, err := c.unsealPart(ctx, sealToUse, key, useRecoveryKeys)
+	if err != nil {
+		return false, err
+	}
+
+	if masterKey != nil {
+		if c.seal.BarrierType() == seal.Shamir {
+			// If this is a legacy shamir seal this serves no purpose but it
+			// doesn't hurt.
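+			// (With the newer stored-keys shamir variant, the value returned
+			// by unsealPart is only the key-encryption key; the actual master
+			// key is fetched from storage just below via GetStoredKeys.)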
+ err = c.seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(masterKey) + if err != nil { + return false, err + } + } + + if !c.isRaftUnseal() { + if c.seal.BarrierType() == seal.Shamir { + cfg, err := c.seal.BarrierConfig(ctx) + if err != nil { + return false, err + } + + // If there is a stored key, retrieve it. + if cfg.StoredShares > 0 { + if err != nil { + return false, err + } + // Here's where we actually test that the provided unseal + // key is valid: can it decrypt the stored master key? + storedKeys, err := c.seal.GetStoredKeys(ctx) + if err != nil { + return false, err + } + if len(storedKeys) == 0 { + return false, fmt.Errorf("shamir seal with stored keys configured but no stored keys found") + } + masterKey = storedKeys[0] + } + } + + return c.unsealInternal(ctx, masterKey) + } + + // If we are in the middle of a raft join send the answer and wait for + // data to start streaming in. + if err := c.joinRaftSendAnswer(ctx, c.seal.GetAccess(), c.raftInfo); err != nil { + return false, err + } + // Reset the state + c.raftInfo = nil + + go func() { + keyringFound := false + haveMasterKey := c.seal.StoredKeysSupported() != StoredKeysSupportedShamirMaster + defer func() { + if keyringFound && haveMasterKey { + _, err := c.unsealInternal(ctx, masterKey) + if err != nil { + c.logger.Error("failed to unseal", "error", err) + } + } + }() + + // Wait until we at least have the keyring before we attempt to + // unseal the node. + for { + if !keyringFound { + keys, err := c.underlyingPhysical.List(ctx, keyringPrefix) + if err != nil { + c.logger.Error("failed to list physical keys", "error", err) + return + } + if strutil.StrListContains(keys, "keyring") { + keyringFound = true + } + } + if !haveMasterKey { + keys, err := c.seal.GetStoredKeys(ctx) + if err != nil { + c.logger.Error("failed to read master key", "error", err) + return + } + if len(keys) > 0 { + haveMasterKey = true + masterKey = keys[0] + } + } + if keyringFound && haveMasterKey { + return + } + time.Sleep(1 * time.Second) + } + }() + + // Return Vault as sealed since unsealing happens in background + // which gets delayed until the data from the leader is streamed to + // the follower. + return true, nil + } + + return false, nil +} + +// unsealPart takes in a key share, and returns the master key if the threshold +// is met. If recovery keys are supported, recovery key shares may be provided. +func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecoveryKeys bool) ([]byte, error) { + // Check if we already have this piece + if c.unlockInfo != nil { + for _, existing := range c.unlockInfo.Parts { + if subtle.ConstantTimeCompare(existing, key) == 1 { + return nil, nil + } + } + } else { + uuid, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + c.unlockInfo = &unlockInformation{ + Nonce: uuid, + } + } + + // Store this key + c.unlockInfo.Parts = append(c.unlockInfo.Parts, key) + + var config *SealConfig + var err error + + switch { + case seal.RecoveryKeySupported() && (useRecoveryKeys || c.migrationSeal != nil): + config, err = seal.RecoveryConfig(ctx) + case c.isRaftUnseal(): + // Ignore follower's seal config and refer to leader's barrier + // configuration. 
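+		// (c.raftInfo is set while this node is joining a raft cluster; see
+		// the joinRaftSendAnswer call below.)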
+ config = c.raftInfo.leaderBarrierConfig + default: + config, err = seal.BarrierConfig(ctx) + } + if err != nil { + return nil, err + } + + // Check if we don't have enough keys to unlock, proceed through the rest of + // the call only if we have met the threshold + if len(c.unlockInfo.Parts) < config.SecretThreshold { + if c.logger.IsDebug() { + c.logger.Debug("cannot unseal, not enough keys", "keys", len(c.unlockInfo.Parts), "threshold", config.SecretThreshold, "nonce", c.unlockInfo.Nonce) + } + return nil, nil + } + + // Best-effort memzero of unlock parts once we're done with them + defer func() { + for i := range c.unlockInfo.Parts { + memzero(c.unlockInfo.Parts[i]) + } + c.unlockInfo = nil + }() + + // Recover the split key. recoveredKey is the shamir combined + // key, or the single provided key if the threshold is 1. + var recoveredKey []byte + var masterKey []byte + var recoveryKey []byte + if config.SecretThreshold == 1 { + recoveredKey = make([]byte, len(c.unlockInfo.Parts[0])) + copy(recoveredKey, c.unlockInfo.Parts[0]) + } else { + recoveredKey, err = shamir.Combine(c.unlockInfo.Parts) + if err != nil { + return nil, errwrap.Wrapf("failed to compute master key: {{err}}", err) + } + } + + if seal.RecoveryKeySupported() && (useRecoveryKeys || c.migrationSeal != nil) { + // Verify recovery key. + if err := seal.VerifyRecoveryKey(ctx, recoveredKey); err != nil { + return nil, err + } + recoveryKey = recoveredKey + + // Get stored keys and shamir combine into single master key. Unsealing with + // recovery keys currently does not support: 1) mixed stored and non-stored + // keys setup, nor 2) seals that support recovery keys but not stored keys. + // If insufficient shares are provided, shamir.Combine will error, and if + // no stored keys are found it will return masterKey as nil. + if seal.StoredKeysSupported() == StoredKeysSupportedGeneric { + masterKeyShares, err := seal.GetStoredKeys(ctx) + if err != nil { + return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err) + } + + switch len(masterKeyShares) { + case 0: + return nil, errors.New("seal returned no master key shares") + case 1: + masterKey = masterKeyShares[0] + default: + masterKey, err = shamir.Combine(masterKeyShares) + if err != nil { + return nil, errwrap.Wrapf("failed to compute master key: {{err}}", err) + } + } + } + } else { + masterKey = recoveredKey + } + newRecoveryKey := masterKey + + // If we have a migration seal, now's the time! + if c.migrationSeal != nil { + if seal.StoredKeysSupported() == StoredKeysSupportedShamirMaster { + err = seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(masterKey) + if err != nil { + return nil, errwrap.Wrapf("failed to set master key in seal: {{err}}", err) + } + storedKeys, err := seal.GetStoredKeys(ctx) + if err != nil { + return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err) + } + masterKey = storedKeys[0] + } + // Unseal the barrier so we can rekey + if err := c.barrier.Unseal(ctx, masterKey); err != nil { + return nil, errwrap.Wrapf("error unsealing barrier with constructed master key: {{err}}", err) + } + defer c.barrier.Seal() + + switch { + case c.migrationSeal.RecoveryKeySupported() && c.seal.RecoveryKeySupported(): + // Set the recovery and barrier keys to be the same. 
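+			// (This is the auto-unseal to auto-unseal case: both seals support
+			// recovery keys, so the recovery key and the stored barrier keys
+			// are copied across unchanged.)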
+			recoveryKey, err := c.migrationSeal.RecoveryKey(ctx)
+			if err != nil {
+				return nil, errwrap.Wrapf("error getting recovery key to set on new seal: {{err}}", err)
+			}
+
+			if err := c.seal.SetRecoveryKey(ctx, recoveryKey); err != nil {
+				return nil, errwrap.Wrapf("error setting new recovery key information during migrate: {{err}}", err)
+			}
+
+			barrierKeys, err := c.migrationSeal.GetStoredKeys(ctx)
+			if err != nil {
+				return nil, errwrap.Wrapf("error getting stored keys to set on new seal: {{err}}", err)
+			}
+
+			if err := c.seal.SetStoredKeys(ctx, barrierKeys); err != nil {
+				return nil, errwrap.Wrapf("error setting new barrier key information during migrate: {{err}}", err)
+			}
+
+		case c.migrationSeal.RecoveryKeySupported():
+			// Auto to Shamir, since recovery key isn't supported on new seal
+
+			// In this case we have to ensure that the recovery information was
+			// set properly.
+			if recoveryKey == nil {
+				return nil, errors.New("did not get expected recovery information to set new seal during migration")
+			}
+
+			// We have recovery keys; we're going to use them as the new
+			// shamir KeK.
+			err = c.seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(recoveryKey)
+			if err != nil {
+				return nil, errwrap.Wrapf("failed to set master key in seal: {{err}}", err)
+			}
+			if err := c.seal.SetStoredKeys(ctx, [][]byte{masterKey}); err != nil {
+				return nil, errwrap.Wrapf("error setting new barrier key information during migrate: {{err}}", err)
+			}
+
+			masterKey = recoveryKey
+
+		case c.seal.RecoveryKeySupported():
+			// The new seal will have recovery keys; we set it to the existing
+			// master key, so barrier key shares -> recovery key shares
+			if err := c.seal.SetRecoveryKey(ctx, newRecoveryKey); err != nil {
+				return nil, errwrap.Wrapf("error setting new recovery key information: {{err}}", err)
+			}
+
+			// Generate a new master key
+			newMasterKey, err := c.barrier.GenerateKey(c.secureRandomReader)
+			if err != nil {
+				return nil, errwrap.Wrapf("error generating new master key: {{err}}", err)
+			}
+
+			// Rekey the barrier
+			if err := c.barrier.Rekey(ctx, newMasterKey); err != nil {
+				return nil, errwrap.Wrapf("error rekeying barrier during migration: {{err}}", err)
+			}
+
+			// Store the new master key
+			if err := c.seal.SetStoredKeys(ctx, [][]byte{newMasterKey}); err != nil {
+				return nil, errwrap.Wrapf("error storing new master key: {{err}}", err)
+			}
+
+			// Return the new key so it can be used to unlock the barrier
+			masterKey = newMasterKey
+
+		default:
+			return nil, errors.New("unhandled migration case (shamir to shamir)")
+		}
+
+		// At this point we've swapped things around and need to ensure we
+		// don't migrate again
+		c.migrationSeal = nil
+		atomic.StoreUint32(c.sealMigrated, 1)
+
+		// Ensure we populate the new values
+		bc, err := c.seal.BarrierConfig(ctx)
+		if err != nil {
+			return nil, errwrap.Wrapf("error fetching barrier config after migration: {{err}}", err)
+		}
+		if err := c.seal.SetBarrierConfig(ctx, bc); err != nil {
+			return nil, errwrap.Wrapf("error storing barrier config after migration: {{err}}", err)
+		}
+
+		if c.seal.RecoveryKeySupported() {
+			rc, err := c.seal.RecoveryConfig(ctx)
+			if err != nil {
+				return nil, errwrap.Wrapf("error fetching recovery config after migration: {{err}}", err)
+			}
+			if err := c.seal.SetRecoveryConfig(ctx, rc); err != nil {
+				return nil, errwrap.Wrapf("error storing recovery config after migration: {{err}}", err)
+			}
+		}
+	}
+
+	return masterKey, nil
+}
+
+// unsealInternal takes in the master key and attempts to unseal the barrier.
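+// On success it marks the core unsealed and either runs post-unseal setup
+// directly (when no HA backend is configured) or parks the node in standby.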
+// N.B.: This must be called with the state write lock held. +func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, error) { + defer memzero(masterKey) + + // Attempt to unlock + if err := c.barrier.Unseal(ctx, masterKey); err != nil { + return false, err + } + + if err := preUnsealInternal(ctx, c); err != nil { + return false, err + } + + if err := c.startClusterListener(ctx); err != nil { + return false, err + } + + if err := c.startRaftStorage(ctx); err != nil { + return false, err + } + + // Do post-unseal setup if HA is not enabled + if c.ha == nil { + // We still need to set up cluster info even if it's not part of a + // cluster right now. This also populates the cached cluster object. + if err := c.setupCluster(ctx); err != nil { + c.logger.Error("cluster setup failed", "error", err) + c.barrier.Seal() + c.logger.Warn("vault is sealed") + return false, err + } + + ctx, ctxCancel := context.WithCancel(namespace.RootContext(nil)) + if err := c.postUnseal(ctx, ctxCancel, standardUnsealStrategy{}); err != nil { + c.logger.Error("post-unseal setup failed", "error", err) + c.barrier.Seal() + c.logger.Warn("vault is sealed") + return false, err + } + + c.standby = false + } else { + // Go to standby mode, wait until we are active to unseal + c.standbyDoneCh = make(chan struct{}) + c.manualStepDownCh = make(chan struct{}) + c.standbyStopCh = make(chan struct{}) + go c.runStandby(c.standbyDoneCh, c.manualStepDownCh, c.standbyStopCh) + } + + // Force a cache bust here, which will also run migration code + if c.seal.RecoveryKeySupported() { + c.seal.SetRecoveryConfig(ctx, nil) + } + + // Success! + atomic.StoreUint32(c.sealed, 0) + + if c.logger.IsInfo() { + c.logger.Info("vault is unsealed") + } + + if c.ha != nil { + sd, ok := c.ha.(physical.ServiceDiscovery) + if ok { + if err := sd.NotifySealedStateChange(); err != nil { + if c.logger.IsWarn() { + c.logger.Warn("failed to notify unsealed status", "error", err) + } + } + } + } + return true, nil +} + +// SealWithRequest takes in a logical.Request, acquires the lock, and passes +// through to sealInternal +func (c *Core) SealWithRequest(httpCtx context.Context, req *logical.Request) error { + defer metrics.MeasureSince([]string{"core", "seal-with-request"}, time.Now()) + + if c.Sealed() { + return nil + } + + c.stateLock.RLock() + + // This will unlock the read lock + // We use background context since we may not be active + ctx, cancel := context.WithCancel(namespace.RootContext(nil)) + defer cancel() + + go func() { + select { + case <-ctx.Done(): + case <-httpCtx.Done(): + cancel() + } + }() + + // This will unlock the read lock + return c.sealInitCommon(ctx, req) +} + +// Seal takes in a token and creates a logical.Request, acquires the lock, and +// passes through to sealInternal +func (c *Core) Seal(token string) error { + defer metrics.MeasureSince([]string{"core", "seal"}, time.Now()) + + if c.Sealed() { + return nil + } + + c.stateLock.RLock() + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sys/seal", + ClientToken: token, + } + + // This will unlock the read lock + // We use background context since we may not be active + return c.sealInitCommon(namespace.RootContext(nil), req) +} + +// sealInitCommon is common logic for Seal and SealWithRequest and is used to +// re-seal the Vault. This requires the Vault to be unsealed again to perform +// any further operations. Note: this function will read-unlock the state lock. 
+func (c *Core) sealInitCommon(ctx context.Context, req *logical.Request) (retErr error) { + defer metrics.MeasureSince([]string{"core", "seal-internal"}, time.Now()) + + var unlocked bool + defer func() { + if !unlocked { + c.stateLock.RUnlock() + } + }() + + if req == nil { + retErr = multierror.Append(retErr, errors.New("nil request to seal")) + return retErr + } + + // Since there is no token store in standby nodes, sealing cannot be done. + // Ideally, the request has to be forwarded to leader node for validation + // and the operation should be performed. But for now, just returning with + // an error and recommending a vault restart, which essentially does the + // same thing. + if c.standby { + c.logger.Error("vault cannot seal when in standby mode; please restart instead") + retErr = multierror.Append(retErr, errors.New("vault cannot seal when in standby mode; please restart instead")) + return retErr + } + + acl, te, entity, identityPolicies, err := c.fetchACLTokenEntryAndEntity(ctx, req) + if err != nil { + retErr = multierror.Append(retErr, err) + return retErr + } + + // Audit-log the request before going any further + auth := &logical.Auth{ + ClientToken: req.ClientToken, + Accessor: req.ClientTokenAccessor, + } + if te != nil { + auth.IdentityPolicies = identityPolicies[te.NamespaceID] + delete(identityPolicies, te.NamespaceID) + auth.ExternalNamespacePolicies = identityPolicies + auth.TokenPolicies = te.Policies + auth.Policies = append(te.Policies, identityPolicies[te.NamespaceID]...) + auth.Metadata = te.Meta + auth.DisplayName = te.DisplayName + auth.EntityID = te.EntityID + auth.TokenType = te.Type + } + + logInput := &logical.LogInput{ + Auth: auth, + Request: req, + } + if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { + c.logger.Error("failed to audit request", "request_path", req.Path, "error", err) + retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue")) + return retErr + } + + if entity != nil && entity.Disabled { + c.logger.Warn("permission denied as the entity on the token is disabled") + retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + return retErr + } + if te != nil && te.EntityID != "" && entity == nil { + c.logger.Warn("permission denied as the entity on the token is invalid") + retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + return retErr + } + + // Attempt to use the token (decrement num_uses) + // On error bail out; if the token has been revoked, bail out too + if te != nil { + te, err = c.tokenStore.UseToken(ctx, te) + if err != nil { + c.logger.Error("failed to use token", "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + return retErr + } + if te == nil { + // Token is no longer valid + retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + return retErr + } + } + + // Verify that this operation is allowed + authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{ + RootPrivsRequired: true, + }) + if !authResults.Allowed { + retErr = multierror.Append(retErr, authResults.Error) + if authResults.Error.ErrorOrNil() == nil || authResults.DeniedError { + retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + } + return retErr + } + + if te != nil && te.NumUses == tokenRevocationPending { + // Token needs to be revoked. We do this immediately here because + // we won't have a token store after sealing. 
+ leaseID, err := c.expiration.CreateOrFetchRevocationLeaseByToken(c.activeContext, te) + if err == nil { + err = c.expiration.Revoke(c.activeContext, leaseID) + } + if err != nil { + c.logger.Error("token needed revocation before seal but failed to revoke", "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + } + } + + // Unlock; sealing will grab the lock when needed + unlocked = true + c.stateLock.RUnlock() + + sealErr := c.sealInternal() + + if sealErr != nil { + retErr = multierror.Append(retErr, sealErr) + } + + return +} + +// UIEnabled returns if the UI is enabled +func (c *Core) UIEnabled() bool { + return c.uiConfig.Enabled() +} + +// UIHeaders returns configured UI headers +func (c *Core) UIHeaders() (http.Header, error) { + return c.uiConfig.Headers(context.Background()) +} + +// sealInternal is an internal method used to seal the vault. It does not do +// any authorization checking. +func (c *Core) sealInternal() error { + return c.sealInternalWithOptions(true, false, true) +} + +func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock, shutdownRaft bool) error { + // Mark sealed, and if already marked return + if swapped := atomic.CompareAndSwapUint32(c.sealed, 0, 1); !swapped { + return nil + } + + c.logger.Info("marked as sealed") + + // Clear forwarding clients + c.requestForwardingConnectionLock.Lock() + c.clearForwardingClients() + c.requestForwardingConnectionLock.Unlock() + + activeCtxCancel := c.activeContextCancelFunc.Load().(context.CancelFunc) + cancelCtxAndLock := func() { + doneCh := make(chan struct{}) + go func() { + select { + case <-doneCh: + // Attempt to drain any inflight requests + case <-time.After(DefaultMaxRequestDuration): + if activeCtxCancel != nil { + activeCtxCancel() + } + } + }() + + c.stateLock.Lock() + close(doneCh) + // Stop requests from processing + if activeCtxCancel != nil { + activeCtxCancel() + } + } + + // Do pre-seal teardown if HA is not enabled + if c.ha == nil { + if grabStateLock { + cancelCtxAndLock() + defer c.stateLock.Unlock() + } + // Even in a non-HA context we key off of this for some things + c.standby = true + + // Stop requests from processing + if activeCtxCancel != nil { + activeCtxCancel() + } + + if err := c.preSeal(); err != nil { + c.logger.Error("pre-seal teardown failed", "error", err) + return fmt.Errorf("internal error") + } + } else { + // If we are keeping the lock we already have the state write lock + // held. Otherwise grab it here so that when stopCh is triggered we are + // locked. + if keepHALock { + atomic.StoreUint32(c.keepHALockOnStepDown, 1) + } + if grabStateLock { + cancelCtxAndLock() + defer c.stateLock.Unlock() + } + + // If we are trying to acquire the lock, force it to return with nil so + // runStandby will exit + // If we are active, signal the standby goroutine to shut down and wait + // for completion. We have the state lock here so nothing else should + // be toggling standby status. 
+ close(c.standbyStopCh) + c.logger.Debug("finished triggering standbyStopCh for runStandby") + + // Wait for runStandby to stop + <-c.standbyDoneCh + atomic.StoreUint32(c.keepHALockOnStepDown, 0) + c.logger.Debug("runStandby done") + } + + // If the storage backend needs to be sealed + if shutdownRaft { + if raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend); ok { + if err := raftStorage.TeardownCluster(c.getClusterListener()); err != nil { + c.logger.Error("error stopping storage cluster", "error", err) + return err + } + } + + // Stop the cluster listener + c.stopClusterListener() + } + + c.logger.Debug("sealing barrier") + if err := c.barrier.Seal(); err != nil { + c.logger.Error("error sealing barrier", "error", err) + return err + } + + if c.ha != nil { + sd, ok := c.ha.(physical.ServiceDiscovery) + if ok { + if err := sd.NotifySealedStateChange(); err != nil { + if c.logger.IsWarn() { + c.logger.Warn("failed to notify sealed status", "error", err) + } + } + } + } + + postSealInternal(c) + + c.logger.Info("vault is sealed") + + return nil +} + +type UnsealStrategy interface { + unseal(context.Context, log.Logger, *Core) error +} + +type standardUnsealStrategy struct{} + +func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c *Core) error { + // Clear forwarding clients; we're active + c.requestForwardingConnectionLock.Lock() + c.clearForwardingClients() + c.requestForwardingConnectionLock.Unlock() + + if err := postUnsealPhysical(c); err != nil { + return err + } + + if err := enterprisePostUnseal(c); err != nil { + return err + } + + if !c.IsDRSecondary() { + if err := c.ensureWrappingKey(ctx); err != nil { + return err + } + } + if err := c.setupPluginCatalog(ctx); err != nil { + return err + } + if err := c.loadMounts(ctx); err != nil { + return err + } + if err := enterpriseSetupFilteredPaths(c); err != nil { + return err + } + if err := c.setupMounts(ctx); err != nil { + return err + } + if err := c.setupPolicyStore(ctx); err != nil { + return err + } + if err := c.loadCORSConfig(ctx); err != nil { + return err + } + if err := c.loadCurrentRequestCounters(ctx, time.Now()); err != nil { + return err + } + if err := c.loadCredentials(ctx); err != nil { + return err + } + if err := enterpriseSetupFilteredPaths(c); err != nil { + return err + } + if err := c.setupCredentials(ctx); err != nil { + return err + } + if !c.IsDRSecondary() { + if err := c.startRollback(); err != nil { + return err + } + if err := c.setupExpiration(expireLeaseStrategyRevoke); err != nil { + return err + } + if err := c.loadAudits(ctx); err != nil { + return err + } + if err := c.setupAudits(ctx); err != nil { + return err + } + if err := c.loadIdentityStoreArtifacts(ctx); err != nil { + return err + } + if err := loadMFAConfigs(ctx, c); err != nil { + return err + } + if err := c.setupAuditedHeadersConfig(ctx); err != nil { + return err + } + } else { + c.auditBroker = NewAuditBroker(c.logger) + } + + if c.getClusterListener() != nil && (c.ha != nil || shouldStartClusterListener(c)) { + if err := c.setupRaftActiveNode(ctx); err != nil { + return err + } + + if err := c.startForwarding(ctx); err != nil { + return err + } + + } + + c.clusterParamsLock.Lock() + defer c.clusterParamsLock.Unlock() + if err := startReplication(c); err != nil { + return err + } + + return nil +} + +// postUnseal is invoked on the active node after the barrier is unsealed, but before +// allowing any user operations. 
This allows us to setup any state that +// requires the Vault to be unsealed such as mount tables, logical backends, +// credential stores, etc. +func (c *Core) postUnseal(ctx context.Context, ctxCancelFunc context.CancelFunc, unsealer UnsealStrategy) (retErr error) { + defer metrics.MeasureSince([]string{"core", "post_unseal"}, time.Now()) + + // Clear any out + c.postUnsealFuncs = nil + + // Create a new request context + c.activeContext = ctx + c.activeContextCancelFunc.Store(ctxCancelFunc) + + defer func() { + if retErr != nil { + ctxCancelFunc() + c.preSeal() + } + }() + c.logger.Info("post-unseal setup starting") + + // Enable the cache + c.physicalCache.Purge(ctx) + if !c.cachingDisabled { + c.physicalCache.SetEnabled(true) + } + + // Purge these for safety in case of a rekey + c.seal.SetBarrierConfig(ctx, nil) + if c.seal.RecoveryKeySupported() { + c.seal.SetRecoveryConfig(ctx, nil) + } + + if err := unsealer.unseal(ctx, c.logger, c); err != nil { + return err + } + + // Automatically re-encrypt the keys used for auto unsealing when the + // seal's encryption key changes. The regular rotation of cryptographic + // keys is a NIST recommendation. Access to prior keys for decryption + // is normally supported for a configurable time period. Re-encrypting + // the keys used for auto unsealing ensures Vault and its data will + // continue to be accessible even after prior seal keys are destroyed. + if seal, ok := c.seal.(*autoSeal); ok { + if err := seal.UpgradeKeys(c.activeContext); err != nil { + c.logger.Warn("post-unseal upgrade seal keys failed", "error", err) + } + } + + c.metricsCh = make(chan struct{}) + go c.emitMetrics(c.metricsCh) + + // This is intentionally the last block in this function. We want to allow + // writes just before allowing client requests, to ensure everything has + // been set up properly before any writes can have happened. + for _, v := range c.postUnsealFuncs { + v() + } + + if atomic.LoadUint32(c.sealMigrated) == 1 { + defer func() { atomic.StoreUint32(c.sealMigrated, 0) }() + if err := c.postSealMigration(ctx); err != nil { + c.logger.Warn("post-unseal post seal migration failed", "error", err) + } + } + + c.logger.Info("post-unseal setup complete") + return nil +} + +// preSeal is invoked before the barrier is sealed, allowing +// for any state teardown required. 
+func (c *Core) preSeal() error { + defer metrics.MeasureSince([]string{"core", "pre_seal"}, time.Now()) + c.logger.Info("pre-seal teardown starting") + + // Clear any pending funcs + c.postUnsealFuncs = nil + + // Clear any rekey progress + c.barrierRekeyConfig = nil + c.recoveryRekeyConfig = nil + + if c.metricsCh != nil { + close(c.metricsCh) + c.metricsCh = nil + } + var result error + + c.stopForwarding() + + c.stopRaftActiveNode() + + c.clusterParamsLock.Lock() + if err := stopReplication(c); err != nil { + result = multierror.Append(result, errwrap.Wrapf("error stopping replication: {{err}}", err)) + } + c.clusterParamsLock.Unlock() + + if err := c.teardownAudits(); err != nil { + result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err)) + } + if err := c.stopExpiration(); err != nil { + result = multierror.Append(result, errwrap.Wrapf("error stopping expiration: {{err}}", err)) + } + if err := c.teardownCredentials(context.Background()); err != nil { + result = multierror.Append(result, errwrap.Wrapf("error tearing down credentials: {{err}}", err)) + } + if err := c.teardownPolicyStore(); err != nil { + result = multierror.Append(result, errwrap.Wrapf("error tearing down policy store: {{err}}", err)) + } + if err := c.stopRollback(); err != nil { + result = multierror.Append(result, errwrap.Wrapf("error stopping rollback: {{err}}", err)) + } + if err := c.unloadMounts(context.Background()); err != nil { + result = multierror.Append(result, errwrap.Wrapf("error unloading mounts: {{err}}", err)) + } + if err := enterprisePreSeal(c); err != nil { + result = multierror.Append(result, err) + } + + preSealPhysical(c) + + c.logger.Info("pre-seal teardown complete") + return result +} + +func enterprisePostUnsealImpl(c *Core) error { + return nil +} + +func enterprisePreSealImpl(c *Core) error { + return nil +} + +func enterpriseSetupFilteredPathsImpl(c *Core) error { + return nil +} + +func startReplicationImpl(c *Core) error { + return nil +} + +func stopReplicationImpl(c *Core) error { + return nil +} + +// emitMetrics is used to periodically expose metrics while running +func (c *Core) emitMetrics(stopCh chan struct{}) { + emitTimer := time.Tick(time.Second) + writeTimer := time.Tick(c.counters.syncInterval) + + for { + select { + case <-emitTimer: + c.metricsMutex.Lock() + if c.expiration != nil { + c.expiration.emitMetrics() + } + c.metricsMutex.Unlock() + + case <-writeTimer: + if stopped := grabLockOrStop(c.stateLock.RLock, c.stateLock.RUnlock, stopCh); stopped { + // Go through the loop again, this time the stop channel case + // should trigger + continue + } + if c.perfStandby { + syncCounter(c) + } else { + err := c.saveCurrentRequestCounters(context.Background(), time.Now()) + if err != nil { + c.logger.Error("writing request counters to barrier", "err", err) + } + } + c.stateLock.RUnlock() + + case <-stopCh: + return + } + } +} + +func (c *Core) ReplicationState() consts.ReplicationState { + return consts.ReplicationState(atomic.LoadUint32(c.replicationState)) +} + +func (c *Core) ActiveNodeReplicationState() consts.ReplicationState { + return consts.ReplicationState(atomic.LoadUint32(c.activeNodeReplicationState)) +} + +func (c *Core) SealAccess() *SealAccess { + return NewSealAccess(c.seal) +} + +// StorageType returns a string equal to the storage configuration's type. 
+func (c *Core) StorageType() string { + return c.storageType +} + +func (c *Core) Logger() log.Logger { + return c.logger +} + +func (c *Core) BarrierKeyLength() (min, max int) { + min, max = c.barrier.KeyLength() + max += shamir.ShareOverhead + return +} + +func (c *Core) AuditedHeadersConfig() *AuditedHeadersConfig { + return c.auditedHeaders +} + +func waitUntilWALShippedImpl(ctx context.Context, c *Core, index uint64) bool { + return true +} + +func merkleRootImpl(c *Core) string { + return "" +} + +func lastWALImpl(c *Core) uint64 { + return 0 +} + +func lastPerformanceWALImpl(c *Core) uint64 { + return 0 +} + +func lastRemoteWALImpl(c *Core) uint64 { + return 0 +} + +func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfig, error) { + pe, err := c.physical.Get(ctx, barrierSealConfigPath) + if err != nil { + return nil, nil, errwrap.Wrapf("failed to fetch barrier seal configuration at migration check time: {{err}}", err) + } + if pe == nil { + return nil, nil, nil + } + + barrierConf := new(SealConfig) + + if err := jsonutil.DecodeJSON(pe.Value, barrierConf); err != nil { + return nil, nil, errwrap.Wrapf("failed to decode barrier seal configuration at migration check time: {{err}}", err) + } + err = barrierConf.Validate() + if err != nil { + return nil, nil, errwrap.Wrapf("failed to validate barrier seal configuration at migration check time: {{err}}", err) + } + // In older versions of vault the default seal would not store a type. This + // is here to offer backwards compatibility for older seal configs. + if barrierConf.Type == "" { + barrierConf.Type = seal.Shamir + } + + var recoveryConf *SealConfig + pe, err = c.physical.Get(ctx, recoverySealConfigPlaintextPath) + if err != nil { + return nil, nil, errwrap.Wrapf("failed to fetch seal configuration at migration check time: {{err}}", err) + } + if pe != nil { + recoveryConf = &SealConfig{} + if err := jsonutil.DecodeJSON(pe.Value, recoveryConf); err != nil { + return nil, nil, errwrap.Wrapf("failed to decode seal configuration at migration check time: {{err}}", err) + } + err = recoveryConf.Validate() + if err != nil { + return nil, nil, errwrap.Wrapf("failed to validate seal configuration at migration check time: {{err}}", err) + } + // In older versions of vault the default seal would not store a type. This + // is here to offer backwards compatibility for older seal configs. + if recoveryConf.Type == "" { + recoveryConf.Type = seal.Shamir + } + } + + return barrierConf, recoveryConf, nil +} + +func (c *Core) SetSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { + c.stateLock.Lock() + defer c.stateLock.Unlock() + c.unwrapSeal = unwrapSeal + if c.unwrapSeal != nil { + c.unwrapSeal.SetCore(c) + } + if newSeal != nil && migrationSeal != nil { + c.migrationSeal = migrationSeal + c.migrationSeal.SetCore(c) + c.seal = newSeal + c.seal.SetCore(c) + c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationSeal.BarrierType(), "to_barrier_type", c.seal.BarrierType()) + c.initSealsForMigration() + } +} + +// unsealKeyToMasterKey takes a key provided by the user, either a recovery key +// if using an autoseal or an unseal key with Shamir. It returns a nil error +// if the key is valid and an error otherwise. It also returns the master key +// that can be used to unseal the barrier. 
+func (c *Core) unsealKeyToMasterKey(ctx context.Context, combinedKey []byte) ([]byte, error) {
+	switch c.seal.StoredKeysSupported() {
+	case StoredKeysSupportedGeneric:
+		if err := c.seal.VerifyRecoveryKey(ctx, combinedKey); err != nil {
+			return nil, errwrap.Wrapf("recovery key verification failed: {{err}}", err)
+		}
+
+		storedKeys, err := c.seal.GetStoredKeys(ctx)
+		if err == nil && len(storedKeys) != 1 {
+			err = fmt.Errorf("expected exactly one stored key, got %d", len(storedKeys))
+		}
+		if err != nil {
+			return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err)
+		}
+		return storedKeys[0], nil
+
+	case StoredKeysSupportedShamirMaster:
+		testseal := NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("testseal")))
+		testseal.SetCore(c)
+		cfg, err := c.seal.BarrierConfig(ctx)
+		if err != nil {
+			return nil, errwrap.Wrapf("failed to setup test barrier config: {{err}}", err)
+		}
+		testseal.SetCachedBarrierConfig(cfg)
+		err = testseal.GetAccess().(*shamirseal.ShamirSeal).SetKey(combinedKey)
+		if err != nil {
+			return nil, errwrap.Wrapf("failed to setup unseal key: {{err}}", err)
+		}
+		storedKeys, err := testseal.GetStoredKeys(ctx)
+		if err == nil && len(storedKeys) != 1 {
+			err = fmt.Errorf("expected exactly one stored key, got %d", len(storedKeys))
+		}
+		if err != nil {
+			return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err)
+		}
+		return storedKeys[0], nil
+
+	case StoredKeysNotSupported:
+		return combinedKey, nil
+	}
+	return nil, fmt.Errorf("invalid seal")
+}
+
+func (c *Core) IsInSealMigration() bool {
+	c.stateLock.RLock()
+	defer c.stateLock.RUnlock()
+	return c.migrationSeal != nil
+}
+
+func (c *Core) BarrierEncryptorAccess() *BarrierEncryptorAccess {
+	return NewBarrierEncryptorAccess(c.barrier)
+}
+
+func (c *Core) PhysicalAccess() *physical.PhysicalAccess {
+	return physical.NewPhysicalAccess(c.physical)
+}
+
+func (c *Core) RouterAccess() *RouterAccess {
+	return NewRouterAccess(c)
+}
+
+// IsDRSecondary returns whether the current cluster state is a DR secondary.
+func (c *Core) IsDRSecondary() bool {
+	return c.ReplicationState().HasState(consts.ReplicationDRSecondary)
+}
+
+func (c *Core) AddLogger(logger log.Logger) {
+	c.allLoggersLock.Lock()
+	defer c.allLoggersLock.Unlock()
+	c.allLoggers = append(c.allLoggers, logger)
+}
+
+func (c *Core) SetLogLevel(level log.Level) {
+	c.allLoggersLock.RLock()
+	defer c.allLoggersLock.RUnlock()
+	for _, logger := range c.allLoggers {
+		logger.SetLevel(level)
+	}
+}
+
+// SetConfig sets core's config object to the newly provided config.
+func (c *Core) SetConfig(conf *server.Config) {
+	c.stateLock.Lock()
+	c.rawConfig = conf
+	c.stateLock.Unlock()
+}
+
+// SanitizedConfig returns a sanitized version of the current config.
+// See server.Config.Sanitized for specific values omitted.
+func (c *Core) SanitizedConfig() map[string]interface{} {
+	c.stateLock.RLock()
+	defer c.stateLock.RUnlock()
+	return c.rawConfig.Sanitized()
+}
+
+// MetricsHelper returns the global metrics helper which allows external
+// packages to access Vault's internal metrics.
+func (c *Core) MetricsHelper() *metricsutil.MetricsHelper {
+	return c.metricsHelper
+}
+
+// BuiltinRegistry is an interface that allows the "vault" package to use
+// the registry of builtin plugins without getting an import cycle. It
+// also allows for mocking the registry easily.
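+// A test double needs only the three methods below; a hypothetical sketch:
+//
+//	type emptyRegistry struct{}
+//
+//	func (emptyRegistry) Contains(string, consts.PluginType) bool { return false }
+//	func (emptyRegistry) Get(string, consts.PluginType) (func() (interface{}, error), bool) {
+//		return nil, false
+//	}
+//	func (emptyRegistry) Keys(consts.PluginType) []string { return nil }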
+type BuiltinRegistry interface { + Contains(name string, pluginType consts.PluginType) bool + Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) + Keys(pluginType consts.PluginType) []string +} diff --git a/vendor/github.com/hashicorp/vault/vault/core_util.go b/vendor/github.com/hashicorp/vault/vault/core_util.go new file mode 100644 index 00000000..192eaebe --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/core_util.go @@ -0,0 +1,125 @@ +// +build !enterprise + +package vault + +import ( + "context" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/license" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault/replication" +) + +type entCore struct{} + +type LicensingConfig struct { + AdditionalPublicKeys []interface{} +} + +func coreInit(c *Core, conf *CoreConfig) error { + phys := conf.Physical + _, txnOK := phys.(physical.Transactional) + sealUnwrapperLogger := conf.Logger.Named("storage.sealunwrapper") + c.allLoggers = append(c.allLoggers, sealUnwrapperLogger) + c.sealUnwrapper = NewSealUnwrapper(phys, sealUnwrapperLogger) + // Wrap the physical backend in a cache layer if enabled + cacheLogger := c.baseLogger.Named("storage.cache") + c.allLoggers = append(c.allLoggers, cacheLogger) + if txnOK { + c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, cacheLogger) + } else { + c.physical = physical.NewCache(c.sealUnwrapper, conf.CacheSize, cacheLogger) + } + c.physicalCache = c.physical.(physical.ToggleablePurgemonster) + + // Wrap in encoding checks + if !conf.DisableKeyEncodingChecks { + c.physical = physical.NewStorageEncoding(c.physical) + } + return nil +} + +func createSecondaries(*Core, *CoreConfig) {} + +func addExtraLogicalBackends(*Core, map[string]logical.Factory) {} + +func addExtraCredentialBackends(*Core, map[string]logical.Factory) {} + +func preUnsealInternal(context.Context, *Core) error { return nil } + +func postSealInternal(*Core) {} + +func preSealPhysical(c *Core) { + switch c.sealUnwrapper.(type) { + case *sealUnwrapper: + c.sealUnwrapper.(*sealUnwrapper).stopUnwraps() + case *transactionalSealUnwrapper: + c.sealUnwrapper.(*transactionalSealUnwrapper).stopUnwraps() + } + + // Purge the cache + c.physicalCache.SetEnabled(false) + c.physicalCache.Purge(context.Background()) +} + +func postUnsealPhysical(c *Core) error { + switch c.sealUnwrapper.(type) { + case *sealUnwrapper: + c.sealUnwrapper.(*sealUnwrapper).runUnwraps() + case *transactionalSealUnwrapper: + c.sealUnwrapper.(*transactionalSealUnwrapper).runUnwraps() + } + return nil +} + +func loadMFAConfigs(context.Context, *Core) error { return nil } + +func shouldStartClusterListener(*Core) bool { return true } + +func hasNamespaces(*Core) bool { return false } + +func (c *Core) Features() license.Features { + return license.FeatureNone +} + +func (c *Core) HasFeature(license.Features) bool { + return false +} + +func (c *Core) collectNamespaces() []*namespace.Namespace { + return []*namespace.Namespace{ + namespace.RootNamespace, + } +} + +func (c *Core) namepaceByPath(string) *namespace.Namespace { + return namespace.RootNamespace +} + +func (c *Core) setupReplicatedClusterPrimary(*replication.Cluster) error { return nil } + +func (c *Core) perfStandbyCount() int { return 0 } + +func (c *Core) removePathFromFilteredPaths(context.Context, string, string) error { + return nil +} + +func (c *Core) checkReplicatedFiltering(context.Context, 
*MountEntry, string) (bool, error) {
+	return false, nil
+}
+
+func (c *Core) invalidateSentinelPolicy(PolicyType, string) {}
+
+func (c *Core) removePerfStandbySecondary(context.Context, string) {}
+
+func (c *Core) removeAllPerfStandbySecondaries() {}
+
+func (c *Core) perfStandbyClusterHandler() (*replication.Cluster, chan struct{}, error) {
+	return nil, make(chan struct{}), nil
+}
+
+func (c *Core) initSealsForMigration() {}
+
+func (c *Core) postSealMigration(ctx context.Context) error { return nil }
diff --git a/vendor/github.com/hashicorp/vault/vault/cors.go b/vendor/github.com/hashicorp/vault/vault/cors.go
new file mode 100644
index 00000000..8a9533b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/cors.go
@@ -0,0 +1,163 @@
+package vault
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"sync/atomic"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/strutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	CORSDisabled uint32 = iota
+	CORSEnabled
+)
+
+var StdAllowedHeaders = []string{
+	"Content-Type",
+	"X-Requested-With",
+	"X-Vault-AWS-IAM-Server-ID",
+	"X-Vault-MFA",
+	"X-Vault-No-Request-Forwarding",
+	"X-Vault-Wrap-Format",
+	"X-Vault-Wrap-TTL",
+	"X-Vault-Policy-Override",
+	"Authorization",
+	consts.AuthHeaderName,
+}
+
+// CORSConfig stores the state of the CORS configuration.
+type CORSConfig struct {
+	sync.RWMutex   `json:"-"`
+	core           *Core
+	Enabled        *uint32  `json:"enabled"`
+	AllowedOrigins []string `json:"allowed_origins,omitempty"`
+	AllowedHeaders []string `json:"allowed_headers,omitempty"`
+}
+
+func (c *Core) saveCORSConfig(ctx context.Context) error {
+	view := c.systemBarrierView.SubView("config/")
+
+	enabled := atomic.LoadUint32(c.corsConfig.Enabled)
+	localConfig := &CORSConfig{
+		Enabled: &enabled,
+	}
+	c.corsConfig.RLock()
+	localConfig.AllowedOrigins = c.corsConfig.AllowedOrigins
+	localConfig.AllowedHeaders = c.corsConfig.AllowedHeaders
+	c.corsConfig.RUnlock()
+
+	entry, err := logical.StorageEntryJSON("cors", localConfig)
+	if err != nil {
+		return errwrap.Wrapf("failed to create CORS config entry: {{err}}", err)
+	}
+
+	if err := view.Put(ctx, entry); err != nil {
+		return errwrap.Wrapf("failed to save CORS config: {{err}}", err)
+	}
+
+	return nil
+}
+
+// This should only be called with the core state lock held for writing
+func (c *Core) loadCORSConfig(ctx context.Context) error {
+	view := c.systemBarrierView.SubView("config/")
+
+	// Load the config in
+	out, err := view.Get(ctx, "cors")
+	if err != nil {
+		return errwrap.Wrapf("failed to read CORS config: {{err}}", err)
+	}
+	if out == nil {
+		return nil
+	}
+
+	newConfig := new(CORSConfig)
+	err = out.DecodeJSON(newConfig)
+	if err != nil {
+		return err
+	}
+
+	if newConfig.Enabled == nil {
+		newConfig.Enabled = new(uint32)
+	}
+
+	newConfig.core = c
+
+	c.corsConfig = newConfig
+
+	return nil
+}
+
+// Enable takes either a '*' or a list of URLs that can make
+// cross-origin requests to Vault.
+func (c *CORSConfig) Enable(ctx context.Context, urls []string, headers []string) error {
+	if len(urls) == 0 {
+		return errors.New("at least one origin or the wildcard must be provided")
+	}
+
+	if strutil.StrListContains(urls, "*") && len(urls) > 1 {
+		return errors.New("to allow all origins the '*' must be the only value for allowed_origins")
+	}
+
+	c.Lock()
+	c.AllowedOrigins = urls
+
+	// Start with the standard headers that Vault accepts.
+ c.AllowedHeaders = append([]string{}, StdAllowedHeaders...) + + // Allow the user to add additional headers to the list of + // headers allowed on cross-origin requests. + if len(headers) > 0 { + c.AllowedHeaders = append(c.AllowedHeaders, headers...) + } + c.Unlock() + + atomic.StoreUint32(c.Enabled, CORSEnabled) + + return c.core.saveCORSConfig(ctx) +} + +// IsEnabled returns the value of CORSConfig.isEnabled +func (c *CORSConfig) IsEnabled() bool { + return atomic.LoadUint32(c.Enabled) == CORSEnabled +} + +// Disable sets CORS to disabled and clears the allowed origins & headers. +func (c *CORSConfig) Disable(ctx context.Context) error { + atomic.StoreUint32(c.Enabled, CORSDisabled) + c.Lock() + + c.AllowedOrigins = nil + c.AllowedHeaders = nil + + c.Unlock() + + return c.core.saveCORSConfig(ctx) +} + +// IsValidOrigin determines if the origin of the request is allowed to make +// cross-origin requests based on the CORSConfig. +func (c *CORSConfig) IsValidOrigin(origin string) bool { + // If we aren't enabling CORS then all origins are valid + if !c.IsEnabled() { + return true + } + + c.RLock() + defer c.RUnlock() + + if len(c.AllowedOrigins) == 0 { + return false + } + + if len(c.AllowedOrigins) == 1 && (c.AllowedOrigins)[0] == "*" { + return true + } + + return strutil.StrListContains(c.AllowedOrigins, origin) +} diff --git a/vendor/github.com/hashicorp/vault/vault/counters.go b/vendor/github.com/hashicorp/vault/vault/counters.go new file mode 100644 index 00000000..b1cb40c7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/counters.go @@ -0,0 +1,238 @@ +package vault + +import ( + "context" + "sort" + "sync/atomic" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + requestCounterDatePathFormat = "2006/01" + countersPath = systemBarrierPrefix + "counters" + requestCountersPath = "sys/counters/requests/" +) + +type counters struct { + // requests counts requests seen by Vault this month; does not include requests + // excluded by design, e.g. health checks and UI asset requests. + requests *uint64 + // activePath is set at startup to the path we primed the requests counter from, + // or empty string if there wasn't a relevant path - either because this is the first + // time Vault starts with the feature enabled, or because Vault hadn't written + // out the request counter this month yet. + // Whenever we write out the counters, we update activePath if it's no longer + // accurate. This coincides with a reset of the counters. + // There's no lock because the only reader/writer of activePath is the goroutine + // doing background syncs. + activePath string + // syncInterval determines how often the counters get written to storage (on primary) + // or synced to primary. + syncInterval time.Duration +} + +// RequestCounter stores the state of request counters for a single unspecified period. +type RequestCounter struct { + // Total is the number of requests seen during a given period. + Total *uint64 `json:"total"` +} + +// DatedRequestCounter holds request counters from a single period of time. +type DatedRequestCounter struct { + // StartTime is when the period starts. + StartTime time.Time `json:"start_time"` + // RequestCounter counts requests. + RequestCounter +} + +// loadAllRequestCounters returns all request counters found in storage, +// ordered by time (oldest first.) 
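+// Date paths use requestCounterDatePathFormat ("2006/01"), so the counter
+// for November 2019 is stored at "2019/11" and parses back to the start of
+// its period:
+//
+//	t, _ := time.Parse(requestCounterDatePathFormat, "2019/11")
+//	// t is 2019-11-01 00:00:00 +0000 UTC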
+func (c *Core) loadAllRequestCounters(ctx context.Context, now time.Time) ([]DatedRequestCounter, error) { + view := NewBarrierView(c.barrier, requestCountersPath) + + datepaths, err := view.List(ctx, "") + if err != nil { + return nil, errwrap.Wrapf("failed to read request counters: {{err}}", err) + } + + var all []DatedRequestCounter + sort.Strings(datepaths) + for _, datepath := range datepaths { + datesubpaths, err := view.List(ctx, datepath) + if err != nil { + return nil, errwrap.Wrapf("failed to read request counters: {{err}}", err) + } + sort.Strings(datesubpaths) + for _, datesubpath := range datesubpaths { + fullpath := datepath + datesubpath + counter, err := c.loadRequestCounters(ctx, fullpath) + if err != nil { + return nil, err + } + + t, err := time.Parse(requestCounterDatePathFormat, fullpath) + if err != nil { + return nil, err + } + + all = append(all, DatedRequestCounter{StartTime: t, RequestCounter: *counter}) + } + } + + start, _ := time.Parse(requestCounterDatePathFormat, now.Format(requestCounterDatePathFormat)) + idx := sort.Search(len(all), func(i int) bool { + return !all[i].StartTime.Before(start) + }) + cur := atomic.LoadUint64(c.counters.requests) + if idx < len(all) { + all[idx].RequestCounter.Total = &cur + } else { + all = append(all, DatedRequestCounter{StartTime: start, RequestCounter: RequestCounter{Total: &cur}}) + } + + return all, nil +} + +// loadCurrentRequestCounters reads the current RequestCounter out of storage. +// The in-memory current request counter is populated with the value read, if any. +// now should be the current time; it is a parameter to facilitate testing. +func (c *Core) loadCurrentRequestCounters(ctx context.Context, now time.Time) error { + datepath := now.Format(requestCounterDatePathFormat) + counter, err := c.loadRequestCounters(ctx, datepath) + if err != nil { + return err + } + if counter != nil { + c.counters.activePath = datepath + atomic.StoreUint64(c.counters.requests, *counter.Total) + } + return nil +} + +// loadRequestCounters reads a RequestCounter out of storage at location datepath. +// If nothing is found at that path, that isn't an error: a reference to a zero +// RequestCounter is returned. +func (c *Core) loadRequestCounters(ctx context.Context, datepath string) (*RequestCounter, error) { + view := NewBarrierView(c.barrier, requestCountersPath) + + out, err := view.Get(ctx, datepath) + if err != nil { + return nil, errwrap.Wrapf("failed to read request counters: {{err}}", err) + } + if out == nil { + return nil, nil + } + + newCounters := &RequestCounter{} + err = out.DecodeJSON(newCounters) + if err != nil { + return nil, err + } + + return newCounters, nil +} + +// saveCurrentRequestCounters writes the current RequestCounter to storage. +// The in-memory current request counter is reset to zero after writing if +// we've entered a new month. +// now should be the current time; it is a parameter to facilitate testing. +func (c *Core) saveCurrentRequestCounters(ctx context.Context, now time.Time) error { + view := NewBarrierView(c.barrier, requestCountersPath) + requests := atomic.LoadUint64(c.counters.requests) + curDatePath := now.Format(requestCounterDatePathFormat) + + // If activePath is empty string, we were started with nothing in storage + // for the current month, so we should not reset the in-mem counter. + // But if activePath is nonempty and not curDatePath, we should reset. 
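+	// Worked example: if we primed from "2019/10" and it is now November 2019,
+	// the final October total is written under "2019/10", the in-memory
+	// counter is reset to zero, and activePath moves to "2019/11".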
+ shouldReset, writeDatePath := false, curDatePath + if c.counters.activePath != "" && c.counters.activePath != curDatePath { + shouldReset, writeDatePath = true, c.counters.activePath + } + + localCounters := &RequestCounter{ + Total: &requests, + } + entry, err := logical.StorageEntryJSON(writeDatePath, localCounters) + if err != nil { + return errwrap.Wrapf("failed to create request counters entry: {{err}}", err) + } + + if err := view.Put(ctx, entry); err != nil { + return errwrap.Wrapf("failed to save request counters: {{err}}", err) + } + + if shouldReset { + atomic.StoreUint64(c.counters.requests, 0) + } + if c.counters.activePath != curDatePath { + c.counters.activePath = curDatePath + } + + return nil +} + +// ActiveTokens contains the number of active tokens. +type ActiveTokens struct { + // ServiceTokens contains information about the number of active service + // tokens. + ServiceTokens TokenCounter `json:"service_tokens"` +} + +// TokenCounter counts the number of tokens +type TokenCounter struct { + // Total is the total number of tokens + Total int `json:"total"` +} + +// countActiveTokens returns the number of active tokens +func (c *Core) countActiveTokens(ctx context.Context) (*ActiveTokens, error) { + + // Get all of the namespaces + ns := c.collectNamespaces() + + // Count the tokens under each namespace + total := 0 + for i := 0; i < len(ns); i++ { + ids, err := c.tokenStore.idView(ns[i]).List(ctx, "") + if err != nil { + return nil, err + } + total += len(ids) + } + + return &ActiveTokens{ + ServiceTokens: TokenCounter{ + Total: total, + }, + }, nil +} + +// ActiveEntities contains the number of active entities. +type ActiveEntities struct { + // Entities contains information about the number of active entities. + Entities EntityCounter `json:"entities"` +} + +// EntityCounter counts the number of entities +type EntityCounter struct { + // Total is the total number of entities + Total int `json:"total"` +} + +// countActiveEntities returns the number of active entities +func (c *Core) countActiveEntities(ctx context.Context) (*ActiveEntities, error) { + + count, err := c.identityStore.countEntities() + if err != nil { + return nil, err + } + + return &ActiveEntities{ + Entities: EntityCounter{ + Total: count, + }, + }, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go b/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go new file mode 100644 index 00000000..72dc9f72 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go @@ -0,0 +1,297 @@ +package vault + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/errwrap" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/license" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/version" +) + +type ctxKeyForwardedRequestMountAccessor struct{} + +func (c ctxKeyForwardedRequestMountAccessor) String() string { + return "forwarded-req-mount-accessor" +} + +type dynamicSystemView struct { + core *Core + mountEntry *MountEntry +} + +type extendedSystemView interface { + logical.SystemView + logical.ExtendedSystemView + // SudoPrivilege won't work over the plugin system so we keep it here + // instead of in sdk/logical to avoid exposing to plugins + SudoPrivilege(context.Context, string, string) bool +} + +type 
extendedSystemViewImpl struct {
+	dynamicSystemView
+}
+
+func (e extendedSystemViewImpl) Auditor() logical.Auditor {
+	return genericAuditor{
+		mountType: e.mountEntry.Type,
+		namespace: e.mountEntry.Namespace(),
+		c:         e.core,
+	}
+}
+
+func (e extendedSystemViewImpl) ForwardGenericRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) {
+	// Forward the request if allowed
+	if couldForward(e.core) {
+		ctx = namespace.ContextWithNamespace(ctx, e.mountEntry.Namespace())
+		ctx = context.WithValue(ctx, ctxKeyForwardedRequestMountAccessor{}, e.mountEntry.Accessor)
+		return forward(ctx, e.core, req)
+	}
+
+	return nil, logical.ErrReadOnly
+}
+
+// SudoPrivilege returns true if the given path has sudo privileges
+// for the given client token
+func (e extendedSystemViewImpl) SudoPrivilege(ctx context.Context, path string, token string) bool {
+	// Resolve the token policy
+	te, err := e.core.tokenStore.Lookup(ctx, token)
+	if err != nil {
+		e.core.logger.Error("failed to lookup token", "error", err)
+		return false
+	}
+
+	// Ensure the token is valid
+	if te == nil {
+		e.core.logger.Error("entry not found for given token")
+		return false
+	}
+
+	policies := make(map[string][]string)
+	// Add token policies
+	policies[te.NamespaceID] = append(policies[te.NamespaceID], te.Policies...)
+
+	tokenNS, err := NamespaceByID(ctx, te.NamespaceID, e.core)
+	if err != nil {
+		e.core.logger.Error("failed to lookup token namespace", "error", err)
+		return false
+	}
+	if tokenNS == nil {
+		e.core.logger.Error("failed to lookup token namespace", "error", namespace.ErrNoNamespace)
+		return false
+	}
+
+	// Add identity policies from all the namespaces
+	entity, identityPolicies, err := e.core.fetchEntityAndDerivedPolicies(ctx, tokenNS, te.EntityID)
+	if err != nil {
+		e.core.logger.Error("failed to fetch identity policies", "error", err)
+		return false
+	}
+	for nsID, nsPolicies := range identityPolicies {
+		policies[nsID] = append(policies[nsID], nsPolicies...)
+	}
+
+	tokenCtx := namespace.ContextWithNamespace(ctx, tokenNS)
+
+	// Construct the corresponding ACL object. Derive and use a new context that
+	// uses the req.ClientToken's namespace
+	acl, err := e.core.policyStore.ACL(tokenCtx, entity, policies)
+	if err != nil {
+		e.core.logger.Error("failed to retrieve ACL for token's policies", "token_policies", te.Policies, "error", err)
+		return false
+	}
+
+	// The operation type isn't important here as this is run from a path the
+	// user has already been given access to; we only care about whether they
+	// have sudo. Note that we use root context because the path that comes in
+	// must be fully-qualified already so we don't want AllowOperation to
+	// prepend a namespace prefix onto it.
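+	// For illustration (hypothetical policy, not part of this change): a token
+	// holding
+	//   path "sys/rotate" { capabilities = ["sudo", "update"] }
+	// would yield authResults.RootPrivs == true for path "sys/rotate".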
+	req := new(logical.Request)
+	req.Operation = logical.ReadOperation
+	req.Path = path
+	authResults := acl.AllowOperation(namespace.RootContext(ctx), req, true)
+	return authResults.RootPrivs
+}
+
+func (d dynamicSystemView) DefaultLeaseTTL() time.Duration {
+	def, _ := d.fetchTTLs()
+	return def
+}
+
+func (d dynamicSystemView) MaxLeaseTTL() time.Duration {
+	_, max := d.fetchTTLs()
+	return max
+}
+
+// fetchTTLs returns the default and max TTLs corresponding to a particular
+// mount point, or the system default
+func (d dynamicSystemView) fetchTTLs() (def, max time.Duration) {
+	def = d.core.defaultLeaseTTL
+	max = d.core.maxLeaseTTL
+
+	if d.mountEntry != nil {
+		if d.mountEntry.Config.DefaultLeaseTTL != 0 {
+			def = d.mountEntry.Config.DefaultLeaseTTL
+		}
+		if d.mountEntry.Config.MaxLeaseTTL != 0 {
+			max = d.mountEntry.Config.MaxLeaseTTL
+		}
+	}
+
+	return
+}
+
+// Tainted indicates that the mount is in the process of being removed
+func (d dynamicSystemView) Tainted() bool {
+	return d.mountEntry.Tainted
+}
+
+// CachingDisabled indicates whether to use caching behavior
+func (d dynamicSystemView) CachingDisabled() bool {
+	return d.core.cachingDisabled || (d.mountEntry != nil && d.mountEntry.Config.ForceNoCache)
+}
+
+func (d dynamicSystemView) LocalMount() bool {
+	return d.mountEntry != nil && d.mountEntry.Local
+}
+
+// Checks if this is a primary Vault instance. Caller should hold the stateLock
+// in read mode.
+func (d dynamicSystemView) ReplicationState() consts.ReplicationState {
+	state := d.core.ReplicationState()
+	if d.core.perfStandby {
+		state |= consts.ReplicationPerformanceStandby
+	}
+	return state
+}
+
+func (d dynamicSystemView) HasFeature(feature license.Features) bool {
+	return d.core.HasFeature(feature)
+}
+
+// ResponseWrapData wraps the given data in a cubbyhole and returns the
+// token used to unwrap.
+func (d dynamicSystemView) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
+	req := &logical.Request{
+		Operation: logical.CreateOperation,
+		Path:      "sys/wrapping/wrap",
+	}
+
+	resp := &logical.Response{
+		WrapInfo: &wrapping.ResponseWrapInfo{
+			TTL: ttl,
+		},
+		Data: data,
+	}
+
+	if jwt {
+		resp.WrapInfo.Format = "jwt"
+	}
+
+	_, err := d.core.wrapInCubbyhole(ctx, req, resp, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return resp.WrapInfo, nil
+}
+
+// LookupPlugin looks for a plugin with the given name in the plugin catalog. It
+// returns a PluginRunner or an error if no plugin was found.
+func (d dynamicSystemView) LookupPlugin(ctx context.Context, name string, pluginType consts.PluginType) (*pluginutil.PluginRunner, error) {
+	if d.core == nil {
+		return nil, fmt.Errorf("system view core is nil")
+	}
+	if d.core.pluginCatalog == nil {
+		return nil, fmt.Errorf("system view core plugin catalog is nil")
+	}
+	r, err := d.core.pluginCatalog.Get(ctx, name, pluginType)
+	if err != nil {
+		return nil, err
+	}
+	if r == nil {
+		return nil, errwrap.Wrapf(fmt.Sprintf("{{err}}: %s", name), ErrPluginNotFound)
+	}
+
+	return r, nil
+}
+
+// MlockEnabled returns the configuration setting for enabling mlock on plugins.
+func (d dynamicSystemView) MlockEnabled() bool {
+	return d.core.enableMlock
+}
+
+func (d dynamicSystemView) EntityInfo(entityID string) (*logical.Entity, error) {
+	// Requests from tokens created by the token backend will not have entity information.
+	// Return missing entity instead of error when requesting from MemDB.
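+	// (A nil, nil return here means "no entity"; callers should not treat it
+	// as an error.)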
+	if entityID == "" {
+		return nil, nil
+	}
+
+	if d.core == nil {
+		return nil, fmt.Errorf("system view core is nil")
+	}
+	if d.core.identityStore == nil {
+		return nil, fmt.Errorf("system view identity store is nil")
+	}
+
+	// Retrieve the entity from MemDB
+	entity, err := d.core.identityStore.MemDBEntityByID(entityID, false)
+	if err != nil {
+		return nil, err
+	}
+	if entity == nil {
+		return nil, nil
+	}
+
+	// Return a subset of the data
+	ret := &logical.Entity{
+		ID:       entity.ID,
+		Name:     entity.Name,
+		Disabled: entity.Disabled,
+	}
+
+	if entity.Metadata != nil {
+		ret.Metadata = make(map[string]string, len(entity.Metadata))
+		for k, v := range entity.Metadata {
+			ret.Metadata[k] = v
+		}
+	}
+
+	aliases := make([]*logical.Alias, len(entity.Aliases))
+	for i, a := range entity.Aliases {
+		alias := &logical.Alias{
+			MountAccessor: a.MountAccessor,
+			Name:          a.Name,
+		}
+		// MountType is not stored with the entity and must be looked up
+		if mount := d.core.router.validateMountByAccessor(a.MountAccessor); mount != nil {
+			alias.MountType = mount.MountType
+		}
+
+		if a.Metadata != nil {
+			alias.Metadata = make(map[string]string, len(a.Metadata))
+			for k, v := range a.Metadata {
+				alias.Metadata[k] = v
+			}
+		}
+
+		aliases[i] = alias
+	}
+	ret.Aliases = aliases
+
+	return ret, nil
+}
+
+func (d dynamicSystemView) PluginEnv(_ context.Context) (*logical.PluginEnvironment, error) {
+	return &logical.PluginEnvironment{
+		VaultVersion: version.GetVersion().Version,
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/expiration.go b/vendor/github.com/hashicorp/vault/vault/expiration.go
new file mode 100644
index 00000000..303b9058
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/expiration.go
@@ -0,0 +1,1865 @@
+package vault
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	metrics "github.com/armon/go-metrics"
+	"github.com/hashicorp/errwrap"
+	log "github.com/hashicorp/go-hclog"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/base62"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/helper/locksutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	uberAtomic "go.uber.org/atomic"
+)
+
+const (
+	// expirationSubPath is the sub-path used for the expiration manager
+	// view. This is nested under the system view.
+	expirationSubPath = "expire/"
+
+	// leaseViewPrefix is the prefix used for the ID based lookup of leases.
+	leaseViewPrefix = "id/"
+
+	// tokenViewPrefix is the prefix used for the token based lookup of leases.
+	tokenViewPrefix = "token/"
+
+	// maxRevokeAttempts limits how many revoke attempts are made
+	maxRevokeAttempts = 6
+
+	// revokeRetryBase is a baseline retry time
+	revokeRetryBase = 10 * time.Second
+
+	// maxLeaseTTL is the default maximum lease duration
+	maxLeaseTTL = 32 * 24 * time.Hour
+
+	// defaultLeaseTTL is the default lease duration used when no lease is specified
+	defaultLeaseTTL = maxLeaseTTL
+
+	// maxLeaseThreshold is the maximum lease count before generating log warning
+	maxLeaseThreshold = 256000
+)
+
+type pendingInfo struct {
+	exportLeaseTimes *leaseEntry
+	timer            *time.Timer
+}
+
+// ExpirationManager is used by the Core to manage leases. Secrets
+// can provide a lease, meaning that they can be renewed or revoked.
+// If a secret is not renewed in a timely manner, it may be expired, and
+// the ExpirationManager will handle doing automatic revocation.
+type ExpirationManager struct {
+	core       *Core
+	router     *Router
+	idView     *BarrierView
+	tokenView  *BarrierView
+	tokenStore *TokenStore
+	logger     log.Logger
+
+	pending     map[string]pendingInfo
+	pendingLock sync.RWMutex
+
+	tidyLock *int32
+
+	restoreMode        *int32
+	restoreModeLock    sync.RWMutex
+	restoreRequestLock sync.RWMutex
+	restoreLocks       []*locksutil.LockEntry
+	restoreLoaded      sync.Map
+	quitCh             chan struct{}
+
+	coreStateLock     *sync.RWMutex
+	quitContext       context.Context
+	leaseCheckCounter *uint32
+
+	logLeaseExpirations bool
+	expireFunc          ExpireLeaseStrategy
+
+	// testRegisterAuthFailure, if set to true, triggers an explicit failure on
+	// RegisterAuth to simulate a partial failure during a token creation
+	// request. This value should only be set by tests.
+	testRegisterAuthFailure uberAtomic.Bool
+}
+
+type ExpireLeaseStrategy func(context.Context, *ExpirationManager, *leaseEntry)
+
+// expireLeaseStrategyRevoke is invoked when a given lease expires and revokes it
+func expireLeaseStrategyRevoke(ctx context.Context, m *ExpirationManager, le *leaseEntry) {
+	for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ {
+		revokeCtx, cancel := context.WithTimeout(ctx, DefaultMaxRequestDuration)
+		revokeCtx = namespace.ContextWithNamespace(revokeCtx, le.namespace)
+
+		go func() {
+			select {
+			case <-ctx.Done():
+			case <-m.quitCh:
+				cancel()
+			case <-revokeCtx.Done():
+			}
+		}()
+
+		select {
+		case <-m.quitCh:
+			m.logger.Error("shutting down, not attempting further revocation of lease", "lease_id", le.LeaseID)
+			cancel()
+			return
+		case <-m.quitContext.Done():
+			m.logger.Error("core context canceled, not attempting further revocation of lease", "lease_id", le.LeaseID)
+			cancel()
+			return
+		default:
+		}
+
+		m.coreStateLock.RLock()
+		err := m.Revoke(revokeCtx, le.LeaseID)
+		m.coreStateLock.RUnlock()
+		cancel()
+		if err == nil {
+			return
+		}
+
+		m.logger.Error("failed to revoke lease", "lease_id", le.LeaseID, "error", err)
+		time.Sleep((1 << attempt) * revokeRetryBase)
+	}
+	m.logger.Error("maximum revoke attempts reached", "lease_id", le.LeaseID)
+}
+
+// NewExpirationManager creates a new ExpirationManager that is backed
+// using a given view, and uses the provided router for revocation.
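+// A freshly constructed manager starts out in restore mode; Restore (kicked
+// off from setupExpiration below) reloads persisted leases and then clears
+// that flag.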
+func NewExpirationManager(c *Core, view *BarrierView, e ExpireLeaseStrategy, logger log.Logger) *ExpirationManager { + exp := &ExpirationManager{ + core: c, + router: c.router, + idView: view.SubView(leaseViewPrefix), + tokenView: view.SubView(tokenViewPrefix), + tokenStore: c.tokenStore, + logger: logger, + pending: make(map[string]pendingInfo), + tidyLock: new(int32), + + // new instances of the expiration manager will go immediately into + // restore mode + restoreMode: new(int32), + restoreLocks: locksutil.CreateLocks(), + quitCh: make(chan struct{}), + + coreStateLock: &c.stateLock, + quitContext: c.activeContext, + leaseCheckCounter: new(uint32), + + logLeaseExpirations: os.Getenv("VAULT_SKIP_LOGGING_LEASE_EXPIRATIONS") == "", + expireFunc: e, + } + *exp.restoreMode = 1 + + if exp.logger == nil { + opts := log.LoggerOptions{Name: "expiration_manager"} + exp.logger = log.New(&opts) + } + + return exp +} + +// setupExpiration is invoked after we've loaded the mount table to +// initialize the expiration manager +func (c *Core) setupExpiration(e ExpireLeaseStrategy) error { + c.metricsMutex.Lock() + defer c.metricsMutex.Unlock() + // Create a sub-view + view := c.systemBarrierView.SubView(expirationSubPath) + + // Create the manager + expLogger := c.baseLogger.Named("expiration") + c.AddLogger(expLogger) + mgr := NewExpirationManager(c, view, e, expLogger) + c.expiration = mgr + + // Link the token store to this + c.tokenStore.SetExpirationManager(mgr) + + // Restore the existing state + c.logger.Info("restoring leases") + errorFunc := func() { + c.logger.Error("shutting down") + if err := c.Shutdown(); err != nil { + c.logger.Error("error shutting down core", "error", err) + } + } + go c.expiration.Restore(errorFunc) + + return nil +} + +// stopExpiration is used to stop the expiration manager before +// sealing the Vault. +func (c *Core) stopExpiration() error { + if c.expiration != nil { + if err := c.expiration.Stop(); err != nil { + return err + } + c.metricsMutex.Lock() + defer c.metricsMutex.Unlock() + c.expiration = nil + } + return nil +} + +// lockLease takes out a lock for a given lease ID +func (m *ExpirationManager) lockLease(leaseID string) { + locksutil.LockForKey(m.restoreLocks, leaseID).Lock() +} + +// unlockLease unlocks a given lease ID +func (m *ExpirationManager) unlockLease(leaseID string) { + locksutil.LockForKey(m.restoreLocks, leaseID).Unlock() +} + +// inRestoreMode returns if we are currently in restore mode +func (m *ExpirationManager) inRestoreMode() bool { + return atomic.LoadInt32(m.restoreMode) == 1 +} + +func (m *ExpirationManager) invalidate(key string) { + + switch { + case strings.HasPrefix(key, leaseViewPrefix): + // Clear from the pending expiration + leaseID := strings.TrimPrefix(key, leaseViewPrefix) + m.pendingLock.Lock() + if pending, ok := m.pending[leaseID]; ok { + pending.timer.Stop() + delete(m.pending, leaseID) + } + m.pendingLock.Unlock() + } +} + +// Tidy cleans up the dangling storage entries for leases. It scans the storage +// view to find all the available leases, checks if the token embedded in it is +// either empty or invalid and in both the cases, it revokes them. It also uses +// a token cache to avoid multiple lookups of the same token ID. It is normally +// not required to use the API that invokes this. This is only intended to +// clean up the corrupt storage due to bugs. 
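+// In rough pseudocode, the scan below amounts to:
+//
+//	for each leaseID in the lease view:
+//	    if the lease's ClientToken is empty or no longer resolves to a token,
+//	        revoke it forcibly via revokeCommon(ctx, leaseID, true, true)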
+func (m *ExpirationManager) Tidy(ctx context.Context) error { + if m.inRestoreMode() { + return errors.New("cannot run tidy while restoring leases") + } + + var tidyErrors *multierror.Error + + logger := m.logger.Named("tidy") + m.core.AddLogger(logger) + + if !atomic.CompareAndSwapInt32(m.tidyLock, 0, 1) { + logger.Warn("tidy operation on leases is already in progress") + return nil + } + + defer atomic.CompareAndSwapInt32(m.tidyLock, 1, 0) + + logger.Info("beginning tidy operation on leases") + defer logger.Info("finished tidy operation on leases") + + // Create a cache to keep track of looked up tokens + tokenCache := make(map[string]bool) + var countLease, revokedCount, deletedCountInvalidToken, deletedCountEmptyToken int64 + + tidyFunc := func(leaseID string) { + countLease++ + if countLease%500 == 0 { + logger.Info("tidying leases", "progress", countLease) + } + + le, err := m.loadEntry(ctx, leaseID) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("failed to load the lease ID %q: {{err}}", leaseID), err)) + return + } + + if le == nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("nil entry for lease ID %q: {{err}}", leaseID), err)) + return + } + + var isValid, ok bool + revokeLease := false + if le.ClientToken == "" { + logger.Debug("revoking lease which has an empty token", "lease_id", leaseID) + revokeLease = true + deletedCountEmptyToken++ + goto REVOKE_CHECK + } + + isValid, ok = tokenCache[le.ClientToken] + if !ok { + lock := locksutil.LockForKey(m.tokenStore.tokenLocks, le.ClientToken) + lock.RLock() + te, err := m.tokenStore.lookupInternal(ctx, le.ClientToken, false, true) + lock.RUnlock() + + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to lookup token: {{err}}", err)) + return + } + + if te == nil { + logger.Debug("revoking lease which holds an invalid token", "lease_id", leaseID) + revokeLease = true + deletedCountInvalidToken++ + tokenCache[le.ClientToken] = false + } else { + tokenCache[le.ClientToken] = true + } + goto REVOKE_CHECK + } else { + if isValid { + return + } + + logger.Debug("revoking lease which contains an invalid token", "lease_id", leaseID) + revokeLease = true + deletedCountInvalidToken++ + goto REVOKE_CHECK + } + + REVOKE_CHECK: + if revokeLease { + // Force the revocation and skip going through the token store + // again + err = m.revokeCommon(ctx, leaseID, true, true) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("failed to revoke an invalid lease with ID %q: {{err}}", leaseID), err)) + return + } + revokedCount++ + } + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + leaseView := m.leaseView(ns) + if err := logical.ScanView(m.quitContext, leaseView, tidyFunc); err != nil { + return err + } + + logger.Info("number of leases scanned", "count", countLease) + logger.Info("number of leases which had empty tokens", "count", deletedCountEmptyToken) + logger.Info("number of leases which had invalid tokens", "count", deletedCountInvalidToken) + logger.Info("number of leases successfully revoked", "count", revokedCount) + + return tidyErrors.ErrorOrNil() +} + +// Restore is used to recover the lease states when starting. +// This is used after starting the vault. +func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) { + defer func() { + // Turn off restore mode. 
We can do this safely without the lock because + // if restore mode finished successfully, restore mode was already + // disabled with the lock. In an error state, this will allow the + // Stop() function to shut everything down. + atomic.StoreInt32(m.restoreMode, 0) + + switch { + case retErr == nil: + case strings.Contains(retErr.Error(), context.Canceled.Error()): + // Don't run error func because we lost leadership + m.logger.Warn("context canceled while restoring leases, stopping lease loading") + retErr = nil + case errwrap.Contains(retErr, ErrBarrierSealed.Error()): + // Don't run error func because we're likely already shutting down + m.logger.Warn("barrier sealed while restoring leases, stopping lease loading") + retErr = nil + default: + m.logger.Error("error restoring leases", "error", retErr) + if errorFunc != nil { + errorFunc() + } + } + }() + + // Accumulate existing leases + m.logger.Debug("collecting leases") + existing, leaseCount, err := m.collectLeases() + if err != nil { + return err + } + m.logger.Debug("leases collected", "num_existing", leaseCount) + + // Make the channels used for the worker pool + type lease struct { + namespace *namespace.Namespace + id string + } + broker := make(chan *lease) + quit := make(chan bool) + // Buffer these channels to prevent deadlocks + errs := make(chan error, len(existing)) + result := make(chan struct{}, len(existing)) + + // Use a wait group + wg := &sync.WaitGroup{} + + // Create 64 workers to distribute work to + for i := 0; i < consts.ExpirationRestoreWorkerCount; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + for { + select { + case lease, ok := <-broker: + // broker has been closed, we are done + if !ok { + return + } + + ctx := namespace.ContextWithNamespace(m.quitContext, lease.namespace) + err := m.processRestore(ctx, lease.id) + if err != nil { + errs <- err + continue + } + + // Send message that lease is done + result <- struct{}{} + + // quit early + case <-quit: + return + + case <-m.quitCh: + return + } + } + }() + } + + // Distribute the collected keys to the workers in a go routine + wg.Add(1) + go func() { + defer wg.Done() + i := 0 + for ns := range existing { + for _, leaseID := range existing[ns] { + i++ + if i%500 == 0 { + m.logger.Debug("leases loading", "progress", i) + } + + select { + case <-quit: + return + + case <-m.quitCh: + return + + default: + broker <- &lease{ + namespace: ns, + id: leaseID, + } + } + } + } + + // Close the broker, causing worker routines to exit + close(broker) + }() + + // Ensure all keys on the chan are processed + for i := 0; i < leaseCount; i++ { + select { + case err := <-errs: + // Close all go routines + close(quit) + return err + + case <-m.quitCh: + close(quit) + return nil + + case <-result: + } + } + + // Let all go routines finish + wg.Wait() + + m.restoreModeLock.Lock() + atomic.StoreInt32(m.restoreMode, 0) + m.restoreLoaded.Range(func(k, v interface{}) bool { + m.restoreLoaded.Delete(k) + return true + }) + m.restoreLocks = nil + m.restoreModeLock.Unlock() + + m.logger.Info("lease restore complete") + return nil +} + +// processRestore takes a lease and restores it in the expiration manager if it has +// not already been seen +func (m *ExpirationManager) processRestore(ctx context.Context, leaseID string) error { + m.restoreRequestLock.RLock() + defer m.restoreRequestLock.RUnlock() + + // Check if the lease has been seen + if _, ok := m.restoreLoaded.Load(leaseID); ok { + return nil + } + + m.lockLease(leaseID) + defer m.unlockLease(leaseID) + + // Check 
again with the lease locked + if _, ok := m.restoreLoaded.Load(leaseID); ok { + return nil + } + + // Load lease and restore expiration timer + _, err := m.loadEntryInternal(ctx, leaseID, true, false) + if err != nil { + return err + } + return nil +} + +// Stop is used to prevent further automatic revocations. +// This must be called before sealing the view. +func (m *ExpirationManager) Stop() error { + // Stop all the pending expiration timers + m.logger.Debug("stop triggered") + defer m.logger.Debug("finished stopping") + + // Do this before stopping pending timers to avoid potential races with + // expiring timers + close(m.quitCh) + + m.pendingLock.Lock() + for _, pending := range m.pending { + pending.timer.Stop() + } + m.pending = make(map[string]pendingInfo) + m.pendingLock.Unlock() + + if m.inRestoreMode() { + for { + if !m.inRestoreMode() { + break + } + time.Sleep(10 * time.Millisecond) + } + } + + return nil +} + +// Revoke is used to revoke a secret named by the given LeaseID +func (m *ExpirationManager) Revoke(ctx context.Context, leaseID string) error { + defer metrics.MeasureSince([]string{"expire", "revoke"}, time.Now()) + + return m.revokeCommon(ctx, leaseID, false, false) +} + +// LazyRevoke is used to queue revocation for a secret named by the given +// LeaseID. If the lease was not found it returns nil; if the lease was found +// it triggers a return of a 202. +func (m *ExpirationManager) LazyRevoke(ctx context.Context, leaseID string) error { + defer metrics.MeasureSince([]string{"expire", "lazy-revoke"}, time.Now()) + + // Load the entry + le, err := m.loadEntry(ctx, leaseID) + if err != nil { + return err + } + + // If there is no entry, nothing to revoke + if le == nil { + return nil + } + + le.ExpireTime = time.Now() + { + m.pendingLock.Lock() + if err := m.persistEntry(ctx, le); err != nil { + m.pendingLock.Unlock() + return err + } + + m.updatePendingInternal(le, 0) + m.pendingLock.Unlock() + } + + return nil +} + +// revokeCommon does the heavy lifting. 
If force is true, we ignore a problem +// during revocation and still remove entries/index/lease timers +func (m *ExpirationManager) revokeCommon(ctx context.Context, leaseID string, force, skipToken bool) error { + defer metrics.MeasureSince([]string{"expire", "revoke-common"}, time.Now()) + + // Load the entry + le, err := m.loadEntry(ctx, leaseID) + if err != nil { + return err + } + + // If there is no entry, nothing to revoke + if le == nil { + return nil + } + + // Revoke the entry + if !skipToken || le.Auth == nil { + if err := m.revokeEntry(ctx, le); err != nil { + if !force { + return err + } + + if m.logger.IsWarn() { + m.logger.Warn("revocation from the backend failed, but in force mode so ignoring", "error", err) + } + } + } + + // Delete the entry + if err := m.deleteEntry(ctx, le); err != nil { + return err + } + + // Delete the secondary index, but only if it's a leased secret (not auth) + if le.Secret != nil { + if err := m.removeIndexByToken(ctx, le); err != nil { + return err + } + } + + // Clear the expiration handler + m.pendingLock.Lock() + if pending, ok := m.pending[leaseID]; ok { + pending.timer.Stop() + delete(m.pending, leaseID) + } + m.pendingLock.Unlock() + + if m.logger.IsInfo() && !skipToken && m.logLeaseExpirations { + m.logger.Info("revoked lease", "lease_id", leaseID) + } + + return nil +} + +// RevokeForce works similarly to RevokePrefix but continues in the case of a +// revocation error; this is mostly meant for recovery operations +func (m *ExpirationManager) RevokeForce(ctx context.Context, prefix string) error { + defer metrics.MeasureSince([]string{"expire", "revoke-force"}, time.Now()) + + return m.revokePrefixCommon(ctx, prefix, true, true) +} + +// RevokePrefix is used to revoke all secrets with a given prefix. +// The prefix maps to that of the mount table to make this simpler +// to reason about. +func (m *ExpirationManager) RevokePrefix(ctx context.Context, prefix string, sync bool) error { + defer metrics.MeasureSince([]string{"expire", "revoke-prefix"}, time.Now()) + + return m.revokePrefixCommon(ctx, prefix, false, sync) +} + +// RevokeByToken is used to revoke all the secrets issued with a given token. +// This is done by using the secondary index. It also removes the lease entry +// for the token itself. As a result it should *ONLY* ever be called from the +// token store's revokeSalted function. 
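+// Rather than revoking synchronously, it stamps each lease's ExpireTime to
+// now, persists it, and hands it to the pending-timer machinery, which then
+// drives the actual revocation.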
+func (m *ExpirationManager) RevokeByToken(ctx context.Context, te *logical.TokenEntry) error { + defer metrics.MeasureSince([]string{"expire", "revoke-by-token"}, time.Now()) + tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core) + if err != nil { + return err + } + if tokenNS == nil { + return namespace.ErrNoNamespace + } + + tokenCtx := namespace.ContextWithNamespace(ctx, tokenNS) + // Lookup the leases + existing, err := m.lookupLeasesByToken(tokenCtx, te) + if err != nil { + return errwrap.Wrapf("failed to scan for leases: {{err}}", err) + } + + // Revoke all the keys + for _, leaseID := range existing { + // Load the entry + le, err := m.loadEntry(ctx, leaseID) + if err != nil { + return err + } + + // If there's a lease, set expiration to now, persist, and call + // updatePending to hand off revocation to the expiration manager's pending + // timer map + if le != nil { + le.ExpireTime = time.Now() + + { + m.pendingLock.Lock() + if err := m.persistEntry(ctx, le); err != nil { + m.pendingLock.Unlock() + return err + } + + m.updatePendingInternal(le, 0) + m.pendingLock.Unlock() + } + } + } + + // te.Path should never be empty, but we check just in case + if te.Path != "" { + saltCtx := namespace.ContextWithNamespace(ctx, tokenNS) + saltedID, err := m.tokenStore.SaltID(saltCtx, te.ID) + if err != nil { + return err + } + tokenLeaseID := path.Join(te.Path, saltedID) + + if tokenNS.ID != namespace.RootNamespaceID { + tokenLeaseID = fmt.Sprintf("%s.%s", tokenLeaseID, tokenNS.ID) + } + + // We want to skip the revokeEntry call as that will call back into + // revocation logic in the token store, which is what is running this + // function in the first place -- it'd be a deadlock loop. Since the only + // place that this function is called is revokeSalted in the token store, + // we're already revoking the token, so we just want to clean up the lease. + // This avoids spurious revocations later in the log when the timer runs + // out, and eases up resource usage. 
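+		// For reference, tokenLeaseID here has the shape
+		// "<token path>/<salted token ID>" (e.g. "auth/token/create/<salt>"),
+		// suffixed with ".<namespace ID>" outside the root namespace.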
+ return m.revokeCommon(ctx, tokenLeaseID, false, true) + } + + return nil +} + +func (m *ExpirationManager) revokePrefixCommon(ctx context.Context, prefix string, force, sync bool) error { + if m.inRestoreMode() { + m.restoreRequestLock.Lock() + defer m.restoreRequestLock.Unlock() + } + + // Ensure there is a trailing slash; or, if there is no slash, see if there + // is a matching specific ID + if !strings.HasSuffix(prefix, "/") { + le, err := m.loadEntry(ctx, prefix) + if err == nil && le != nil { + if sync { + if err := m.revokeCommon(ctx, prefix, force, false); err != nil { + return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q: {{err}}", prefix), err) + } + return nil + } + return m.LazyRevoke(ctx, prefix) + } + prefix = prefix + "/" + } + + // Accumulate existing leases + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + view := m.leaseView(ns) + sub := view.SubView(prefix) + existing, err := logical.CollectKeys(ctx, sub) + if err != nil { + return errwrap.Wrapf("failed to scan for leases: {{err}}", err) + } + + // Revoke all the keys + for idx, suffix := range existing { + leaseID := prefix + suffix + switch { + case sync: + if err := m.revokeCommon(ctx, leaseID, force, false); err != nil { + return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q (%d / %d): {{err}}", leaseID, idx+1, len(existing)), err) + } + default: + if err := m.LazyRevoke(ctx, leaseID); err != nil { + return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q (%d / %d): {{err}}", leaseID, idx+1, len(existing)), err) + } + } + } + + return nil +} + +// Renew is used to renew a secret using the given leaseID +// and a renew interval. The increment may be ignored. +func (m *ExpirationManager) Renew(ctx context.Context, leaseID string, increment time.Duration) (*logical.Response, error) { + defer metrics.MeasureSince([]string{"expire", "renew"}, time.Now()) + + // Load the entry + le, err := m.loadEntry(ctx, leaseID) + if err != nil { + return nil, err + } + + // Check if the lease is renewable + if _, err := le.renewable(); err != nil { + return nil, err + } + + if le.Secret == nil { + if le.Auth != nil { + return logical.ErrorResponse("tokens cannot be renewed through this endpoint"), nil + } + return logical.ErrorResponse("lease does not correspond to a secret"), nil + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + if ns.ID != le.namespace.ID { + return nil, errors.New("cannot renew a lease across namespaces") + } + + sysViewCtx := namespace.ContextWithNamespace(ctx, le.namespace) + sysView := m.router.MatchingSystemView(sysViewCtx, le.Path) + if sysView == nil { + return nil, fmt.Errorf("unable to retrieve system view from router") + } + + // Attempt to renew the entry + resp, err := m.renewEntry(ctx, le, increment) + if err != nil { + return nil, err + } + if resp == nil { + return nil, nil + } + if resp.IsError() { + return &logical.Response{ + Data: resp.Data, + }, nil + } + if resp.Secret == nil { + return nil, nil + } + + ttl, warnings, err := framework.CalculateTTL(sysView, increment, resp.Secret.TTL, 0, resp.Secret.MaxTTL, 0, le.IssueTime) + if err != nil { + return nil, err + } + for _, warning := range warnings { + resp.AddWarning(warning) + } + resp.Secret.TTL = ttl + + // Attach the LeaseID + resp.Secret.LeaseID = leaseID + + // Update the lease entry + le.Data = resp.Data + le.Secret = resp.Secret + le.ExpireTime = resp.Secret.ExpirationTime() + le.LastRenewalTime = time.Now() + + // If the token it's associated with is a batch token, 
constrain lease + // times + if le.ClientTokenType == logical.TokenTypeBatch { + te, err := m.tokenStore.Lookup(ctx, le.ClientToken) + if err != nil { + return nil, err + } + if te == nil { + return nil, errors.New("cannot renew lease, no valid associated token") + } + tokenLeaseTimes, err := m.FetchLeaseTimesByToken(ctx, te) + if err != nil { + return nil, err + } + if le.ExpireTime.After(tokenLeaseTimes.ExpireTime) { + resp.Secret.TTL = tokenLeaseTimes.ExpireTime.Sub(le.LastRenewalTime) + le.ExpireTime = tokenLeaseTimes.ExpireTime + } + } + + { + m.pendingLock.Lock() + if err := m.persistEntry(ctx, le); err != nil { + m.pendingLock.Unlock() + return nil, err + } + + // Update the expiration time + m.updatePendingInternal(le, resp.Secret.LeaseTotal()) + m.pendingLock.Unlock() + } + + // Return the response + return resp, nil +} + +// RenewToken is used to renew a token which does not need to +// invoke a logical backend. +func (m *ExpirationManager) RenewToken(ctx context.Context, req *logical.Request, te *logical.TokenEntry, + increment time.Duration) (*logical.Response, error) { + defer metrics.MeasureSince([]string{"expire", "renew-token"}, time.Now()) + + tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core) + if err != nil { + return nil, err + } + if tokenNS == nil { + return nil, namespace.ErrNoNamespace + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + if ns.ID != tokenNS.ID { + return nil, errors.New("cannot renew a token across namespaces") + } + + // Compute the Lease ID + saltedID, err := m.tokenStore.SaltID(ctx, te.ID) + if err != nil { + return nil, err + } + + leaseID := path.Join(te.Path, saltedID) + + if ns.ID != namespace.RootNamespaceID { + leaseID = fmt.Sprintf("%s.%s", leaseID, ns.ID) + } + + // Load the entry + le, err := m.loadEntry(ctx, leaseID) + if err != nil { + return nil, err + } + if le == nil { + return logical.ErrorResponse("invalid lease ID"), logical.ErrInvalidRequest + } + + // Check if the lease is renewable. Note that this also checks for a nil + // lease and errors in that case as well. 
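+	// (Batch tokens pass this check: renewable() reports them non-renewable
+	// without an error, and renewAuthEntry below rejects them with an explicit
+	// error response instead.)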
+	if _, err := le.renewable(); err != nil {
+		return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+	}
+
+	// Attempt to renew the auth entry
+	resp, err := m.renewAuthEntry(ctx, req, le, increment)
+	if err != nil {
+		return nil, err
+	}
+	if resp == nil {
+		return nil, nil
+	}
+	if resp.IsError() {
+		return &logical.Response{
+			Data: resp.Data,
+		}, nil
+	}
+	if resp.Auth == nil {
+		return nil, nil
+	}
+
+	sysViewCtx := namespace.ContextWithNamespace(ctx, le.namespace)
+	sysView := m.router.MatchingSystemView(sysViewCtx, le.Path)
+	if sysView == nil {
+		return nil, fmt.Errorf("unable to retrieve system view from router")
+	}
+
+	ttl, warnings, err := framework.CalculateTTL(sysView, increment, resp.Auth.TTL, resp.Auth.Period, resp.Auth.MaxTTL, resp.Auth.ExplicitMaxTTL, le.IssueTime)
+	if err != nil {
+		return nil, err
+	}
+	retResp := &logical.Response{}
+	for _, warning := range warnings {
+		retResp.AddWarning(warning)
+	}
+	resp.Auth.TTL = ttl
+
+	// Attach the ClientToken
+	resp.Auth.ClientToken = te.ID
+
+	// Refresh groups
+	if resp.Auth.EntityID != "" &&
+		resp.Auth.GroupAliases != nil &&
+		m.core.identityStore != nil {
+		validAliases, err := m.core.identityStore.refreshExternalGroupMembershipsByEntityID(ctx, resp.Auth.EntityID, resp.Auth.GroupAliases)
+		if err != nil {
+			return nil, err
+		}
+		resp.Auth.GroupAliases = validAliases
+	}
+
+	// Update the lease entry
+	le.Auth = resp.Auth
+	le.ExpireTime = resp.Auth.ExpirationTime()
+	le.LastRenewalTime = time.Now()
+
+	{
+		m.pendingLock.Lock()
+		if err := m.persistEntry(ctx, le); err != nil {
+			m.pendingLock.Unlock()
+			return nil, err
+		}
+
+		// Update the expiration time
+		m.updatePendingInternal(le, resp.Auth.LeaseTotal())
+		m.pendingLock.Unlock()
+	}
+
+	retResp.Auth = resp.Auth
+	return retResp, nil
+}
+
+// Register is used to take a request and response with an associated
+// lease. The secret gets assigned a LeaseID and the management of
+// the lease is assumed by the expiration manager.
+func (m *ExpirationManager) Register(ctx context.Context, req *logical.Request, resp *logical.Response) (id string, retErr error) {
+	defer metrics.MeasureSince([]string{"expire", "register"}, time.Now())
+
+	te := req.TokenEntry()
+	if te == nil {
+		return "", fmt.Errorf("cannot register a lease with an empty client token")
+	}
+
+	// Ignore if there is no leased secret
+	if resp == nil || resp.Secret == nil {
+		return "", nil
+	}
+
+	// Validate the secret
+	if err := resp.Secret.Validate(); err != nil {
+		return "", err
+	}
+
+	// Create a lease entry
+	leaseRand, err := base62.Random(TokenLength)
+	if err != nil {
+		return "", err
+	}
+
+	ns, err := namespace.FromContext(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	leaseID := path.Join(req.Path, leaseRand)
+
+	if ns.ID != namespace.RootNamespaceID {
+		leaseID = fmt.Sprintf("%s.%s", leaseID, ns.ID)
+	}
+
+	le := &leaseEntry{
+		LeaseID:         leaseID,
+		ClientToken:     req.ClientToken,
+		ClientTokenType: te.Type,
+		Path:            req.Path,
+		Data:            resp.Data,
+		Secret:          resp.Secret,
+		IssueTime:       time.Now(),
+		ExpireTime:      resp.Secret.ExpirationTime(),
+		namespace:       ns,
+		Version:         1,
+	}
+
+	defer func() {
+		// If there is an error we want to rollback as much as possible (note
+		// that errors here are ignored to do as much cleanup as we can). We
+		// want to revoke a generated secret (since an error means we may not
+		// be successfully tracking it), remove indexes, and delete the entry.
+ if retErr != nil { + revokeCtx := namespace.ContextWithNamespace(m.quitContext, ns) + revResp, err := m.router.Route(revokeCtx, logical.RevokeRequest(req.Path, resp.Secret, resp.Data)) + if err != nil { + retErr = multierror.Append(retErr, errwrap.Wrapf("an additional internal error was encountered revoking the newly-generated secret: {{err}}", err)) + } else if revResp != nil && revResp.IsError() { + retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered revoking the newly-generated secret: {{err}}", revResp.Error())) + } + + if err := m.deleteEntry(ctx, le); err != nil { + retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered deleting any lease associated with the newly-generated secret: {{err}}", err)) + } + + if err := m.removeIndexByToken(ctx, le); err != nil { + retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered removing lease indexes associated with the newly-generated secret: {{err}}", err)) + } + } + }() + + // If the token is a batch token, we want to constrain the maximum lifetime + // by the token's lifetime + if te.Type == logical.TokenTypeBatch { + tokenLeaseTimes, err := m.FetchLeaseTimesByToken(ctx, te) + if err != nil { + return "", err + } + if le.ExpireTime.After(tokenLeaseTimes.ExpireTime) { + le.ExpireTime = tokenLeaseTimes.ExpireTime + } + } + + // Encode the entry + if err := m.persistEntry(ctx, le); err != nil { + return "", err + } + + // Maintain secondary index by token, except for orphan batch tokens + switch { + case te.Type != logical.TokenTypeBatch: + if err := m.createIndexByToken(ctx, le, le.ClientToken); err != nil { + return "", err + } + case te.Parent != "": + // If it's a non-orphan batch token, assign the secondary index to its + // parent + if err := m.createIndexByToken(ctx, le, te.Parent); err != nil { + return "", err + } + } + + // Setup revocation timer if there is a lease + m.updatePending(le, resp.Secret.LeaseTotal()) + + // Done + return le.LeaseID, nil +} + +// RegisterAuth is used to take an Auth response with an associated lease. +// The token does not get a LeaseID, but the lease management is handled by +// the expiration manager. +func (m *ExpirationManager) RegisterAuth(ctx context.Context, te *logical.TokenEntry, auth *logical.Auth) error { + defer metrics.MeasureSince([]string{"expire", "register-auth"}, time.Now()) + + // Triggers failure of RegisterAuth. This should only be set and triggered + // by tests to simulate partial failure during a token creation request. 
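+	// (Tests arm the failure with m.testRegisterAuthFailure.Store(true); see
+	// the field's doc comment on ExpirationManager above.)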
+ if m.testRegisterAuthFailure.Load() { + return fmt.Errorf("failing explicitly on RegisterAuth") + } + + authExpirationTime := auth.ExpirationTime() + + if te.TTL == 0 && authExpirationTime.IsZero() && (len(te.Policies) != 1 || te.Policies[0] != "root") { + return errors.New("refusing to register a lease for a non-root token with no TTL") + } + + if te.Type == logical.TokenTypeBatch { + return errors.New("cannot register a lease for a batch token") + } + + if auth.ClientToken == "" { + return errors.New("cannot register an auth lease with an empty token") + } + + if strings.Contains(te.Path, "..") { + return consts.ErrPathContainsParentReferences + } + + tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core) + if err != nil { + return err + } + if tokenNS == nil { + return namespace.ErrNoNamespace + } + + saltCtx := namespace.ContextWithNamespace(ctx, tokenNS) + saltedID, err := m.tokenStore.SaltID(saltCtx, auth.ClientToken) + if err != nil { + return err + } + + leaseID := path.Join(te.Path, saltedID) + if tokenNS.ID != namespace.RootNamespaceID { + leaseID = fmt.Sprintf("%s.%s", leaseID, tokenNS.ID) + } + + // Create a lease entry + le := leaseEntry{ + LeaseID: leaseID, + ClientToken: auth.ClientToken, + Auth: auth, + Path: te.Path, + IssueTime: time.Now(), + ExpireTime: authExpirationTime, + namespace: tokenNS, + Version: 1, + } + + // Encode the entry + if err := m.persistEntry(ctx, &le); err != nil { + return err + } + + // Setup revocation timer + m.updatePending(&le, auth.LeaseTotal()) + + return nil +} + +// FetchLeaseTimesByToken is a helper function to use token values to compute +// the leaseID, rather than pushing that logic back into the token store. +// As a special case, for a batch token it simply returns the information +// encoded on it. +func (m *ExpirationManager) FetchLeaseTimesByToken(ctx context.Context, te *logical.TokenEntry) (*leaseEntry, error) { + defer metrics.MeasureSince([]string{"expire", "fetch-lease-times-by-token"}, time.Now()) + + if te == nil { + return nil, errors.New("cannot fetch lease times for nil token") + } + + if te.Type == logical.TokenTypeBatch { + issueTime := time.Unix(te.CreationTime, 0) + return &leaseEntry{ + IssueTime: issueTime, + ExpireTime: issueTime.Add(te.TTL), + ClientTokenType: logical.TokenTypeBatch, + }, nil + } + + tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core) + if err != nil { + return nil, err + } + if tokenNS == nil { + return nil, namespace.ErrNoNamespace + } + + saltCtx := namespace.ContextWithNamespace(ctx, tokenNS) + saltedID, err := m.tokenStore.SaltID(saltCtx, te.ID) + if err != nil { + return nil, err + } + + leaseID := path.Join(te.Path, saltedID) + + if tokenNS.ID != namespace.RootNamespaceID { + leaseID = fmt.Sprintf("%s.%s", leaseID, tokenNS.ID) + } + + return m.FetchLeaseTimes(ctx, leaseID) +} + +// FetchLeaseTimes is used to fetch the issue time, expiration time, and last +// renewed time of a lease entry. It returns a leaseEntry itself, but with only +// those values copied over. 
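+// It serves the times from the in-memory pending map when they are cached
+// there, and only falls back to decoding the lease entry from storage on a
+// miss.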
+func (m *ExpirationManager) FetchLeaseTimes(ctx context.Context, leaseID string) (*leaseEntry, error) {
+	defer metrics.MeasureSince([]string{"expire", "fetch-lease-times"}, time.Now())
+
+	m.pendingLock.RLock()
+	val := m.pending[leaseID]
+	m.pendingLock.RUnlock()
+
+	if val.exportLeaseTimes != nil {
+		return val.exportLeaseTimes, nil
+	}
+
+	// Load the entry
+	le, err := m.loadEntryInternal(ctx, leaseID, true, false)
+	if err != nil {
+		return nil, err
+	}
+	if le == nil {
+		return nil, nil
+	}
+
+	return m.leaseTimesForExport(le), nil
+}
+
+// Returns lease times for outside callers based on the full leaseEntry passed in
+func (m *ExpirationManager) leaseTimesForExport(le *leaseEntry) *leaseEntry {
+	ret := &leaseEntry{
+		IssueTime:       le.IssueTime,
+		ExpireTime:      le.ExpireTime,
+		LastRenewalTime: le.LastRenewalTime,
+	}
+	if le.Secret != nil {
+		ret.Secret = &logical.Secret{}
+		ret.Secret.Renewable = le.Secret.Renewable
+		ret.Secret.TTL = le.Secret.TTL
+	}
+	if le.Auth != nil {
+		ret.Auth = &logical.Auth{}
+		ret.Auth.Renewable = le.Auth.Renewable
+		ret.Auth.TTL = le.Auth.TTL
+	}
+
+	return ret
+}
+
+// updatePending is used to update a pending invocation for a lease
+func (m *ExpirationManager) updatePending(le *leaseEntry, leaseTotal time.Duration) {
+	m.pendingLock.Lock()
+	defer m.pendingLock.Unlock()
+
+	m.updatePendingInternal(le, leaseTotal)
+}
+
+// updatePendingInternal is the locked version of updatePending; do not call
+// this without a write lock on m.pending
+func (m *ExpirationManager) updatePendingInternal(le *leaseEntry, leaseTotal time.Duration) {
+	// Check for an existing timer
+	pending, ok := m.pending[le.LeaseID]
+
+	// If there is no expiry time, don't do anything
+	if le.ExpireTime.IsZero() {
+		// if the timer happened to exist, stop the timer and delete it from the
+		// pending timers.
+ if ok { + pending.timer.Stop() + delete(m.pending, le.LeaseID) + } + return + } + + // Create entry if it does not exist or reset if it does + if ok { + pending.timer.Reset(leaseTotal) + } else { + timer := time.AfterFunc(leaseTotal, func() { + m.expireFunc(m.quitContext, m, le) + }) + pending = pendingInfo{ + timer: timer, + } + } + + // Extend the timer by the lease total + pending.exportLeaseTimes = m.leaseTimesForExport(le) + + m.pending[le.LeaseID] = pending +} + +// revokeEntry is used to attempt revocation of an internal entry +func (m *ExpirationManager) revokeEntry(ctx context.Context, le *leaseEntry) error { + // Revocation of login tokens is special since we can by-pass the + // backend and directly interact with the token store + if le.Auth != nil { + if le.ClientTokenType == logical.TokenTypeBatch { + return errors.New("batch tokens cannot be revoked") + } + + if err := m.tokenStore.revokeTree(ctx, le); err != nil { + return errwrap.Wrapf("failed to revoke token: {{err}}", err) + } + + return nil + } + + if le.Secret != nil { + // not sure if this is really valid to have a leaseEntry with a nil Secret + // (if there's a nil Secret, what are you really leasing?), but the tests + // create one, and good to be defensive + le.Secret.IssueTime = le.IssueTime + } + + // Make sure we're operating in the right namespace + nsCtx := namespace.ContextWithNamespace(ctx, le.namespace) + + // Handle standard revocation via backends + resp, err := m.router.Route(nsCtx, logical.RevokeRequest(le.Path, le.Secret, le.Data)) + if err != nil || (resp != nil && resp.IsError()) { + return errwrap.Wrapf(fmt.Sprintf("failed to revoke entry: resp: %#v err: {{err}}", resp), err) + } + return nil +} + +// renewEntry is used to attempt renew of an internal entry +func (m *ExpirationManager) renewEntry(ctx context.Context, le *leaseEntry, increment time.Duration) (*logical.Response, error) { + secret := *le.Secret + secret.IssueTime = le.IssueTime + secret.Increment = increment + secret.LeaseID = "" + + // Make sure we're operating in the right namespace + nsCtx := namespace.ContextWithNamespace(ctx, le.namespace) + + req := logical.RenewRequest(le.Path, &secret, le.Data) + resp, err := m.router.Route(nsCtx, req) + if err != nil || (resp != nil && resp.IsError()) { + return nil, errwrap.Wrapf(fmt.Sprintf("failed to renew entry: resp: %#v err: {{err}}", resp), err) + } + return resp, nil +} + +// renewAuthEntry is used to attempt renew of an auth entry. Only the token +// store should get the actual token ID intact. 
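+// Concretely: for leases under "auth/token/" the ClientToken is passed
+// through, while for other auth mounts it is blanked before the renewal
+// request is routed to the backend.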
+func (m *ExpirationManager) renewAuthEntry(ctx context.Context, req *logical.Request, le *leaseEntry, increment time.Duration) (*logical.Response, error) { + if le.ClientTokenType == logical.TokenTypeBatch { + return logical.ErrorResponse("batch tokens cannot be renewed"), nil + } + + auth := *le.Auth + auth.IssueTime = le.IssueTime + auth.Increment = increment + if strings.HasPrefix(le.Path, "auth/token/") { + auth.ClientToken = le.ClientToken + } else { + auth.ClientToken = "" + } + + // Make sure we're operating in the right namespace + nsCtx := namespace.ContextWithNamespace(ctx, le.namespace) + + authReq := logical.RenewAuthRequest(le.Path, &auth, nil) + authReq.Connection = req.Connection + resp, err := m.router.Route(nsCtx, authReq) + if err != nil { + return nil, errwrap.Wrapf("failed to renew entry: {{err}}", err) + } + return resp, nil +} + +// loadEntry is used to read a lease entry +func (m *ExpirationManager) loadEntry(ctx context.Context, leaseID string) (*leaseEntry, error) { + // Take out the lease locks after we ensure we are in restore mode + restoreMode := m.inRestoreMode() + if restoreMode { + m.restoreModeLock.RLock() + defer m.restoreModeLock.RUnlock() + + restoreMode = m.inRestoreMode() + if restoreMode { + m.lockLease(leaseID) + defer m.unlockLease(leaseID) + } + } + + _, nsID := namespace.SplitIDFromString(leaseID) + if nsID != "" { + leaseNS, err := NamespaceByID(ctx, nsID, m.core) + if err != nil { + return nil, err + } + if leaseNS != nil { + ctx = namespace.ContextWithNamespace(ctx, leaseNS) + } + } else { + ctx = namespace.ContextWithNamespace(ctx, namespace.RootNamespace) + } + return m.loadEntryInternal(ctx, leaseID, restoreMode, true) +} + +// loadEntryInternal is used when you need to load an entry but also need to +// control the lifecycle of the restoreLock +func (m *ExpirationManager) loadEntryInternal(ctx context.Context, leaseID string, restoreMode bool, checkRestored bool) (*leaseEntry, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + view := m.leaseView(ns) + out, err := view.Get(ctx, leaseID) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("failed to read lease entry %s: {{err}}", leaseID), err) + } + if out == nil { + return nil, nil + } + le, err := decodeLeaseEntry(out.Value) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("failed to decode lease entry %s: {{err}}", leaseID), err) + } + le.namespace = ns + + if restoreMode { + if checkRestored { + // If we have already loaded this lease, we don't need to update on + // load. In the case of renewal and revocation, updatePending will be + // done after making the appropriate modifications to the lease. 
+ if _, ok := m.restoreLoaded.Load(leaseID); ok { + return le, nil + } + } + + // Update the cache of restored leases, either synchronously or through + // the lazy loaded restore process + m.restoreLoaded.Store(le.LeaseID, struct{}{}) + + // Setup revocation timer + m.updatePending(le, le.ExpireTime.Sub(time.Now())) + } + return le, nil +} + +// persistEntry is used to persist a lease entry +func (m *ExpirationManager) persistEntry(ctx context.Context, le *leaseEntry) error { + // Encode the entry + buf, err := le.encode() + if err != nil { + return errwrap.Wrapf("failed to encode lease entry: {{err}}", err) + } + + // Write out to the view + ent := logical.StorageEntry{ + Key: le.LeaseID, + Value: buf, + } + if le.Auth != nil && len(le.Auth.Policies) == 1 && le.Auth.Policies[0] == "root" { + ent.SealWrap = true + } + + view := m.leaseView(le.namespace) + if err := view.Put(ctx, &ent); err != nil { + return errwrap.Wrapf("failed to persist lease entry: {{err}}", err) + } + return nil +} + +// deleteEntry is used to delete a lease entry +func (m *ExpirationManager) deleteEntry(ctx context.Context, le *leaseEntry) error { + view := m.leaseView(le.namespace) + if err := view.Delete(ctx, le.LeaseID); err != nil { + return errwrap.Wrapf("failed to delete lease entry: {{err}}", err) + } + return nil +} + +// createIndexByToken creates a secondary index from the token to a lease entry +func (m *ExpirationManager) createIndexByToken(ctx context.Context, le *leaseEntry, token string) error { + tokenNS := namespace.RootNamespace + saltCtx := namespace.ContextWithNamespace(ctx, namespace.RootNamespace) + _, nsID := namespace.SplitIDFromString(token) + if nsID != "" { + var err error + tokenNS, err = NamespaceByID(ctx, nsID, m.core) + if err != nil { + return err + } + if tokenNS != nil { + saltCtx = namespace.ContextWithNamespace(ctx, tokenNS) + } + } + + saltedID, err := m.tokenStore.SaltID(saltCtx, token) + if err != nil { + return err + } + + leaseSaltedID, err := m.tokenStore.SaltID(saltCtx, le.LeaseID) + if err != nil { + return err + } + + ent := logical.StorageEntry{ + Key: saltedID + "/" + leaseSaltedID, + Value: []byte(le.LeaseID), + } + tokenView := m.tokenIndexView(tokenNS) + if err := tokenView.Put(ctx, &ent); err != nil { + return errwrap.Wrapf("failed to persist lease index entry: {{err}}", err) + } + return nil +} + +// indexByToken looks up the secondary index from the token to a lease entry +func (m *ExpirationManager) indexByToken(ctx context.Context, le *leaseEntry) (*logical.StorageEntry, error) { + tokenNS := namespace.RootNamespace + saltCtx := namespace.ContextWithNamespace(ctx, tokenNS) + _, nsID := namespace.SplitIDFromString(le.ClientToken) + if nsID != "" { + var err error + tokenNS, err = NamespaceByID(ctx, nsID, m.core) + if err != nil { + return nil, err + } + if tokenNS != nil { + saltCtx = namespace.ContextWithNamespace(ctx, tokenNS) + } + } + + saltedID, err := m.tokenStore.SaltID(saltCtx, le.ClientToken) + if err != nil { + return nil, err + } + + leaseSaltedID, err := m.tokenStore.SaltID(saltCtx, le.LeaseID) + if err != nil { + return nil, err + } + + key := saltedID + "/" + leaseSaltedID + tokenView := m.tokenIndexView(tokenNS) + entry, err := tokenView.Get(ctx, key) + if err != nil { + return nil, fmt.Errorf("failed to look up secondary index entry") + } + return entry, nil +} + +// removeIndexByToken removes the secondary index from the token to a lease entry +func (m *ExpirationManager) removeIndexByToken(ctx context.Context, le *leaseEntry) error { + tokenNS 
:= namespace.RootNamespace
+	saltCtx := namespace.ContextWithNamespace(ctx, namespace.RootNamespace)
+	_, nsID := namespace.SplitIDFromString(le.ClientToken)
+	if nsID != "" {
+		var err error
+		tokenNS, err = NamespaceByID(ctx, nsID, m.core)
+		if err != nil {
+			return err
+		}
+		if tokenNS != nil {
+			saltCtx = namespace.ContextWithNamespace(ctx, tokenNS)
+		}
+
+		// Downgrade logic for old-style (V0) namespace leases that had their
+		// secondary index live in the root namespace. This reverts to the old
+		// behavior of looking for the secondary index on these leases in the
+		// root namespace so they are cleaned up properly. We set it here because
+		// the old behavior used the namespace's token store salt for its saltCtx.
+		if le.Version < 1 {
+			tokenNS = namespace.RootNamespace
+		}
+	}
+
+	saltedID, err := m.tokenStore.SaltID(saltCtx, le.ClientToken)
+	if err != nil {
+		return err
+	}
+
+	leaseSaltedID, err := m.tokenStore.SaltID(saltCtx, le.LeaseID)
+	if err != nil {
+		return err
+	}
+
+	key := saltedID + "/" + leaseSaltedID
+	tokenView := m.tokenIndexView(tokenNS)
+	if err := tokenView.Delete(ctx, key); err != nil {
+		return errwrap.Wrapf("failed to delete lease index entry: {{err}}", err)
+	}
+	return nil
+}
+
+// CreateOrFetchRevocationLeaseByToken is used to create or fetch the matching
+// leaseID for a particular token. The lease is set to expire immediately after
+// it's created.
+func (m *ExpirationManager) CreateOrFetchRevocationLeaseByToken(ctx context.Context, te *logical.TokenEntry) (string, error) {
+	// Fetch the saltedID of the token and construct the leaseID
+	tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core)
+	if err != nil {
+		return "", err
+	}
+	if tokenNS == nil {
+		return "", namespace.ErrNoNamespace
+	}
+
+	saltCtx := namespace.ContextWithNamespace(ctx, tokenNS)
+	saltedID, err := m.tokenStore.SaltID(saltCtx, te.ID)
+	if err != nil {
+		return "", err
+	}
+	leaseID := path.Join(te.Path, saltedID)
+
+	if tokenNS.ID != namespace.RootNamespaceID {
+		leaseID = fmt.Sprintf("%s.%s", leaseID, tokenNS.ID)
+	}
+
+	// Load the entry
+	le, err := m.loadEntry(ctx, leaseID)
+	if err != nil {
+		return "", err
+	}
+
+	// If there's no associated leaseEntry for the token, we create one
+	if le == nil {
+		auth := &logical.Auth{
+			ClientToken: te.ID,
+			LeaseOptions: logical.LeaseOptions{
+				TTL: time.Nanosecond,
+			},
+		}
+
+		if strings.Contains(te.Path, "..") {
+			return "", consts.ErrPathContainsParentReferences
+		}
+
+		// Create a lease entry
+		now := time.Now()
+		le = &leaseEntry{
+			LeaseID:     leaseID,
+			ClientToken: auth.ClientToken,
+			Auth:        auth,
+			Path:        te.Path,
+			IssueTime:   now,
+			ExpireTime:  now.Add(time.Nanosecond),
+			namespace:   tokenNS,
+			Version:     1,
+		}
+
+		// Encode the entry
+		if err := m.persistEntry(ctx, le); err != nil {
+			return "", err
+		}
+	}
+
+	return le.LeaseID, nil
+}
+
+// lookupLeasesByToken is used to lookup all the leaseID's via the tokenID
+func (m *ExpirationManager) lookupLeasesByToken(ctx context.Context, te *logical.TokenEntry) ([]string, error) {
+	tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core)
+	if err != nil {
+		return nil, err
+	}
+	if tokenNS == nil {
+		return nil, namespace.ErrNoNamespace
+	}
+
+	saltCtx := namespace.ContextWithNamespace(ctx, tokenNS)
+	saltedID, err := m.tokenStore.SaltID(saltCtx, te.ID)
+	if err != nil {
+		return nil, err
+	}
+
+	tokenView := m.tokenIndexView(tokenNS)
+
+	// Scan via the index for sub-leases
+	prefix := saltedID + "/"
+	subKeys, err := tokenView.List(ctx, prefix)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to list leases: {{err}}", err)
+	}
+
+	// Read each index entry
+	leaseIDs := make([]string, 0, len(subKeys))
+	for _, sub := range subKeys {
+		out, err := tokenView.Get(ctx, prefix+sub)
+		if err != nil {
+			return nil, errwrap.Wrapf("failed to read lease index: {{err}}", err)
+		}
+		if out == nil {
+			continue
+		}
+		leaseIDs = append(leaseIDs, string(out.Value))
+	}
+
+	// Downgrade logic for old-style (V0) lease entries created by a namespace
+	// token that lived in the root namespace.
+	if tokenNS.ID != namespace.RootNamespaceID {
+		tokenView := m.tokenIndexView(namespace.RootNamespace)
+
+		// Scan via the index for sub-leases on the root namespace
+		prefix := saltedID + "/"
+		subKeys, err := tokenView.List(ctx, prefix)
+		if err != nil {
+			return nil, errwrap.Wrapf("failed to list leases on root namespace: {{err}}", err)
+		}
+
+		for _, sub := range subKeys {
+			out, err := tokenView.Get(ctx, prefix+sub)
+			if err != nil {
+				return nil, errwrap.Wrapf("failed to read lease index on root namespace: {{err}}", err)
+			}
+			if out == nil {
+				continue
+			}
+			leaseIDs = append(leaseIDs, string(out.Value))
+		}
+	}
+
+	return leaseIDs, nil
+}
+
+// emitMetrics is invoked periodically to emit statistics
+func (m *ExpirationManager) emitMetrics() {
+	m.pendingLock.RLock()
+	num := len(m.pending)
+	m.pendingLock.RUnlock()
+	metrics.SetGauge([]string{"expire", "num_leases"}, float32(num))
+	// Check if lease count is greater than the threshold
+	if num > maxLeaseThreshold {
+		if atomic.LoadUint32(m.leaseCheckCounter) > 59 {
+			m.logger.Warn("lease count exceeds warning lease threshold")
+			atomic.StoreUint32(m.leaseCheckCounter, 0)
+		} else {
+			atomic.AddUint32(m.leaseCheckCounter, 1)
+		}
+	}
+}
+
+// leaseEntry is used to structure the values the expiration
+// manager stores. This is used to handle renew and revocation.
+type leaseEntry struct {
+	LeaseID         string                 `json:"lease_id"`
+	ClientToken     string                 `json:"client_token"`
+	ClientTokenType logical.TokenType      `json:"token_type"`
+	Path            string                 `json:"path"`
+	Data            map[string]interface{} `json:"data"`
+	Secret          *logical.Secret        `json:"secret"`
+	Auth            *logical.Auth          `json:"auth"`
+	IssueTime       time.Time              `json:"issue_time"`
+	ExpireTime      time.Time              `json:"expire_time"`
+	LastRenewalTime time.Time              `json:"last_renewal_time"`
+
+	// Version is used to track different versions of leases. V0 (or
+	// zero-value) had non-root namespaced secondary indexes live in the root
+	// namespace, and V1 has secondary indexes live in the matching namespace.
+	Version int `json:"version"`
+
+	namespace *namespace.Namespace
+}
+
+// encode is used to JSON encode the lease entry
+func (le *leaseEntry) encode() ([]byte, error) {
+	return json.Marshal(le)
+}
+
+func (le *leaseEntry) renewable() (bool, error) {
+	switch {
+	// If there is no entry, there is nothing to renew
+	case le == nil:
+		return false, fmt.Errorf("lease not found")
+
+	case le.ExpireTime.IsZero():
+		return false, fmt.Errorf("lease is not renewable")
+
+	case le.ClientTokenType == logical.TokenTypeBatch:
+		return false, nil
+
+	// Determine if the lease is expired
+	case le.ExpireTime.Before(time.Now()):
+		return false, fmt.Errorf("lease expired")
+
+	// Determine if the lease is renewable
+	case le.Secret != nil && !le.Secret.Renewable:
+		return false, fmt.Errorf("lease is not renewable")
+
+	case le.Auth != nil && !le.Auth.Renewable:
+		return false, fmt.Errorf("lease is not renewable")
+	}
+
+	return true, nil
+}
+
+func (le *leaseEntry) ttl() int64 {
+	return int64(le.ExpireTime.Sub(time.Now().Round(time.Second)).Seconds())
+}
+
+// decodeLeaseEntry is used to reverse encode and return a new entry
+func decodeLeaseEntry(buf []byte) (*leaseEntry, error) {
+	out := new(leaseEntry)
+	return out, jsonutil.DecodeJSON(buf, out)
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/expiration_util.go b/vendor/github.com/hashicorp/vault/vault/expiration_util.go
new file mode 100644
index 00000000..c1bdaae1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/expiration_util.go
@@ -0,0 +1,29 @@
+// +build !enterprise
+
+package vault
+
+import (
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func (m *ExpirationManager) leaseView(*namespace.Namespace) *BarrierView {
+	return m.idView
+}
+
+func (m *ExpirationManager) tokenIndexView(*namespace.Namespace) *BarrierView {
+	return m.tokenView
+}
+
+func (m *ExpirationManager) collectLeases() (map[*namespace.Namespace][]string, int, error) {
+	leaseCount := 0
+	existing := make(map[*namespace.Namespace][]string)
+	keys, err := logical.CollectKeys(m.quitContext, m.leaseView(namespace.RootNamespace))
+	if err != nil {
+		return nil, 0, errwrap.Wrapf("failed to scan for leases: {{err}}", err)
+	}
+	existing[namespace.RootNamespace] = keys
+	leaseCount += len(keys)
+	return existing, leaseCount, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/generate_root.go b/vendor/github.com/hashicorp/vault/vault/generate_root.go
new file mode 100644
index 00000000..ca8ab54e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/generate_root.go
@@ -0,0 +1,391 @@
+package vault
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"errors"
+	"fmt"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/helper/pgpkeys"
+	"github.com/hashicorp/vault/helper/xor"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/shamir"
+)
+
+const coreDROperationTokenPath = "core/dr-operation-token"
+
+var (
+	// GenerateStandardRootTokenStrategy is the strategy used to generate a
+	// typical root token
+	GenerateStandardRootTokenStrategy GenerateRootStrategy = generateStandardRootToken{}
+
+	// GenerateDROperationTokenStrategy is the strategy used to generate a
+	// DR operational token
+	GenerateDROperationTokenStrategy GenerateRootStrategy = generateStandardRootToken{}
+)
+
+// GenerateRootStrategy allows us to swap out the strategy we want to use to
+// create a token upon completion of the 
generate root process. +type GenerateRootStrategy interface { + generate(context.Context, *Core) (string, func(), error) + authenticate(context.Context, *Core, []byte) error +} + +// generateStandardRootToken implements the GenerateRootStrategy and is in +// charge of creating standard root tokens. +type generateStandardRootToken struct{} + +func (g generateStandardRootToken) authenticate(ctx context.Context, c *Core, combinedKey []byte) error { + masterKey, err := c.unsealKeyToMasterKey(ctx, combinedKey) + if err != nil { + return errwrap.Wrapf("unable to authenticate: {{err}}", err) + } + if err := c.barrier.VerifyMaster(masterKey); err != nil { + return errwrap.Wrapf("master key verification failed: {{err}}", err) + } + + return nil +} + +func (g generateStandardRootToken) generate(ctx context.Context, c *Core) (string, func(), error) { + te, err := c.tokenStore.rootToken(ctx) + if err != nil { + c.logger.Error("root token generation failed", "error", err) + return "", nil, err + } + if te == nil { + c.logger.Error("got nil token entry back from root generation") + return "", nil, fmt.Errorf("got nil token entry back from root generation") + } + + cleanupFunc := func() { + c.tokenStore.revokeOrphan(ctx, te.ID) + } + + return te.ID, cleanupFunc, nil +} + +// GenerateRootConfig holds the configuration for a root generation +// command. +type GenerateRootConfig struct { + Nonce string + PGPKey string + PGPFingerprint string + OTP string + Strategy GenerateRootStrategy +} + +// GenerateRootResult holds the result of a root generation update +// command +type GenerateRootResult struct { + Progress int + Required int + EncodedToken string + PGPFingerprint string +} + +// GenerateRootProgress is used to return the root generation progress (num shares) +func (c *Core) GenerateRootProgress() (int, error) { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() && !c.recoveryMode { + return 0, consts.ErrSealed + } + if c.standby && !c.recoveryMode { + return 0, consts.ErrStandby + } + + c.generateRootLock.Lock() + defer c.generateRootLock.Unlock() + + return len(c.generateRootProgress), nil +} + +// GenerateRootConfiguration is used to read the root generation configuration +// It stubbornly refuses to return the OTP if one is there. 
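
Taken together, the strategy interface, the init/update entry points, and the progress counter form a small state machine: init stores a config and nonce, updates accumulate key shares, and completion encodes the new token against the OTP. Below is a condensed, illustrative sketch of driving that flow from inside the vault package; the helper name generateRootSketch and the otp/shares arguments are hypothetical, and a real caller would go through the HTTP handlers instead:

func generateRootSketch(ctx context.Context, c *Core, otp string, shares [][]byte) (string, error) {
	// Register the operation; the standard strategy will mint a regular
	// root token once enough shares arrive.
	if err := c.GenerateRootInit(otp, "", GenerateStandardRootTokenStrategy); err != nil {
		return "", err
	}

	// Read back the nonce; note that the OTP is deliberately scrubbed here.
	conf, err := c.GenerateRootConfiguration()
	if err != nil {
		return "", err
	}

	// Feed in unseal key shares until the threshold is met.
	var result *GenerateRootResult
	for _, share := range shares {
		if result, err = c.GenerateRootUpdate(ctx, share, conf.Nonce, GenerateStandardRootTokenStrategy); err != nil {
			return "", err
		}
	}
	if result == nil || result.EncodedToken == "" {
		return "", fmt.Errorf("not enough key shares supplied")
	}

	// EncodedToken is base64(token XOR otp); XOR is symmetric, so reverse
	// the two steps to recover the plaintext token.
	raw, err := base64.RawStdEncoding.DecodeString(result.EncodedToken)
	if err != nil {
		return "", err
	}
	tokenBytes, err := xor.XORBytes(raw, []byte(otp))
	if err != nil {
		return "", err
	}
	return string(tokenBytes), nil
}

The GenerateRootConfiguration accessor defined next is what makes the read-back step above safe to expose.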
+func (c *Core) GenerateRootConfiguration() (*GenerateRootConfig, error) { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() && !c.recoveryMode { + return nil, consts.ErrSealed + } + if c.standby && !c.recoveryMode { + return nil, consts.ErrStandby + } + + c.generateRootLock.Lock() + defer c.generateRootLock.Unlock() + + // Copy the config if any + var conf *GenerateRootConfig + if c.generateRootConfig != nil { + conf = new(GenerateRootConfig) + *conf = *c.generateRootConfig + conf.OTP = "" + conf.Strategy = nil + } + return conf, nil +} + +// GenerateRootInit is used to initialize the root generation settings +func (c *Core) GenerateRootInit(otp, pgpKey string, strategy GenerateRootStrategy) error { + var fingerprint string + switch { + case len(otp) > 0: + if len(otp) != TokenLength+2 { + return fmt.Errorf("OTP string is wrong length") + } + + case len(pgpKey) > 0: + fingerprints, err := pgpkeys.GetFingerprints([]string{pgpKey}, nil) + if err != nil { + return errwrap.Wrapf("error parsing PGP key: {{err}}", err) + } + if len(fingerprints) != 1 || fingerprints[0] == "" { + return fmt.Errorf("could not acquire PGP key entity") + } + fingerprint = fingerprints[0] + + default: + return fmt.Errorf("otp or pgp_key parameter must be provided") + } + + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() && !c.recoveryMode { + return consts.ErrSealed + } + barrierSealed, err := c.barrier.Sealed() + if err != nil { + return errors.New("unable to check barrier seal status") + } + if !barrierSealed && c.recoveryMode { + return errors.New("attempt to generate recovery operation token when already unsealed") + } + if c.standby && !c.recoveryMode { + return consts.ErrStandby + } + + c.generateRootLock.Lock() + defer c.generateRootLock.Unlock() + + // Prevent multiple concurrent root generations + if c.generateRootConfig != nil { + return fmt.Errorf("root generation already in progress") + } + + // Copy the configuration + generationNonce, err := uuid.GenerateUUID() + if err != nil { + return err + } + + c.generateRootConfig = &GenerateRootConfig{ + Nonce: generationNonce, + OTP: otp, + PGPKey: pgpKey, + PGPFingerprint: fingerprint, + Strategy: strategy, + } + + if c.logger.IsInfo() { + switch strategy.(type) { + case generateStandardRootToken: + c.logger.Info("root generation initialized", "nonce", c.generateRootConfig.Nonce) + case *generateRecoveryToken: + c.logger.Info("recovery operation token generation initialized", "nonce", c.generateRootConfig.Nonce) + default: + c.logger.Info("dr operation token generation initialized", "nonce", c.generateRootConfig.Nonce) + } + } + + return nil +} + +// GenerateRootUpdate is used to provide a new key part +func (c *Core) GenerateRootUpdate(ctx context.Context, key []byte, nonce string, strategy GenerateRootStrategy) (*GenerateRootResult, error) { + // Verify the key length + min, max := c.barrier.KeyLength() + max += shamir.ShareOverhead + if len(key) < min { + return nil, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)} + } + if len(key) > max { + return nil, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)} + } + + // Get the seal configuration + var config *SealConfig + var err error + if c.seal.RecoveryKeySupported() { + config, err = c.seal.RecoveryConfig(ctx) + if err != nil { + return nil, err + } + } else { + config, err = c.seal.BarrierConfig(ctx) + if err != nil { + return nil, err + } + } + + // Ensure the barrier is initialized + if config == nil { + return nil, 
ErrNotInit + } + + // Ensure we are already unsealed + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() && !c.recoveryMode { + return nil, consts.ErrSealed + } + + barrierSealed, err := c.barrier.Sealed() + if err != nil { + return nil, errors.New("unable to check barrier seal status") + } + if !barrierSealed && c.recoveryMode { + return nil, errors.New("attempt to generate recovery operation token when already unsealed") + } + + if c.standby && !c.recoveryMode { + return nil, consts.ErrStandby + } + + c.generateRootLock.Lock() + defer c.generateRootLock.Unlock() + + // Ensure a generateRoot is in progress + if c.generateRootConfig == nil { + return nil, fmt.Errorf("no root generation in progress") + } + + if nonce != c.generateRootConfig.Nonce { + return nil, fmt.Errorf("incorrect nonce supplied; nonce for this root generation operation is %q", c.generateRootConfig.Nonce) + } + + if strategy != c.generateRootConfig.Strategy { + return nil, fmt.Errorf("incorrect strategy supplied; a generate root operation of another type is already in progress") + } + + // Check if we already have this piece + for _, existing := range c.generateRootProgress { + if bytes.Equal(existing, key) { + return nil, fmt.Errorf("given key has already been provided during this generation operation") + } + } + + // Store this key + c.generateRootProgress = append(c.generateRootProgress, key) + progress := len(c.generateRootProgress) + + // Check if we don't have enough keys to unlock + if len(c.generateRootProgress) < config.SecretThreshold { + if c.logger.IsDebug() { + c.logger.Debug("cannot generate root, not enough keys", "keys", progress, "threshold", config.SecretThreshold) + } + return &GenerateRootResult{ + Progress: progress, + Required: config.SecretThreshold, + PGPFingerprint: c.generateRootConfig.PGPFingerprint, + }, nil + } + + // Combine the key parts + var combinedKey []byte + if config.SecretThreshold == 1 { + combinedKey = c.generateRootProgress[0] + c.generateRootProgress = nil + } else { + combinedKey, err = shamir.Combine(c.generateRootProgress) + c.generateRootProgress = nil + if err != nil { + return nil, errwrap.Wrapf("failed to compute master key: {{err}}", err) + } + } + + if err := strategy.authenticate(ctx, c, combinedKey); err != nil { + c.logger.Error("root generation aborted", "error", err.Error()) + return nil, errwrap.Wrapf("root generation aborted: {{err}}", err) + } + + // Run the generate strategy + token, cleanupFunc, err := strategy.generate(ctx, c) + if err != nil { + return nil, err + } + + var tokenBytes []byte + + // Get the encoded value first so that if there is an error we don't create + // the root token. + switch { + case len(c.generateRootConfig.OTP) > 0: + // This function performs decoding checks so rather than decode the OTP, + // just encode the value we're passing in. 
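// (Decoding is the mirror image: base64-decode the encoded token with
// RawStdEncoding, then XOR it against the OTP. Note the asymmetry between
// the two branches below: the OTP path emits unpadded base64, while the
// PGP path emits padded StdEncoding base64.)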
+ tokenBytes, err = xor.XORBytes([]byte(c.generateRootConfig.OTP), []byte(token)) + if err != nil { + cleanupFunc() + c.logger.Error("xor of root token failed", "error", err) + return nil, err + } + token = base64.RawStdEncoding.EncodeToString(tokenBytes) + + case len(c.generateRootConfig.PGPKey) > 0: + _, tokenBytesArr, err := pgpkeys.EncryptShares([][]byte{[]byte(token)}, []string{c.generateRootConfig.PGPKey}) + if err != nil { + cleanupFunc() + c.logger.Error("error encrypting new root token", "error", err) + return nil, err + } + token = base64.StdEncoding.EncodeToString(tokenBytesArr[0]) + + default: + cleanupFunc() + return nil, fmt.Errorf("unreachable condition") + } + + results := &GenerateRootResult{ + Progress: progress, + Required: config.SecretThreshold, + EncodedToken: token, + PGPFingerprint: c.generateRootConfig.PGPFingerprint, + } + + switch strategy.(type) { + case generateStandardRootToken: + c.logger.Info("root generation finished", "nonce", c.generateRootConfig.Nonce) + case *generateRecoveryToken: + c.logger.Info("recovery operation token generation finished", "nonce", c.generateRootConfig.Nonce) + default: + c.logger.Info("dr operation token generation finished", "nonce", c.generateRootConfig.Nonce) + } + + c.generateRootProgress = nil + c.generateRootConfig = nil + return results, nil +} + +// GenerateRootCancel is used to cancel an in-progress root generation +func (c *Core) GenerateRootCancel() error { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() && !c.recoveryMode { + return consts.ErrSealed + } + if c.standby && !c.recoveryMode { + return consts.ErrStandby + } + + c.generateRootLock.Lock() + defer c.generateRootLock.Unlock() + + // Clear any progress or config + c.generateRootConfig = nil + c.generateRootProgress = nil + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/generate_root_recovery.go b/vendor/github.com/hashicorp/vault/vault/generate_root_recovery.go new file mode 100644 index 00000000..e677802e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/generate_root_recovery.go @@ -0,0 +1,51 @@ +package vault + +import ( + "context" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/base62" + "go.uber.org/atomic" +) + +// GenerateRecoveryTokenStrategy is the strategy used to generate a +// recovery token +func GenerateRecoveryTokenStrategy(token *atomic.String) GenerateRootStrategy { + return &generateRecoveryToken{token: token} +} + +// generateRecoveryToken implements the GenerateRootStrategy and is in +// charge of creating recovery tokens. +type generateRecoveryToken struct { + token *atomic.String +} + +func (g *generateRecoveryToken) authenticate(ctx context.Context, c *Core, combinedKey []byte) error { + key, err := c.unsealKeyToMasterKey(ctx, combinedKey) + if err != nil { + return errwrap.Wrapf("unable to authenticate: {{err}}", err) + } + + // Use the retrieved master key to unseal the barrier + if err := c.barrier.Unseal(ctx, key); err != nil { + return errwrap.Wrapf("recovery operation token generation failed, cannot unseal barrier: {{err}}", err) + } + + for _, v := range c.postRecoveryUnsealFuncs { + if err := v(); err != nil { + return errwrap.Wrapf("failed to run post unseal func: {{err}}", err) + } + } + return nil +} + +func (g *generateRecoveryToken) generate(ctx context.Context, c *Core) (string, func(), error) { + id, err := base62.Random(TokenLength) + if err != nil { + return "", nil, err + } + token := "r." 
+ id + g.token.Store(token) + + return token, func() { g.token.Store("") }, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/ha.go b/vendor/github.com/hashicorp/vault/vault/ha.go new file mode 100644 index 00000000..3e89b1c2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/ha.go @@ -0,0 +1,981 @@ +package vault + +import ( + "context" + "crypto/ecdsa" + "crypto/x509" + "errors" + "fmt" + "strings" + "sync/atomic" + "time" + + "github.com/hashicorp/vault/vault/seal/shamir" + + "github.com/armon/go-metrics" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "github.com/oklog/run" +) + +const ( + // lockRetryInterval is the interval we re-attempt to acquire the + // HA lock if an error is encountered + lockRetryInterval = 10 * time.Second + + // leaderCheckInterval is how often a standby checks for a new leader + leaderCheckInterval = 2500 * time.Millisecond + + // keyRotateCheckInterval is how often a standby checks for a key + // rotation taking place. + keyRotateCheckInterval = 10 * time.Second + + // leaderPrefixCleanDelay is how long to wait between deletions + // of orphaned leader keys, to prevent slamming the backend. + leaderPrefixCleanDelay = 200 * time.Millisecond +) + +var ( + // KeyRotateGracePeriod is how long we allow an upgrade path + // for standby instances before we delete the upgrade keys + KeyRotateGracePeriod = 2 * time.Minute + + addEnterpriseHaActors func(*Core, *run.Group) chan func() = addEnterpriseHaActorsNoop + interruptPerfStandby func(chan func(), chan struct{}) chan struct{} = interruptPerfStandbyNoop +) + +func addEnterpriseHaActorsNoop(*Core, *run.Group) chan func() { return nil } +func interruptPerfStandbyNoop(chan func(), chan struct{}) chan struct{} { + return make(chan struct{}) +} + +// Standby checks if the Vault is in standby mode +func (c *Core) Standby() (bool, error) { + c.stateLock.RLock() + standby := c.standby + c.stateLock.RUnlock() + return standby, nil +} + +// PerfStandby checks if the vault is a performance standby +func (c *Core) PerfStandby() bool { + c.stateLock.RLock() + perfStandby := c.perfStandby + c.stateLock.RUnlock() + return perfStandby +} + +// Leader is used to get the current active leader +func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err error) { + // Check if HA enabled. 
We don't need the lock for this check as it's set + // on startup and never modified + if c.ha == nil { + return false, "", "", ErrHANotEnabled + } + + // Check if sealed + if c.Sealed() { + return false, "", "", consts.ErrSealed + } + + c.stateLock.RLock() + + // Check if we are the leader + if !c.standby { + c.stateLock.RUnlock() + return true, c.redirectAddr, c.ClusterAddr(), nil + } + + // Initialize a lock + lock, err := c.ha.LockWith(CoreLockPath, "read") + if err != nil { + c.stateLock.RUnlock() + return false, "", "", err + } + + // Read the value + held, leaderUUID, err := lock.Value() + if err != nil { + c.stateLock.RUnlock() + return false, "", "", err + } + if !held { + c.stateLock.RUnlock() + return false, "", "", nil + } + + var localLeaderUUID, localRedirectAddr, localClusterAddr string + clusterLeaderParams := c.clusterLeaderParams.Load().(*ClusterLeaderParams) + if clusterLeaderParams != nil { + localLeaderUUID = clusterLeaderParams.LeaderUUID + localRedirectAddr = clusterLeaderParams.LeaderRedirectAddr + localClusterAddr = clusterLeaderParams.LeaderClusterAddr + } + + // If the leader hasn't changed, return the cached value; nothing changes + // mid-leadership, and the barrier caches anyways + if leaderUUID == localLeaderUUID && localRedirectAddr != "" { + c.stateLock.RUnlock() + return false, localRedirectAddr, localClusterAddr, nil + } + + c.logger.Trace("found new active node information, refreshing") + + defer c.stateLock.RUnlock() + c.leaderParamsLock.Lock() + defer c.leaderParamsLock.Unlock() + + // Validate base conditions again + clusterLeaderParams = c.clusterLeaderParams.Load().(*ClusterLeaderParams) + if clusterLeaderParams != nil { + localLeaderUUID = clusterLeaderParams.LeaderUUID + localRedirectAddr = clusterLeaderParams.LeaderRedirectAddr + localClusterAddr = clusterLeaderParams.LeaderClusterAddr + } else { + localLeaderUUID = "" + localRedirectAddr = "" + localClusterAddr = "" + } + + if leaderUUID == localLeaderUUID && localRedirectAddr != "" { + return false, localRedirectAddr, localClusterAddr, nil + } + + key := coreLeaderPrefix + leaderUUID + // Use background because postUnseal isn't run on standby + entry, err := c.barrier.Get(context.Background(), key) + if err != nil { + return false, "", "", err + } + if entry == nil { + return false, "", "", nil + } + + var oldAdv bool + + var adv activeAdvertisement + err = jsonutil.DecodeJSON(entry.Value, &adv) + if err != nil { + // Fall back to pre-struct handling + adv.RedirectAddr = string(entry.Value) + c.logger.Debug("parsed redirect addr for new active node", "redirect_addr", adv.RedirectAddr) + oldAdv = true + } + + if !oldAdv { + c.logger.Debug("parsing information for new active node", "active_cluster_addr", adv.ClusterAddr, "active_redirect_addr", adv.RedirectAddr) + + // Ensure we are using current values + err = c.loadLocalClusterTLS(adv) + if err != nil { + return false, "", "", err + } + + // This will ensure that we both have a connection at the ready and that + // the address is the current known value + // Since this is standby, we don't use the active context. 
Later we may + // use a process-scoped context + err = c.refreshRequestForwardingConnection(context.Background(), adv.ClusterAddr) + if err != nil { + return false, "", "", err + } + } + + // Don't set these until everything has been parsed successfully or we'll + // never try again + c.clusterLeaderParams.Store(&ClusterLeaderParams{ + LeaderUUID: leaderUUID, + LeaderRedirectAddr: adv.RedirectAddr, + LeaderClusterAddr: adv.ClusterAddr, + }) + + return false, adv.RedirectAddr, adv.ClusterAddr, nil +} + +// StepDown is used to step down from leadership +func (c *Core) StepDown(httpCtx context.Context, req *logical.Request) (retErr error) { + defer metrics.MeasureSince([]string{"core", "step_down"}, time.Now()) + + if req == nil { + retErr = multierror.Append(retErr, errors.New("nil request to step-down")) + return retErr + } + + c.stateLock.RLock() + defer c.stateLock.RUnlock() + + if c.Sealed() { + return nil + } + if c.ha == nil || c.standby { + return nil + } + + ctx, cancel := context.WithCancel(namespace.RootContext(nil)) + defer cancel() + + go func() { + select { + case <-ctx.Done(): + case <-httpCtx.Done(): + cancel() + } + }() + + acl, te, entity, identityPolicies, err := c.fetchACLTokenEntryAndEntity(ctx, req) + if err != nil { + retErr = multierror.Append(retErr, err) + return retErr + } + + // Audit-log the request before going any further + auth := &logical.Auth{ + ClientToken: req.ClientToken, + Accessor: req.ClientTokenAccessor, + } + if te != nil { + auth.IdentityPolicies = identityPolicies[te.NamespaceID] + delete(identityPolicies, te.NamespaceID) + auth.ExternalNamespacePolicies = identityPolicies + auth.TokenPolicies = te.Policies + auth.Policies = append(te.Policies, identityPolicies[te.NamespaceID]...) + auth.Metadata = te.Meta + auth.DisplayName = te.DisplayName + auth.EntityID = te.EntityID + auth.TokenType = te.Type + } + + logInput := &logical.LogInput{ + Auth: auth, + Request: req, + } + if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { + c.logger.Error("failed to audit request", "request_path", req.Path, "error", err) + retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue")) + return retErr + } + + if entity != nil && entity.Disabled { + c.logger.Warn("permission denied as the entity on the token is disabled") + retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + return retErr + } + + if te != nil && te.EntityID != "" && entity == nil { + c.logger.Warn("permission denied as the entity on the token is invalid") + retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + return retErr + } + + // Attempt to use the token (decrement num_uses) + if te != nil { + te, err = c.tokenStore.UseToken(ctx, te) + if err != nil { + c.logger.Error("failed to use token", "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + return retErr + } + if te == nil { + // Token has been revoked + retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + return retErr + } + } + + // Verify that this operation is allowed + authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{ + RootPrivsRequired: true, + }) + if !authResults.Allowed { + retErr = multierror.Append(retErr, authResults.Error) + if authResults.Error.ErrorOrNil() == nil || authResults.DeniedError { + retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + } + return retErr + } + + if te != nil && te.NumUses == tokenRevocationPending { + // Token needs to be revoked. 
We do this immediately here because + // we won't have a token store after sealing. + leaseID, err := c.expiration.CreateOrFetchRevocationLeaseByToken(c.activeContext, te) + if err == nil { + err = c.expiration.Revoke(c.activeContext, leaseID) + } + if err != nil { + c.logger.Error("token needed revocation before step-down but failed to revoke", "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + } + } + + select { + case c.manualStepDownCh <- struct{}{}: + default: + c.logger.Warn("manual step-down operation already queued") + } + + return retErr +} + +// runStandby is a long-running process that manages a number of the HA +// subsystems. +func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { + defer close(doneCh) + defer close(manualStepDownCh) + c.logger.Info("entering standby mode") + + var g run.Group + newLeaderCh := addEnterpriseHaActors(c, &g) + { + // This will cause all the other actors to close when the stop channel + // is closed. + g.Add(func() error { + <-stopCh + return nil + }, func(error) {}) + } + { + // Monitor for key rotations + keyRotateStop := make(chan struct{}) + + g.Add(func() error { + c.periodicCheckKeyUpgrades(context.Background(), keyRotateStop) + return nil + }, func(error) { + close(keyRotateStop) + c.logger.Debug("shutting down periodic key rotation checker") + }) + } + { + // Monitor for new leadership + checkLeaderStop := make(chan struct{}) + + g.Add(func() error { + c.periodicLeaderRefresh(newLeaderCh, checkLeaderStop) + return nil + }, func(error) { + close(checkLeaderStop) + c.logger.Debug("shutting down periodic leader refresh") + }) + } + { + // Wait for leadership + leaderStopCh := make(chan struct{}) + + g.Add(func() error { + c.waitForLeadership(newLeaderCh, manualStepDownCh, leaderStopCh) + return nil + }, func(error) { + close(leaderStopCh) + c.logger.Debug("shutting down leader elections") + }) + } + + // Start all the actors + g.Run() +} + +// waitForLeadership is a long-running routine that is used when an HA backend +// is enabled. It waits until we are leader and switches this Vault to +// active. +func (c *Core) waitForLeadership(newLeaderCh chan func(), manualStepDownCh, stopCh chan struct{}) { + var manualStepDown bool + for { + // Check for a shutdown + select { + case <-stopCh: + c.logger.Debug("stop channel triggered in runStandby") + return + default: + // If we've just stepped down, we could instantly grab the lock again. Give + // the other nodes a chance.
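// (manualStepDown is set near the bottom of this loop, when the active
// node drops leadership via the manual step-down channel.)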
+ if manualStepDown { + time.Sleep(manualStepDownSleepPeriod) + manualStepDown = false + } + } + + // Create a lock + uuid, err := uuid.GenerateUUID() + if err != nil { + c.logger.Error("failed to generate uuid", "error", err) + return + } + lock, err := c.ha.LockWith(CoreLockPath, uuid) + if err != nil { + c.logger.Error("failed to create lock", "error", err) + return + } + + // Attempt the acquisition + leaderLostCh := c.acquireLock(lock, stopCh) + + // Bail if we are being shutdown + if leaderLostCh == nil { + return + } + + if atomic.LoadUint32(c.neverBecomeActive) == 1 { + c.heldHALock = nil + lock.Unlock() + c.logger.Info("marked never become active, giving up active state") + continue + } + + c.logger.Info("acquired lock, enabling active operation") + + // This is used later to log a metrics event; this can be helpful to + // detect flapping + activeTime := time.Now() + + continueCh := interruptPerfStandby(newLeaderCh, stopCh) + + // Grab the statelock or stop + if stopped := grabLockOrStop(c.stateLock.Lock, c.stateLock.Unlock, stopCh); stopped { + lock.Unlock() + close(continueCh) + metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime) + return + } + + if c.Sealed() { + c.logger.Warn("grabbed HA lock but already sealed, exiting") + lock.Unlock() + close(continueCh) + c.stateLock.Unlock() + metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime) + return + } + + // Store the lock so that we can manually clear it later if needed + c.heldHALock = lock + + // Create the active context + activeCtx, activeCtxCancel := context.WithCancel(namespace.RootContext(nil)) + c.activeContext = activeCtx + c.activeContextCancelFunc.Store(activeCtxCancel) + + // This block is used to wipe barrier/seal state and verify that + // everything is sane. If we have no sanity in the barrier, we actually + // seal, as there's little we can do. + { + c.seal.SetBarrierConfig(activeCtx, nil) + if c.seal.RecoveryKeySupported() { + c.seal.SetRecoveryConfig(activeCtx, nil) + } + + if err := c.performKeyUpgrades(activeCtx); err != nil { + c.logger.Error("error performing key upgrades", "error", err) + + // If we fail due to anything other than a context canceled + // error we should shutdown as we may have the incorrect Keys. + if !strings.Contains(err.Error(), context.Canceled.Error()) { + // We call this in a goroutine so that we can give up the + // statelock and have this shut us down; sealInternal has a + // workflow where it watches for the stopCh to close so we want + // to return from here + go c.Shutdown() + } + + c.heldHALock = nil + lock.Unlock() + close(continueCh) + c.stateLock.Unlock() + metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime) + + // If we are shutting down we should return from this function, + // otherwise continue + if !strings.Contains(err.Error(), context.Canceled.Error()) { + continue + } else { + return + } + } + } + + { + // Clear previous local cluster cert info so we generate new. 
Since the + // UUID will have changed, standbys will know to look for new info + c.localClusterParsedCert.Store((*x509.Certificate)(nil)) + c.localClusterCert.Store(([]byte)(nil)) + c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil)) + + if err := c.setupCluster(activeCtx); err != nil { + c.heldHALock = nil + lock.Unlock() + close(continueCh) + c.stateLock.Unlock() + c.logger.Error("cluster setup failed", "error", err) + metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime) + continue + } + + } + // Advertise as leader + if err := c.advertiseLeader(activeCtx, uuid, leaderLostCh); err != nil { + c.heldHALock = nil + lock.Unlock() + close(continueCh) + c.stateLock.Unlock() + c.logger.Error("leader advertisement setup failed", "error", err) + metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime) + continue + } + + // Attempt the post-unseal process + err = c.postUnseal(activeCtx, activeCtxCancel, standardUnsealStrategy{}) + if err == nil { + c.standby = false + c.leaderUUID = uuid + } + + close(continueCh) + c.stateLock.Unlock() + + // Handle a failure to unseal + if err != nil { + c.logger.Error("post-unseal setup failed", "error", err) + lock.Unlock() + metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime) + continue + } + + // Monitor a loss of leadership + select { + case <-leaderLostCh: + c.logger.Warn("leadership lost, stopping active operation") + case <-stopCh: + case <-manualStepDownCh: + manualStepDown = true + c.logger.Warn("stepping down from active operation to standby") + } + + // Stop Active Duty + { + // Spawn this in a goroutine so we can cancel the context and + // unblock any inflight requests that are holding the statelock. + go func() { + select { + case <-activeCtx.Done(): + // Attempt to drain any inflight requests + case <-time.After(DefaultMaxRequestDuration): + activeCtxCancel() + } + }() + + // Grab lock if we are not stopped + stopped := grabLockOrStop(c.stateLock.Lock, c.stateLock.Unlock, stopCh) + + // Cancel the context in case the goroutine above hasn't done it + // yet + activeCtxCancel() + metrics.MeasureSince([]string{"core", "leadership_lost"}, activeTime) + + // Mark as standby + c.standby = true + c.leaderUUID = "" + + // Seal + if err := c.preSeal(); err != nil { + c.logger.Error("pre-seal teardown failed", "error", err) + } + + // If we are not meant to keep the HA lock, clear it + if atomic.LoadUint32(c.keepHALockOnStepDown) == 0 { + if err := c.clearLeader(uuid); err != nil { + c.logger.Error("clearing leader advertisement failed", "error", err) + } + + if err := c.heldHALock.Unlock(); err != nil { + c.logger.Error("unlocking HA lock failed", "error", err) + } + c.heldHALock = nil + } + + // If we are stopped, return; otherwise unlock the statelock + if stopped { + return + } + c.stateLock.Unlock() + } + } +} + +// grabLockOrStop returns true if we failed to get the lock before stopCh +// was closed. Returns false if the lock was obtained, in which case it's +// the caller's responsibility to unlock it.
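
Before the implementation, a minimal usage sketch (illustrative only; it assumes a sync import and substitutes a plain mutex for c.stateLock):

func lockOrAbort(mu *sync.Mutex, stopCh chan struct{}) bool {
	// grabLockOrStop returns true when stopCh closes first; in that case
	// its goroutine releases the lock itself once acquired, so the caller
	// must not Unlock.
	if stopped := grabLockOrStop(mu.Lock, mu.Unlock, stopCh); stopped {
		return false
	}
	// The caller now holds mu and is responsible for unlocking it.
	return true
}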
+func grabLockOrStop(lockFunc, unlockFunc func(), stopCh chan struct{}) (stopped bool) { + // Grab the lock as we need it for cluster setup, which needs to happen + // before advertising. + lockGrabbedCh := make(chan struct{}) + go func() { + // Grab the lock + lockFunc() + // If stopCh has been closed, which only happens while the + // stateLock is held, we have actually terminated, so we just + // instantly give up the lock, otherwise we notify that it's ready + // for consumption + select { + case <-stopCh: + unlockFunc() + default: + close(lockGrabbedCh) + } + }() + + select { + case <-stopCh: + return true + case <-lockGrabbedCh: + // We now have the lock and can use it + } + + return false +} + +// This checks the leader periodically to ensure that we switch RPC to a new +// leader pretty quickly. There is logic in Leader() already to not make this +// onerous and avoid more traffic than needed, so we just call that and ignore +// the result. +func (c *Core) periodicLeaderRefresh(newLeaderCh chan func(), stopCh chan struct{}) { + opCount := new(int32) + + clusterAddr := "" + for { + select { + case <-time.After(leaderCheckInterval): + count := atomic.AddInt32(opCount, 1) + if count > 1 { + atomic.AddInt32(opCount, -1) + continue + } + // We do this in a goroutine because otherwise, if this refresh is + // called while we're shutting down, the call to Leader() can + // deadlock, which then means stopCh can never be seen and we can + // block shutdown + go func() { + // Bind locally, as the race detector is tripping here + lopCount := opCount + isLeader, _, newClusterAddr, _ := c.Leader() + + // If we are the leader reset the clusterAddr since the next + // failover might go to the node that was previously active. + if isLeader { + clusterAddr = "" + } + + if !isLeader && newClusterAddr != clusterAddr && newLeaderCh != nil { + select { + case newLeaderCh <- nil: + c.logger.Debug("new leader found, triggering new leader channel") + clusterAddr = newClusterAddr + default: + c.logger.Debug("new leader found, but still processing previous leader change") + } + + } + atomic.AddInt32(lopCount, -1) + }() + case <-stopCh: + return + } + } +} + +// periodicCheckKeyUpgrades is used to watch for key rotation events as a standby +func (c *Core) periodicCheckKeyUpgrades(ctx context.Context, stopCh chan struct{}) { + opCount := new(int32) + _, isRaft := c.underlyingPhysical.(*raft.RaftBackend) + for { + select { + case <-time.After(keyRotateCheckInterval): + count := atomic.AddInt32(opCount, 1) + if count > 1 { + atomic.AddInt32(opCount, -1) + continue + } + + go func() { + // Bind locally, as the race detector is tripping here + lopCount := opCount + + // Only check if we are a standby + c.stateLock.RLock() + standby := c.standby + c.stateLock.RUnlock() + if !standby { + atomic.AddInt32(lopCount, -1) + return + } + + // Check for a poison pill. If we can read it, it means we have stale + // keys (e.g. from replication being activated) and we need to seal to + // be unsealed again. + entry, _ := c.barrier.Get(ctx, poisonPillPath) + if entry != nil && len(entry.Value) > 0 { + c.logger.Warn("encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again") + // If we are using raft storage we do not want to shut down + // raft during replication secondary enablement. This will + // allow us to keep making progress on the raft log.
+ go c.sealInternalWithOptions(true, false, !isRaft) + atomic.AddInt32(lopCount, -1) + return + } + + if err := c.checkKeyUpgrades(ctx); err != nil { + c.logger.Error("key rotation periodic upgrade check failed", "error", err) + } + + if err := c.checkRaftTLSKeyUpgrades(ctx); err != nil { + c.logger.Error("raft tls periodic upgrade check failed", "error", err) + } + + atomic.AddInt32(lopCount, -1) + return + }() + case <-stopCh: + return + } + } +} + +// checkKeyUpgrades is used to check if there have been any key rotations +// and if there is a chain of upgrades available +func (c *Core) checkKeyUpgrades(ctx context.Context) error { + for { + // Check for an upgrade + didUpgrade, newTerm, err := c.barrier.CheckUpgrade(ctx) + if err != nil { + return err + } + + // Nothing to do if no upgrade + if !didUpgrade { + break + } + if c.logger.IsInfo() { + c.logger.Info("upgraded to new key term", "term", newTerm) + } + } + return nil +} + +func (c *Core) reloadMasterKey(ctx context.Context) error { + if err := c.barrier.ReloadMasterKey(ctx); err != nil { + return errwrap.Wrapf("error reloading master key: {{err}}", err) + } + return nil +} + +func (c *Core) reloadShamirKey(ctx context.Context) error { + _ = c.seal.SetBarrierConfig(ctx, nil) + if cfg, _ := c.seal.BarrierConfig(ctx); cfg == nil { + return nil + } + var shamirKey []byte + switch c.seal.StoredKeysSupported() { + case StoredKeysSupportedGeneric: + return nil + case StoredKeysSupportedShamirMaster: + entry, err := c.barrier.Get(ctx, shamirKekPath) + if err != nil { + return err + } + if entry == nil { + return nil + } + shamirKey = entry.Value + case StoredKeysNotSupported: + keyring, err := c.barrier.Keyring() + if err != nil { + return errwrap.Wrapf("failed to update seal access: {{err}}", err) + } + shamirKey = keyring.masterKey + } + return c.seal.GetAccess().(*shamir.ShamirSeal).SetKey(shamirKey) +} + +func (c *Core) performKeyUpgrades(ctx context.Context) error { + if err := c.checkKeyUpgrades(ctx); err != nil { + return errwrap.Wrapf("error checking for key upgrades: {{err}}", err) + } + + if err := c.reloadMasterKey(ctx); err != nil { + return errwrap.Wrapf("error reloading master key: {{err}}", err) + } + + if err := c.barrier.ReloadKeyring(ctx); err != nil { + return errwrap.Wrapf("error reloading keyring: {{err}}", err) + } + + if err := c.reloadShamirKey(ctx); err != nil { + return errwrap.Wrapf("error reloading shamir kek key: {{err}}", err) + } + + if err := c.scheduleUpgradeCleanup(ctx); err != nil { + return errwrap.Wrapf("error scheduling upgrade cleanup: {{err}}", err) + } + + return nil +} + +// scheduleUpgradeCleanup is used to ensure that all the upgrade paths +// are cleaned up in a timely manner if a leader failover takes place +func (c *Core) scheduleUpgradeCleanup(ctx context.Context) error { + // List the upgrades + upgrades, err := c.barrier.List(ctx, keyringUpgradePrefix) + if err != nil { + return errwrap.Wrapf("failed to list upgrades: {{err}}", err) + } + + // Nothing to do if no upgrades + if len(upgrades) == 0 { + return nil + } + + // Schedule cleanup for all of them + time.AfterFunc(KeyRotateGracePeriod, func() { + sealed, err := c.barrier.Sealed() + if err != nil { + c.logger.Warn("failed to check barrier status at upgrade cleanup time") + return + } + if sealed { + c.logger.Warn("barrier sealed at upgrade cleanup time") + return + } + for _, upgrade := range upgrades { + path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade) + if err := c.barrier.Delete(ctx, path); err != nil { + 
c.logger.Error("failed to cleanup upgrade", "path", path, "error", err) + } + } + }) + return nil +} + +// acquireLock blocks until the lock is acquired, returning the leaderLostCh +func (c *Core) acquireLock(lock physical.Lock, stopCh <-chan struct{}) <-chan struct{} { + for { + // Attempt lock acquisition + leaderLostCh, err := lock.Lock(stopCh) + if err == nil { + return leaderLostCh + } + + // Retry the acquisition + c.logger.Error("failed to acquire lock", "error", err) + select { + case <-time.After(lockRetryInterval): + case <-stopCh: + return nil + } + } +} + +// advertiseLeader is used to advertise the current node as leader +func (c *Core) advertiseLeader(ctx context.Context, uuid string, leaderLostCh <-chan struct{}) error { + if leaderLostCh != nil { + go c.cleanLeaderPrefix(ctx, uuid, leaderLostCh) + } + + var key *ecdsa.PrivateKey + switch c.localClusterPrivateKey.Load().(type) { + case *ecdsa.PrivateKey: + key = c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey) + default: + c.logger.Error("unknown cluster private key type", "key_type", fmt.Sprintf("%T", c.localClusterPrivateKey.Load())) + return fmt.Errorf("unknown cluster private key type %T", c.localClusterPrivateKey.Load()) + } + + keyParams := &certutil.ClusterKeyParams{ + Type: corePrivateKeyTypeP521, + X: key.X, + Y: key.Y, + D: key.D, + } + + locCert := c.localClusterCert.Load().([]byte) + localCert := make([]byte, len(locCert)) + copy(localCert, locCert) + adv := &activeAdvertisement{ + RedirectAddr: c.redirectAddr, + ClusterAddr: c.ClusterAddr(), + ClusterCert: localCert, + ClusterKeyParams: keyParams, + } + val, err := jsonutil.EncodeJSON(adv) + if err != nil { + return err + } + ent := &logical.StorageEntry{ + Key: coreLeaderPrefix + uuid, + Value: val, + } + err = c.barrier.Put(ctx, ent) + if err != nil { + return err + } + + sd, ok := c.ha.(physical.ServiceDiscovery) + if ok { + if err := sd.NotifyActiveStateChange(); err != nil { + if c.logger.IsWarn() { + c.logger.Warn("failed to notify active status", "error", err) + } + } + } + return nil +} + +func (c *Core) cleanLeaderPrefix(ctx context.Context, uuid string, leaderLostCh <-chan struct{}) { + keys, err := c.barrier.List(ctx, coreLeaderPrefix) + if err != nil { + c.logger.Error("failed to list entries in core/leader", "error", err) + return + } + for len(keys) > 0 { + select { + case <-time.After(leaderPrefixCleanDelay): + if keys[0] != uuid { + c.barrier.Delete(ctx, coreLeaderPrefix+keys[0]) + } + keys = keys[1:] + case <-leaderLostCh: + return + } + } +} + +// clearLeader is used to clear our leadership entry +func (c *Core) clearLeader(uuid string) error { + key := coreLeaderPrefix + uuid + err := c.barrier.Delete(context.Background(), key) + + // Advertise ourselves as a standby + sd, ok := c.ha.(physical.ServiceDiscovery) + if ok { + if err := sd.NotifyActiveStateChange(); err != nil { + if c.logger.IsWarn() { + c.logger.Warn("failed to notify standby status", "error", err) + } + } + } + + return err +} + +func (c *Core) SetNeverBecomeActive(on bool) { + if on { + atomic.StoreUint32(c.neverBecomeActive, 1) + } else { + atomic.StoreUint32(c.neverBecomeActive, 0) + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_lookup.go b/vendor/github.com/hashicorp/vault/vault/identity_lookup.go new file mode 100644 index 00000000..a85577a7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_lookup.go @@ -0,0 +1,329 @@ +package vault + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/vault/helper/identity" + 
"github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func lookupPaths(i *IdentityStore) []*framework.Path { + return []*framework.Path{ + { + Pattern: "lookup/entity$", + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the entity.", + }, + "id": { + Type: framework.TypeString, + Description: "ID of the entity.", + }, + "alias_id": { + Type: framework.TypeString, + Description: "ID of the alias.", + }, + "alias_name": { + Type: framework.TypeString, + Description: "Name of the alias. This should be supplied in conjunction with 'alias_mount_accessor'.", + }, + "alias_mount_accessor": { + Type: framework.TypeString, + Description: "Accessor of the mount to which the alias belongs to. This should be supplied in conjunction with 'alias_name'.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathLookupEntityUpdate(), + }, + + HelpSynopsis: strings.TrimSpace(lookupHelp["lookup-entity"][0]), + HelpDescription: strings.TrimSpace(lookupHelp["lookup-entity"][1]), + }, + { + Pattern: "lookup/group$", + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the group.", + }, + "id": { + Type: framework.TypeString, + Description: "ID of the group.", + }, + "alias_id": { + Type: framework.TypeString, + Description: "ID of the alias.", + }, + "alias_name": { + Type: framework.TypeString, + Description: "Name of the alias. This should be supplied in conjunction with 'alias_mount_accessor'.", + }, + "alias_mount_accessor": { + Type: framework.TypeString, + Description: "Accessor of the mount to which the alias belongs to. This should be supplied in conjunction with 'alias_name'.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathLookupGroupUpdate(), + }, + + HelpSynopsis: strings.TrimSpace(lookupHelp["lookup-group"][0]), + HelpDescription: strings.TrimSpace(lookupHelp["lookup-group"][1]), + }, + } +} + +func (i *IdentityStore) pathLookupEntityUpdate() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + var entity *identity.Entity + var err error + + inputCount := 0 + + id := "" + idRaw, ok := d.GetOk("id") + if ok { + inputCount++ + id = idRaw.(string) + } + + name := "" + nameRaw, ok := d.GetOk("name") + if ok { + inputCount++ + name = nameRaw.(string) + } + + aliasID := "" + aliasIDRaw, ok := d.GetOk("alias_id") + if ok { + inputCount++ + aliasID = aliasIDRaw.(string) + } + + aliasName := "" + aliasNameRaw, ok := d.GetOk("alias_name") + if ok { + inputCount++ + aliasName = aliasNameRaw.(string) + } + + aliasMountAccessor := "" + aliasMountAccessorRaw, ok := d.GetOk("alias_mount_accessor") + if ok { + inputCount++ + aliasMountAccessor = aliasMountAccessorRaw.(string) + } + + switch { + case inputCount == 0: + return logical.ErrorResponse(fmt.Sprintf("query parameter not supplied")), nil + + case inputCount != 1: + switch { + case inputCount == 2 && aliasName != "" && aliasMountAccessor != "": + default: + return logical.ErrorResponse(fmt.Sprintf("query parameter conflict; please supply distinct set of query parameters")), nil + } + + case inputCount == 1: + switch { + case aliasName != "" || aliasMountAccessor != "": + return logical.ErrorResponse(fmt.Sprintf("both 'alias_name' and 'alias_mount_accessor' needs to be set")), nil + } + } + + switch { + case id != "": + 
entity, err = i.MemDBEntityByID(id, false) + if err != nil { + return nil, err + } + + case name != "": + entity, err = i.MemDBEntityByName(ctx, name, false) + if err != nil { + return nil, err + } + + case aliasID != "": + alias, err := i.MemDBAliasByID(aliasID, false, false) + if err != nil { + return nil, err + } + + if alias == nil { + break + } + + entity, err = i.MemDBEntityByAliasID(alias.ID, false) + if err != nil { + return nil, err + } + + case aliasName != "" && aliasMountAccessor != "": + alias, err := i.MemDBAliasByFactors(aliasMountAccessor, aliasName, false, false) + if err != nil { + return nil, err + } + + if alias == nil { + break + } + + entity, err = i.MemDBEntityByAliasID(alias.ID, false) + if err != nil { + return nil, err + } + } + + if entity == nil { + return nil, nil + } + + return i.handleEntityReadCommon(ctx, entity) + } +} + +func (i *IdentityStore) pathLookupGroupUpdate() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + var group *identity.Group + var err error + + inputCount := 0 + + id := "" + idRaw, ok := d.GetOk("id") + if ok { + inputCount++ + id = idRaw.(string) + } + + name := "" + nameRaw, ok := d.GetOk("name") + if ok { + inputCount++ + name = nameRaw.(string) + } + + aliasID := "" + aliasIDRaw, ok := d.GetOk("alias_id") + if ok { + inputCount++ + aliasID = aliasIDRaw.(string) + } + + aliasName := "" + aliasNameRaw, ok := d.GetOk("alias_name") + if ok { + inputCount++ + aliasName = aliasNameRaw.(string) + } + + aliasMountAccessor := "" + aliasMountAccessorRaw, ok := d.GetOk("alias_mount_accessor") + if ok { + inputCount++ + aliasMountAccessor = aliasMountAccessorRaw.(string) + } + + switch { + case inputCount == 0: + return logical.ErrorResponse(fmt.Sprintf("query parameter not supplied")), nil + + case inputCount != 1: + switch { + case inputCount == 2 && aliasName != "" && aliasMountAccessor != "": + default: + return logical.ErrorResponse(fmt.Sprintf("query parameter conflict; please supply distinct set of query parameters")), nil + } + + case inputCount == 1: + switch { + case aliasName != "" || aliasMountAccessor != "": + return logical.ErrorResponse(fmt.Sprintf("both 'alias_name' and 'alias_mount_accessor' needs to be set")), nil + } + } + + switch { + case id != "": + group, err = i.MemDBGroupByID(id, false) + if err != nil { + return nil, err + } + case name != "": + group, err = i.MemDBGroupByName(ctx, name, false) + if err != nil { + return nil, err + } + case aliasID != "": + alias, err := i.MemDBAliasByID(aliasID, false, true) + if err != nil { + return nil, err + } + + if alias == nil { + break + } + + group, err = i.MemDBGroupByAliasID(alias.ID, false) + if err != nil { + return nil, err + } + + case aliasName != "" && aliasMountAccessor != "": + alias, err := i.MemDBAliasByFactors(aliasMountAccessor, aliasName, false, true) + if err != nil { + return nil, err + } + + if alias == nil { + break + } + + group, err = i.MemDBGroupByAliasID(alias.ID, false) + if err != nil { + return nil, err + } + } + + if group == nil { + return nil, nil + } + + return i.handleGroupReadCommon(ctx, group) + } +} + +var lookupHelp = map[string][2]string{ + "lookup-entity": { + "Query entities based on various properties.", + `Distinct query parameters to be set: + - 'id' + To query the entity by its ID. + - 'name' + To query the entity by its name. + - 'alias_id' + To query the entity by the ID of any of its aliases. 
+ - 'alias_name' and 'alias_mount_accessor' + To query the entity by the unique factors that represent an alias; the name and the mount accessor. + `, + }, + "lookup-group": { + "Query groups based on various properties.", + `Distinct query parameters to be set: + - 'id' + To query the group by its ID. + - 'name' + To query the group by its name. + - 'alias_id' + To query the group by the ID of any of its aliases. + - 'alias_name' and 'alias_mount_accessor' + To query the group by the unique factors that represent an alias; the name and the mount accessor. + `, + }, +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store.go b/vendor/github.com/hashicorp/vault/vault/identity_store.go new file mode 100644 index 00000000..f4de53c1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store.go @@ -0,0 +1,592 @@ +package vault + +import ( + "context" + "fmt" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/storagepacker" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + groupBucketsPrefix = "packer/group/buckets/" +) + +var ( + caseSensitivityKey = "casesensitivity" + sendGroupUpgrade = func(*IdentityStore, *identity.Group) (bool, error) { return false, nil } + parseExtraEntityFromBucket = func(context.Context, *IdentityStore, *identity.Entity) (bool, error) { return false, nil } + addExtraEntityDataToResponse = func(*identity.Entity, map[string]interface{}) {} +) + +func (c *Core) IdentityStore() *IdentityStore { + return c.identityStore +} + +func (i *IdentityStore) resetDB(ctx context.Context) error { + var err error + + i.db, err = memdb.NewMemDB(identityStoreSchema(!i.disableLowerCasedNames)) + if err != nil { + return err + } + + return nil +} + +func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendConfig, logger log.Logger) (*IdentityStore, error) { + iStore := &IdentityStore{ + view: config.StorageView, + logger: logger, + core: core, + } + + // Create a memdb instance, which by default, operates on lower cased + // identity names + err := iStore.resetDB(ctx) + if err != nil { + return nil, err + } + + entitiesPackerLogger := iStore.logger.Named("storagepacker").Named("entities") + core.AddLogger(entitiesPackerLogger) + groupsPackerLogger := iStore.logger.Named("storagepacker").Named("groups") + core.AddLogger(groupsPackerLogger) + iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, entitiesPackerLogger, "") + if err != nil { + return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err) + } + + iStore.groupPacker, err = storagepacker.NewStoragePacker(iStore.view, groupsPackerLogger, groupBucketsPrefix) + if err != nil { + return nil, errwrap.Wrapf("failed to create group packer: {{err}}", err) + } + + iStore.Backend = &framework.Backend{ + BackendType: logical.TypeLogical, + Paths: iStore.paths(), + Invalidate: iStore.Invalidate, + InitializeFunc: iStore.initialize, + PathsSpecial: &logical.Paths{ + Unauthenticated: []string{ + "oidc/.well-known/*", + }, + }, + PeriodicFunc: func(ctx context.Context, req *logical.Request) error { + iStore.oidcPeriodicFunc(ctx) + + return nil + }, + } + + iStore.oidcCache = 
newOIDCCache() + + err = iStore.Setup(ctx, config) + if err != nil { + return nil, err + } + + return iStore, nil +} + +func (i *IdentityStore) paths() []*framework.Path { + return framework.PathAppend( + entityPaths(i), + aliasPaths(i), + groupAliasPaths(i), + groupPaths(i), + lookupPaths(i), + upgradePaths(i), + oidcPaths(i), + ) +} + +func (i *IdentityStore) initialize(ctx context.Context, req *logical.InitializationRequest) error { + // Only primary should write the status + if i.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationPerformanceStandby | consts.ReplicationDRSecondary) { + return nil + } + + entry, err := logical.StorageEntryJSON(caseSensitivityKey, &casesensitivity{ + DisableLowerCasedNames: i.disableLowerCasedNames, + }) + if err != nil { + return err + } + + return i.view.Put(ctx, entry) +} + +// Invalidate is a callback wherein the backend is informed that the value at +// the given key is updated. In identity store's case, it would be the entity +// storage entries that get updated. The value needs to be read and MemDB needs +// to be updated accordingly. +func (i *IdentityStore) Invalidate(ctx context.Context, key string) { + i.logger.Debug("invalidate notification received", "key", key) + + i.lock.Lock() + defer i.lock.Unlock() + + switch { + case key == caseSensitivityKey: + entry, err := i.view.Get(ctx, caseSensitivityKey) + if err != nil { + i.logger.Error("failed to read case sensitivity setting during invalidation", "error", err) + return + } + if entry == nil { + return + } + + var setting casesensitivity + if err := entry.DecodeJSON(&setting); err != nil { + i.logger.Error("failed to decode case sensitivity setting during invalidation", "error", err) + return + } + + // Fast return if the setting is the same + if i.disableLowerCasedNames == setting.DisableLowerCasedNames { + return + } + + // If the setting is different, reset memdb and reload all the artifacts + i.disableLowerCasedNames = setting.DisableLowerCasedNames + if err := i.resetDB(ctx); err != nil { + i.logger.Error("failed to reset memdb during invalidation", "error", err) + return + } + if err := i.loadEntities(ctx); err != nil { + i.logger.Error("failed to load entities during invalidation", "error", err) + return + } + if err := i.loadGroups(ctx); err != nil { + i.logger.Error("failed to load groups during invalidation", "error", err) + return + } + // Check if the key is a storage entry key for an entity bucket + case strings.HasPrefix(key, storagepacker.StoragePackerBucketsPrefix): + // Create a MemDB transaction + txn := i.db.Txn(true) + defer txn.Abort() + + // Each entity object in MemDB holds the MD5 hash of the storage + // entry key of the entity bucket. Fetch all the entities that + // belong to this bucket using the hash value. Remove these entities + // from MemDB along with all the aliases of each entity. + entitiesFetched, err := i.MemDBEntitiesByBucketKeyInTxn(txn, key) + if err != nil { + i.logger.Error("failed to fetch entities using the bucket key", "key", key) + return + } + + for _, entity := range entitiesFetched { + // Delete all the aliases in the entity. This function will also remove + // the corresponding alias indexes too. 
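+			//
+			// Rough sketch of the lookup relied on here (the "bucket_key"
+			// index name is an assumption for illustration, not taken from
+			// this file):
+			//
+			//	// every entity is indexed by the bucket it is stored in,
+			//	// so one index scan yields the bucket's full contents:
+			//	iter, _ := txn.Get(entitiesTable, "bucket_key", key)
+			//	for raw := iter.Next(); raw != nil; raw = iter.Next() {
+			//		entity := raw.(*identity.Entity)
+			//		// drop its aliases, then the entity itself
+			//	}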
+			err = i.deleteAliasesInEntityInTxn(txn, entity, entity.Aliases)
+			if err != nil {
+				i.logger.Error("failed to delete aliases in entity", "entity_id", entity.ID, "error", err)
+				return
+			}
+
+			// Delete the entity using the same transaction
+			err = i.MemDBDeleteEntityByIDInTxn(txn, entity.ID)
+			if err != nil {
+				i.logger.Error("failed to delete entity from MemDB", "entity_id", entity.ID, "error", err)
+				return
+			}
+		}
+
+		// Get the storage bucket entry
+		bucket, err := i.entityPacker.GetBucket(key)
+		if err != nil {
+			i.logger.Error("failed to refresh entities", "key", key, "error", err)
+			return
+		}
+
+		// If the underlying entry is nil, it means that this invalidation
+		// notification is for the deletion of the underlying storage entry. At
+		// this point, since all the entities belonging to this bucket are
+		// already removed, there is nothing else to be done. But, if the
+		// storage entry is non-nil, it's an indication of an update. In this
+		// case, entities in the updated bucket need to be reinserted into
+		// MemDB.
+		if bucket != nil {
+			for _, item := range bucket.Items {
+				entity, err := i.parseEntityFromBucketItem(ctx, item)
+				if err != nil {
+					i.logger.Error("failed to parse entity from bucket entry item", "error", err)
+					return
+				}
+
+				// Only update MemDB and don't touch the storage
+				err = i.upsertEntityInTxn(ctx, txn, entity, nil, false)
+				if err != nil {
+					i.logger.Error("failed to update entity in MemDB", "error", err)
+					return
+				}
+			}
+		}
+
+		txn.Commit()
+		return
+
+	// Check if the key is a storage entry key for a group bucket
+	case strings.HasPrefix(key, groupBucketsPrefix):
+		// Create a MemDB transaction
+		txn := i.db.Txn(true)
+		defer txn.Abort()
+
+		groupsFetched, err := i.MemDBGroupsByBucketKeyInTxn(txn, key)
+		if err != nil {
+			i.logger.Error("failed to fetch groups using the bucket key", "key", key)
+			return
+		}
+
+		for _, group := range groupsFetched {
+			// Delete the group using the same transaction
+			err = i.MemDBDeleteGroupByIDInTxn(txn, group.ID)
+			if err != nil {
+				i.logger.Error("failed to delete group from MemDB", "group_id", group.ID, "error", err)
+				return
+			}
+
+			if group.Alias != nil {
+				err := i.MemDBDeleteAliasByIDInTxn(txn, group.Alias.ID, true)
+				if err != nil {
+					i.logger.Error("failed to delete group alias from MemDB", "error", err)
+					return
+				}
+			}
+		}
+
+		// Get the storage bucket entry
+		bucket, err := i.groupPacker.GetBucket(key)
+		if err != nil {
+			i.logger.Error("failed to refresh group", "key", key, "error", err)
+			return
+		}
+
+		if bucket != nil {
+			for _, item := range bucket.Items {
+				group, err := i.parseGroupFromBucketItem(item)
+				if err != nil {
+					i.logger.Error("failed to parse group from bucket entry item", "error", err)
+					return
+				}
+
+				// Before updating the group, check if the group exists. If it
+				// does, then delete the group alias from MemDB, since the
+				// invalidation would have sent an update.
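+				//
+				// Put differently (sketch only, with hypothetical helper
+				// names; not upstream code): updates are handled as
+				// delete-then-reinsert so a stale alias index can never
+				// outlive its group:
+				//
+				//	old := fetchGroup(group.ID)      // state still in MemDB
+				//	if old != nil && old.Alias != nil {
+				//		dropAliasIndex(old.Alias.ID) // remove stale alias
+				//	}
+				//	upsertGroup(group)               // reinsert fresh state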
+				groupFetched, err := i.MemDBGroupByIDInTxn(txn, group.ID, true)
+				if err != nil {
+					i.logger.Error("failed to fetch group from MemDB", "error", err)
+					return
+				}
+
+				// If the group has an alias, remove it from MemDB
+				if groupFetched != nil && groupFetched.Alias != nil {
+					err := i.MemDBDeleteAliasByIDInTxn(txn, groupFetched.Alias.ID, true)
+					if err != nil {
+						i.logger.Error("failed to delete old group alias from MemDB", "error", err)
+						return
+					}
+				}
+
+				// Only update MemDB and don't touch the storage
+				err = i.UpsertGroupInTxn(ctx, txn, group, false)
+				if err != nil {
+					i.logger.Error("failed to update group in MemDB", "error", err)
+					return
+				}
+			}
+		}
+
+		txn.Commit()
+		return
+
+	case strings.HasPrefix(key, oidcTokensPrefix):
+		ns, err := namespace.FromContext(ctx)
+		if err != nil {
+			i.logger.Error("error retrieving namespace", "error", err)
+			return
+		}
+
+		// Wipe the cache for the requested namespace. This will clear
+		// the shared namespace as well.
+		if err := i.oidcCache.Flush(ns); err != nil {
+			i.logger.Error("error flushing oidc cache", "error", err)
+		}
+	}
+}
+
+func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *storagepacker.Item) (*identity.Entity, error) {
+	if item == nil {
+		return nil, fmt.Errorf("nil item")
+	}
+
+	persistNeeded := false
+
+	var entity identity.Entity
+	err := ptypes.UnmarshalAny(item.Message, &entity)
+	if err != nil {
+		// If we encounter an error, it would mean that the format of the
+		// entity is an older one. Try decoding using the older format and,
+		// if successful, upgrade the storage with the newer format.
+		var oldEntity identity.EntityStorageEntry
+		oldEntityErr := ptypes.UnmarshalAny(item.Message, &oldEntity)
+		if oldEntityErr != nil {
+			return nil, errwrap.Wrapf("failed to decode entity from storage bucket item: {{err}}", err)
+		}
+
+		i.logger.Debug("upgrading the entity using patch introduced with vault 0.8.2.1", "entity_id", oldEntity.ID)
+
+		// Successfully decoded entity using older format. Entity is stored
+		// with older format. Upgrade it.
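+		//
+		// The older and newer formats differ mostly in naming; the copy
+		// below performs this mapping (the arrow notation is illustration,
+		// "Personas" is genuinely the old field name):
+		//
+		//	EntityStorageEntry.Personas  -> Entity.Aliases
+		//	Persona.EntityID             -> Alias.CanonicalID
+		//	Persona.MergedFromEntityIDs  -> Alias.MergedFromCanonicalIDs
+		//	BucketKeyHash                -> BucketKey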
+		entity.ID = oldEntity.ID
+		entity.Name = oldEntity.Name
+		entity.Metadata = oldEntity.Metadata
+		entity.CreationTime = oldEntity.CreationTime
+		entity.LastUpdateTime = oldEntity.LastUpdateTime
+		entity.MergedEntityIDs = oldEntity.MergedEntityIDs
+		entity.Policies = oldEntity.Policies
+		entity.BucketKey = oldEntity.BucketKeyHash
+		entity.MFASecrets = oldEntity.MFASecrets
+		// Copy each alias individually since the format of aliases was
+		// also different
+		for _, oldAlias := range oldEntity.Personas {
+			var newAlias identity.Alias
+			newAlias.ID = oldAlias.ID
+			newAlias.Name = oldAlias.Name
+			newAlias.CanonicalID = oldAlias.EntityID
+			newAlias.MountType = oldAlias.MountType
+			newAlias.MountAccessor = oldAlias.MountAccessor
+			newAlias.MountPath = oldAlias.MountPath
+			newAlias.Metadata = oldAlias.Metadata
+			newAlias.CreationTime = oldAlias.CreationTime
+			newAlias.LastUpdateTime = oldAlias.LastUpdateTime
+			newAlias.MergedFromCanonicalIDs = oldAlias.MergedFromEntityIDs
+			entity.Aliases = append(entity.Aliases, &newAlias)
+		}
+
+		persistNeeded = true
+	}
+
+	pN, err := parseExtraEntityFromBucket(ctx, i, &entity)
+	if err != nil {
+		return nil, err
+	}
+	if pN {
+		persistNeeded = true
+	}
+
+	if persistNeeded && !i.core.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
+		entityAsAny, err := ptypes.MarshalAny(&entity)
+		if err != nil {
+			return nil, err
+		}
+
+		item := &storagepacker.Item{
+			ID:      entity.ID,
+			Message: entityAsAny,
+		}
+
+		// Store the entity with new format
+		err = i.entityPacker.PutItem(ctx, item)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if entity.NamespaceID == "" {
+		entity.NamespaceID = namespace.RootNamespaceID
+	}
+
+	return &entity, nil
+}
+
+func (i *IdentityStore) parseGroupFromBucketItem(item *storagepacker.Item) (*identity.Group, error) {
+	if item == nil {
+		return nil, fmt.Errorf("nil item")
+	}
+
+	var group identity.Group
+	err := ptypes.UnmarshalAny(item.Message, &group)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to decode group from storage bucket item: {{err}}", err)
+	}
+
+	if group.NamespaceID == "" {
+		group.NamespaceID = namespace.RootNamespaceID
+	}
+
+	return &group, nil
+}
+
+// entityByAliasFactors fetches the entity based on the factors of an alias,
+// i.e. the mount accessor and the alias name.
+func (i *IdentityStore) entityByAliasFactors(mountAccessor, aliasName string, clone bool) (*identity.Entity, error) {
+	if mountAccessor == "" {
+		return nil, fmt.Errorf("missing mount accessor")
+	}
+
+	if aliasName == "" {
+		return nil, fmt.Errorf("missing alias name")
+	}
+
+	txn := i.db.Txn(false)
+
+	return i.entityByAliasFactorsInTxn(txn, mountAccessor, aliasName, clone)
+}
+
+// entityByAliasFactorsInTxn fetches the entity based on the factors of an
+// alias, i.e. the mount accessor and the alias name.
+func (i *IdentityStore) entityByAliasFactorsInTxn(txn *memdb.Txn, mountAccessor, aliasName string, clone bool) (*identity.Entity, error) {
+	if txn == nil {
+		return nil, fmt.Errorf("nil txn")
+	}
+
+	if mountAccessor == "" {
+		return nil, fmt.Errorf("missing mount accessor")
+	}
+
+	if aliasName == "" {
+		return nil, fmt.Errorf("missing alias name")
+	}
+
+	alias, err := i.MemDBAliasByFactorsInTxn(txn, mountAccessor, aliasName, false, false)
+	if err != nil {
+		return nil, err
+	}
+
+	if alias == nil {
+		return nil, nil
+	}
+
+	return i.MemDBEntityByAliasIDInTxn(txn, alias.ID, clone)
+}
+
+// CreateOrFetchEntity creates a new entity. This is used by core to
+// associate each login attempt by an alias to a unified entity in Vault.
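+//
+// A minimal usage sketch (the mount accessor and alias name are illustrative
+// assumptions, not values from this file):
+//
+//	entity, err := c.identityStore.CreateOrFetchEntity(ctx, &logical.Alias{
+//		MountType:     "userpass",
+//		MountAccessor: "auth_userpass_1234",
+//		Name:          "alice",
+//	})
+//
+// Repeated logins by "alice" through the same mount return the same entity;
+// only changed alias metadata triggers a write.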
+func (i *IdentityStore) CreateOrFetchEntity(ctx context.Context, alias *logical.Alias) (*identity.Entity, error) { + var entity *identity.Entity + var err error + var update bool + + if alias == nil { + return nil, fmt.Errorf("alias is nil") + } + + if alias.Name == "" { + return nil, fmt.Errorf("empty alias name") + } + + mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor) + if mountValidationResp == nil { + return nil, fmt.Errorf("invalid mount accessor %q", alias.MountAccessor) + } + + if mountValidationResp.MountLocal { + return nil, fmt.Errorf("mount_accessor %q is of a local mount", alias.MountAccessor) + } + + if mountValidationResp.MountType != alias.MountType { + return nil, fmt.Errorf("mount accessor %q is not a mount of type %q", alias.MountAccessor, alias.MountType) + } + + // Check if an entity already exists for the given alias + entity, err = i.entityByAliasFactors(alias.MountAccessor, alias.Name, false) + if err != nil { + return nil, err + } + if entity != nil && changedAliasIndex(entity, alias) == -1 { + return entity, nil + } + + i.lock.Lock() + defer i.lock.Unlock() + + // Create a MemDB transaction to update both alias and entity + txn := i.db.Txn(true) + defer txn.Abort() + + // Check if an entity was created before acquiring the lock + entity, err = i.entityByAliasFactorsInTxn(txn, alias.MountAccessor, alias.Name, true) + if err != nil { + return nil, err + } + if entity != nil { + idx := changedAliasIndex(entity, alias) + if idx == -1 { + return entity, nil + } + a := entity.Aliases[idx] + a.Metadata = alias.Metadata + a.LastUpdateTime = ptypes.TimestampNow() + + update = true + } + + if !update { + entity = new(identity.Entity) + err = i.sanitizeEntity(ctx, entity) + if err != nil { + return nil, err + } + + // Create a new alias + newAlias := &identity.Alias{ + CanonicalID: entity.ID, + Name: alias.Name, + MountAccessor: alias.MountAccessor, + Metadata: alias.Metadata, + MountPath: mountValidationResp.MountPath, + MountType: mountValidationResp.MountType, + } + + err = i.sanitizeAlias(ctx, newAlias) + if err != nil { + return nil, err + } + + i.logger.Debug("creating a new entity", "alias", newAlias) + + // Append the new alias to the new entity + entity.Aliases = []*identity.Alias{ + newAlias, + } + } + + // Update MemDB and persist entity object + err = i.upsertEntityInTxn(ctx, txn, entity, nil, true) + if err != nil { + return nil, err + } + + txn.Commit() + + return entity, nil +} + +// changedAliasIndex searches an entity for changed alias metadata. +// +// If a match is found, the changed alias's index is returned. If no alias +// names match or no metadata is different, -1 is returned. 
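+//
+// For example (illustrative values):
+//
+//	entity.Aliases = []*identity.Alias{
+//		{Name: "alice", Metadata: map[string]string{"team": "eng"}},
+//	}
+//	login := &logical.Alias{Name: "alice", Metadata: map[string]string{"team": "sre"}}
+//	changedAliasIndex(entity, login) // 0: name matches, metadata differs
+//	login.Metadata["team"] = "eng"
+//	changedAliasIndex(entity, login) // -1: nothing changed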
+func changedAliasIndex(entity *identity.Entity, alias *logical.Alias) int { + for i, a := range entity.Aliases { + if a.Name == alias.Name && !strutil.EqualStringMaps(a.Metadata, alias.Metadata) { + return i + } + } + + return -1 +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_aliases.go b/vendor/github.com/hashicorp/vault/vault/identity_store_aliases.go new file mode 100644 index 00000000..f51f2d2e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store_aliases.go @@ -0,0 +1,535 @@ +package vault + +import ( + "context" + "fmt" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/storagepacker" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// aliasPaths returns the API endpoints to operate on aliases. +// Following are the paths supported: +// entity-alias - To register/modify an alias +// entity-alias/id - To read, modify, delete and list aliases based on their ID +func aliasPaths(i *IdentityStore) []*framework.Path { + return []*framework.Path{ + { + Pattern: "entity-alias$", + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the entity alias. If set, updates the corresponding entity alias.", + }, + // entity_id is deprecated in favor of canonical_id + "entity_id": { + Type: framework.TypeString, + Description: `Entity ID to which this alias belongs. +This field is deprecated, use canonical_id.`, + }, + "canonical_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias belongs", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this alias belongs to; unused for a modify", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the alias; unused for a modify", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.handleAliasCreateUpdate(), + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias"][1]), + }, + { + Pattern: "entity-alias/id/" + framework.GenericNameRegex("id"), + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the alias", + }, + // entity_id is deprecated + "entity_id": { + Type: framework.TypeString, + Description: `Entity ID to which this alias belongs to. 
+This field is deprecated, use canonical_id.`, + }, + "canonical_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias should be tied to", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "(Unused)", + }, + "name": { + Type: framework.TypeString, + Description: "(Unused)", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.handleAliasCreateUpdate(), + logical.ReadOperation: i.pathAliasIDRead(), + logical.DeleteOperation: i.pathAliasIDDelete(), + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias-id"][1]), + }, + { + Pattern: "entity-alias/id/?$", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathAliasIDList(), + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id-list"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias-id-list"][1]), + }, + } +} + +// handleAliasCreateUpdate is used to create or update an alias +func (i *IdentityStore) handleAliasCreateUpdate() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + var err error + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + // Get alias name, if any + name := d.Get("name").(string) + + // Get mount accessor, if any + mountAccessor := d.Get("mount_accessor").(string) + + // Get ID, if any + id := d.Get("id").(string) + + // Get entity id + canonicalID := d.Get("canonical_id").(string) + if canonicalID == "" { + // For backwards compatibility + canonicalID = d.Get("entity_id").(string) + } + + i.lock.Lock() + defer i.lock.Unlock() + + // This block is run if they provided an ID + { + // If they provide an ID it must be an update. Find the alias, perform + // due diligence, call the update function. 
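+ //
+ // Sketch of the resulting request shapes (field values are
+ // illustrative, not upstream docs): fields omitted from an update
+ // are inherited from the stored alias, so
+ //
+ //	{"id": "<alias_id>", "canonical_id": "<entity_id>"} // re-parent only
+ //	{"id": "<alias_id>", "name": "new-name"}            // rename only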
+ if id != "" { + alias, err := i.MemDBAliasByID(id, true, false) + if err != nil { + return nil, err + } + if alias == nil { + return logical.ErrorResponse("invalid alias ID provided"), nil + } + if alias.NamespaceID != ns.ID { + return logical.ErrorResponse("cannot modify aliases across namespaces"), logical.ErrPermissionDenied + } + + switch { + case mountAccessor == "" && name == "": + // Just a canonical ID update, maybe + if canonicalID == "" { + // Nothing to do, so be idempotent + return nil, nil + } + + name = alias.Name + mountAccessor = alias.MountAccessor + + case mountAccessor == "": + // No change to mount accessor + mountAccessor = alias.MountAccessor + + case name == "": + // No change to mount name + name = alias.Name + + default: + // Both provided + } + + return i.handleAliasUpdate(ctx, req, canonicalID, name, mountAccessor, alias) + } + } + + // If they didn't provide an ID, we must have both accessor and name provided + if mountAccessor == "" || name == "" { + return logical.ErrorResponse("'id' or 'mount_accessor' and 'name' must be provided"), nil + } + + // Look up the alias by factors; if it's found it's an update + mountEntry := i.core.router.MatchingMountByAccessor(mountAccessor) + if mountEntry == nil { + return logical.ErrorResponse(fmt.Sprintf("invalid mount accessor %q", mountAccessor)), nil + } + if mountEntry.Local { + return logical.ErrorResponse(fmt.Sprintf("mount accessor %q is of a local mount", mountAccessor)), nil + } + if mountEntry.NamespaceID != ns.ID { + return logical.ErrorResponse("matching mount is in a different namespace than request"), logical.ErrPermissionDenied + } + alias, err := i.MemDBAliasByFactors(mountAccessor, name, false, false) + if err != nil { + return nil, err + } + if alias != nil { + if alias.NamespaceID != ns.ID { + return logical.ErrorResponse("cannot modify aliases across namespaces"), logical.ErrPermissionDenied + } + + return i.handleAliasUpdate(ctx, req, alias.CanonicalID, name, mountAccessor, alias) + } + + // At this point we know it's a new creation request + return i.handleAliasCreate(ctx, req, canonicalID, name, mountAccessor) + } +} + +func (i *IdentityStore) handleAliasCreate(ctx context.Context, req *logical.Request, canonicalID, name, mountAccessor string) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + alias := &identity.Alias{ + MountAccessor: mountAccessor, + Name: name, + } + entity := &identity.Entity{} + + // If a canonical ID is provided pull up the entity and make sure we're in + // the right NS + if canonicalID != "" { + entity, err = i.MemDBEntityByID(canonicalID, true) + if err != nil { + return nil, err + } + if entity == nil { + return logical.ErrorResponse("invalid canonical ID"), nil + } + if entity.NamespaceID != ns.ID { + return logical.ErrorResponse("entity found with 'canonical_id' not in request namespace"), logical.ErrPermissionDenied + } + } + + entity.Aliases = append(entity.Aliases, alias) + + // ID creation and other validations; This is more useful for new entities + // and may not perform anything for the existing entities. Placing the + // check here to make the flow common for both new and existing entities. + err = i.sanitizeEntity(ctx, entity) + if err != nil { + return nil, err + } + + // Set the canonical ID in the alias index. This should be done after + // sanitizing entity in case it's a new entity that didn't have an ID. 
+ alias.CanonicalID = entity.ID + + // ID creation and other validations + err = i.sanitizeAlias(ctx, alias) + if err != nil { + return nil, err + } + + // Index entity and its aliases in MemDB and persist entity along with + // aliases in storage. + if err := i.upsertEntity(ctx, entity, nil, true); err != nil { + return nil, err + } + + // Return ID of both alias and entity + return &logical.Response{ + Data: map[string]interface{}{ + "id": alias.ID, + "canonical_id": entity.ID, + }, + }, nil +} + +func (i *IdentityStore) handleAliasUpdate(ctx context.Context, req *logical.Request, canonicalID, name, mountAccessor string, alias *identity.Alias) (*logical.Response, error) { + if name == alias.Name && + mountAccessor == alias.MountAccessor && + (canonicalID == alias.CanonicalID || canonicalID == "") { + // Nothing to do; return nil to be idempotent + return nil, nil + } + + alias.LastUpdateTime = ptypes.TimestampNow() + + // If we're changing one or the other or both of these, make sure that + // there isn't a matching alias already, and make sure it's in the same + // namespace. + if name != alias.Name || mountAccessor != alias.MountAccessor { + // Check here to see if such an alias already exists, if so bail + mountEntry := i.core.router.MatchingMountByAccessor(mountAccessor) + if mountEntry == nil { + return logical.ErrorResponse(fmt.Sprintf("invalid mount accessor %q", mountAccessor)), nil + } + if mountEntry.Local { + return logical.ErrorResponse(fmt.Sprintf("mount_accessor %q is of a local mount", mountAccessor)), nil + } + if mountEntry.NamespaceID != alias.NamespaceID { + return logical.ErrorResponse("given mount accessor is not in the same namespace as the existing alias"), logical.ErrPermissionDenied + } + + existingAlias, err := i.MemDBAliasByFactors(mountAccessor, name, false, false) + if err != nil { + return nil, err + } + // Bail unless it's just a case change + if existingAlias != nil && !strings.EqualFold(existingAlias.Name, name) { + return logical.ErrorResponse("alias with combination of mount accessor and name already exists"), nil + } + + // Update the values in the alias + alias.Name = name + alias.MountAccessor = mountAccessor + } + + // Get our current entity, which may be the same as the new one if the + // canonical ID hasn't changed + currentEntity, err := i.MemDBEntityByAliasID(alias.ID, true) + if err != nil { + return nil, err + } + if currentEntity == nil { + return logical.ErrorResponse("given alias is not associated with an entity"), nil + } + if currentEntity.NamespaceID != alias.NamespaceID { + return logical.ErrorResponse("alias associated with an entity in a different namespace"), logical.ErrPermissionDenied + } + + newEntity := currentEntity + if canonicalID != "" && canonicalID != alias.CanonicalID { + newEntity, err = i.MemDBEntityByID(canonicalID, true) + if err != nil { + return nil, err + } + if newEntity == nil { + return logical.ErrorResponse("given 'canonical_id' is not associated with an entity"), nil + } + if newEntity.NamespaceID != alias.NamespaceID { + return logical.ErrorResponse("given 'canonical_id' associated with entity in a different namespace from the alias"), logical.ErrPermissionDenied + } + + // Update the canonical ID value and move it from the current enitity to the new one + alias.CanonicalID = newEntity.ID + newEntity.Aliases = append(newEntity.Aliases, alias) + for aliasIndex, item := range currentEntity.Aliases { + if item.ID == alias.ID { + currentEntity.Aliases = append(currentEntity.Aliases[:aliasIndex], 
currentEntity.Aliases[aliasIndex+1:]...) + break + } + } + } else { + // If it's not moving we still need to update it in the existing + // entity's aliases + for aliasIndex, item := range currentEntity.Aliases { + if item.ID == alias.ID { + currentEntity.Aliases[aliasIndex] = alias + break + } + } + // newEntity will be pointing to the same entity; set currentEntity nil + // so the upsertCall gets nil for the previous entity as we're only + // changing one. + currentEntity = nil + } + + // Index entity and its aliases in MemDB and persist entity along with + // aliases in storage. If the alias is being transferred over from + // one entity to another, previous entity needs to get refreshed in MemDB + // and persisted in storage as well. + if err := i.upsertEntity(ctx, newEntity, currentEntity, true); err != nil { + return nil, err + } + + // Return ID of both alias and entity + return &logical.Response{ + Data: map[string]interface{}{ + "id": alias.ID, + "canonical_id": newEntity.ID, + }, + }, nil +} + +// pathAliasIDRead returns the properties of an alias for a given +// alias ID +func (i *IdentityStore) pathAliasIDRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + aliasID := d.Get("id").(string) + if aliasID == "" { + return logical.ErrorResponse("missing alias id"), nil + } + + alias, err := i.MemDBAliasByID(aliasID, false, false) + if err != nil { + return nil, err + } + + return i.handleAliasReadCommon(ctx, alias) + } +} + +func (i *IdentityStore) handleAliasReadCommon(ctx context.Context, alias *identity.Alias) (*logical.Response, error) { + if alias == nil { + return nil, nil + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + if ns.ID != alias.NamespaceID { + return logical.ErrorResponse("alias and request are in different namespaces"), logical.ErrPermissionDenied + } + + respData := map[string]interface{}{} + respData["id"] = alias.ID + respData["canonical_id"] = alias.CanonicalID + respData["mount_accessor"] = alias.MountAccessor + respData["metadata"] = alias.Metadata + respData["name"] = alias.Name + respData["merged_from_canonical_ids"] = alias.MergedFromCanonicalIDs + respData["namespace_id"] = alias.NamespaceID + + if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil { + respData["mount_path"] = mountValidationResp.MountPath + respData["mount_type"] = mountValidationResp.MountType + } + + // Convert protobuf timestamp into RFC3339 format + respData["creation_time"] = ptypes.TimestampString(alias.CreationTime) + respData["last_update_time"] = ptypes.TimestampString(alias.LastUpdateTime) + + return &logical.Response{ + Data: respData, + }, nil +} + +// pathAliasIDDelete deletes the alias for a given alias ID +func (i *IdentityStore) pathAliasIDDelete() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + aliasID := d.Get("id").(string) + if aliasID == "" { + return logical.ErrorResponse("missing alias ID"), nil + } + + i.lock.Lock() + defer i.lock.Unlock() + + // Create a MemDB transaction to delete entity + txn := i.db.Txn(true) + defer txn.Abort() + + // Fetch the alias + alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, false) + if err != nil { + return nil, err + } + + // If there is no alias for the ID, do nothing + if alias == nil { + return nil, nil + } + + ns, err := 
namespace.FromContext(ctx) + if err != nil { + return nil, err + } + if ns.ID != alias.NamespaceID { + return logical.ErrorResponse("request and alias are in different namespaces"), logical.ErrPermissionDenied + } + + // Fetch the associated entity + entity, err := i.MemDBEntityByAliasIDInTxn(txn, alias.ID, true) + if err != nil { + return nil, err + } + + // If there is no entity tied to a valid alias, something is wrong + if entity == nil { + return nil, fmt.Errorf("alias not associated to an entity") + } + + aliases := []*identity.Alias{ + alias, + } + + // Delete alias from the entity object + err = i.deleteAliasesInEntityInTxn(txn, entity, aliases) + if err != nil { + return nil, err + } + + // Update the entity index in the entities table + err = i.MemDBUpsertEntityInTxn(txn, entity) + if err != nil { + return nil, err + } + + // Persist the entity object + entityAsAny, err := ptypes.MarshalAny(entity) + if err != nil { + return nil, err + } + item := &storagepacker.Item{ + ID: entity.ID, + Message: entityAsAny, + } + + err = i.entityPacker.PutItem(ctx, item) + if err != nil { + return nil, err + } + + // Committing the transaction *after* successfully updating entity in + // storage + txn.Commit() + + return nil, nil + } +} + +// pathAliasIDList lists the IDs of all the valid aliases in the identity +// store +func (i *IdentityStore) pathAliasIDList() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return i.handleAliasListCommon(ctx, false) + } +} + +var aliasHelp = map[string][2]string{ + "alias": { + "Create a new alias.", + "", + }, + "alias-id": { + "Update, read or delete an alias ID.", + "", + }, + "alias-id-list": { + "List all the alias IDs.", + "", + }, +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_entities.go b/vendor/github.com/hashicorp/vault/vault/identity_store_entities.go new file mode 100644 index 00000000..388b52fc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store_entities.go @@ -0,0 +1,775 @@ +package vault + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/errwrap" + memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/identity/mfa" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/storagepacker" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func entityPathFields() map[string]*framework.FieldSchema { + return map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the entity. If set, updates the corresponding existing entity.", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the entity", + }, + "metadata": { + Type: framework.TypeKVPairs, + Description: `Metadata to be associated with the entity. +In CLI, this parameter can be repeated multiple times, and it all gets merged together. 
+For example: +vault metadata=key1=value1 metadata=key2=value2 + `, + }, + "policies": { + Type: framework.TypeCommaStringSlice, + Description: "Policies to be tied to the entity.", + }, + "disabled": { + Type: framework.TypeBool, + Description: "If set true, tokens tied to this identity will not be able to be used (but will not be revoked).", + }, + } +} + +// entityPaths returns the API endpoints supported to operate on entities. +// Following are the paths supported: +// entity - To register a new entity +// entity/id - To lookup, modify, delete and list entities based on ID +// entity/merge - To merge entities based on ID +func entityPaths(i *IdentityStore) []*framework.Path { + return []*framework.Path{ + { + Pattern: "entity$", + Fields: entityPathFields(), + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.handleEntityUpdateCommon(), + }, + + HelpSynopsis: strings.TrimSpace(entityHelp["entity"][0]), + HelpDescription: strings.TrimSpace(entityHelp["entity"][1]), + }, + { + Pattern: "entity/name/(?P.+)", + Fields: entityPathFields(), + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.handleEntityUpdateCommon(), + logical.ReadOperation: i.pathEntityNameRead(), + logical.DeleteOperation: i.pathEntityNameDelete(), + }, + + HelpSynopsis: strings.TrimSpace(entityHelp["entity-name"][0]), + HelpDescription: strings.TrimSpace(entityHelp["entity-name"][1]), + }, + { + Pattern: "entity/id/" + framework.GenericNameRegex("id"), + Fields: entityPathFields(), + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.handleEntityUpdateCommon(), + logical.ReadOperation: i.pathEntityIDRead(), + logical.DeleteOperation: i.pathEntityIDDelete(), + }, + + HelpSynopsis: strings.TrimSpace(entityHelp["entity-id"][0]), + HelpDescription: strings.TrimSpace(entityHelp["entity-id"][1]), + }, + { + Pattern: "entity/name/?$", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathEntityNameList(), + }, + + HelpSynopsis: strings.TrimSpace(entityHelp["entity-name-list"][0]), + HelpDescription: strings.TrimSpace(entityHelp["entity-name-list"][1]), + }, + { + Pattern: "entity/id/?$", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathEntityIDList(), + }, + + HelpSynopsis: strings.TrimSpace(entityHelp["entity-id-list"][0]), + HelpDescription: strings.TrimSpace(entityHelp["entity-id-list"][1]), + }, + { + Pattern: "entity/merge/?$", + Fields: map[string]*framework.FieldSchema{ + "from_entity_ids": { + Type: framework.TypeCommaStringSlice, + Description: "Entity IDs which needs to get merged", + }, + "to_entity_id": { + Type: framework.TypeString, + Description: "Entity ID into which all the other entities need to get merged", + }, + "force": { + Type: framework.TypeBool, + Description: "Setting this will follow the 'mine' strategy for merging MFA secrets. If there are secrets of the same type both in entities that are merged from and in entity into which all others are getting merged, secrets in the destination will be unaltered. 
If not set, this API will throw an error containing all the conflicts.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathEntityMergeID(), + }, + + HelpSynopsis: strings.TrimSpace(entityHelp["entity-merge-id"][0]), + HelpDescription: strings.TrimSpace(entityHelp["entity-merge-id"][1]), + }, + } +} + +// pathEntityMergeID merges two or more entities into a single entity +func (i *IdentityStore) pathEntityMergeID() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + toEntityID := d.Get("to_entity_id").(string) + if toEntityID == "" { + return logical.ErrorResponse("missing entity id to merge to"), nil + } + + fromEntityIDs := d.Get("from_entity_ids").([]string) + if len(fromEntityIDs) == 0 { + return logical.ErrorResponse("missing entity ids to merge from"), nil + } + + force := d.Get("force").(bool) + + // Create a MemDB transaction to merge entities + txn := i.db.Txn(true) + defer txn.Abort() + + toEntity, err := i.MemDBEntityByID(toEntityID, true) + if err != nil { + return nil, err + } + + userErr, intErr := i.mergeEntity(ctx, txn, toEntity, fromEntityIDs, force, true, false, true) + if userErr != nil { + return logical.ErrorResponse(userErr.Error()), nil + } + if intErr != nil { + return nil, intErr + } + + // Committing the transaction *after* successfully performing storage + // persistence + txn.Commit() + + return nil, nil + } +} + +// handleEntityUpdateCommon is used to update an entity +func (i *IdentityStore) handleEntityUpdateCommon() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + i.lock.Lock() + defer i.lock.Unlock() + + entity := new(identity.Entity) + var err error + + entityID := d.Get("id").(string) + if entityID != "" { + entity, err = i.MemDBEntityByID(entityID, true) + if err != nil { + return nil, err + } + if entity == nil { + return logical.ErrorResponse("entity not found from id"), nil + } + } + + // Get the name + entityName := d.Get("name").(string) + if entityName != "" { + entityByName, err := i.MemDBEntityByName(ctx, entityName, false) + if err != nil { + return nil, err + } + switch { + case entityByName == nil: + // Not found, safe to use this name with an existing or new entity + case entity.ID == "": + // Entity by ID was not found, but and entity for the supplied + // name was found. Continue updating the entity. + entity = entityByName + case entity.ID == entityByName.ID: + // Same exact entity, carry on (this is basically a noop then) + default: + return logical.ErrorResponse("entity name is already in use"), nil + } + } + + if entityName != "" { + entity.Name = entityName + } + + // Update the policies if supplied + entityPoliciesRaw, ok := d.GetOk("policies") + if ok { + entity.Policies = entityPoliciesRaw.([]string) + } + + if strutil.StrListContains(entity.Policies, "root") { + return logical.ErrorResponse("policies cannot contain root"), nil + } + + disabledRaw, ok := d.GetOk("disabled") + if ok { + entity.Disabled = disabledRaw.(bool) + } + + // Get entity metadata + metadata, ok, err := d.GetOkErr("metadata") + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed to parse metadata: %v", err)), nil + } + if ok { + entity.Metadata = metadata.(map[string]string) + } + + // At this point, if entity.ID is empty, it indicates that a new entity + // is being created. Using this to respond data in the response. 
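+		//
+		// Sketch of the two outcomes (values illustrative):
+		//
+		//	// create: 200 with {"id": "<new-uuid>", "name": ..., "aliases": [...]}
+		//	// update: 204 No Content (nil response below)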
+		newEntity := entity.ID == ""
+
+		// ID creation and some validations
+		err = i.sanitizeEntity(ctx, entity)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := i.upsertEntity(ctx, entity, nil, true); err != nil {
+			return nil, err
+		}
+
+		// If this operation was an update to an existing entity, return 204
+		if !newEntity {
+			return nil, nil
+		}
+
+		// Prepare the response
+		respData := map[string]interface{}{
+			"id":   entity.ID,
+			"name": entity.Name,
+		}
+
+		var aliasIDs []string
+		for _, alias := range entity.Aliases {
+			aliasIDs = append(aliasIDs, alias.ID)
+		}
+
+		respData["aliases"] = aliasIDs
+
+		// Return ID of the entity that was either created or updated along with
+		// its aliases
+		return &logical.Response{
+			Data: respData,
+		}, nil
+	}
+}
+
+// pathEntityNameRead returns the properties of an entity for a given entity
+// name
+func (i *IdentityStore) pathEntityNameRead() framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+		entityName := d.Get("name").(string)
+		if entityName == "" {
+			return logical.ErrorResponse("missing entity name"), nil
+		}
+
+		entity, err := i.MemDBEntityByName(ctx, entityName, false)
+		if err != nil {
+			return nil, err
+		}
+		if entity == nil {
+			return nil, nil
+		}
+
+		return i.handleEntityReadCommon(ctx, entity)
+	}
+}
+
+// pathEntityIDRead returns the properties of an entity for a given entity ID
+func (i *IdentityStore) pathEntityIDRead() framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+		entityID := d.Get("id").(string)
+		if entityID == "" {
+			return logical.ErrorResponse("missing entity id"), nil
+		}
+
+		entity, err := i.MemDBEntityByID(entityID, false)
+		if err != nil {
+			return nil, err
+		}
+		if entity == nil {
+			return nil, nil
+		}
+
+		return i.handleEntityReadCommon(ctx, entity)
+	}
+}
+
+func (i *IdentityStore) handleEntityReadCommon(ctx context.Context, entity *identity.Entity) (*logical.Response, error) {
+	ns, err := namespace.FromContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if ns.ID != entity.NamespaceID {
+		return nil, nil
+	}
+
+	respData := map[string]interface{}{}
+	respData["id"] = entity.ID
+	respData["name"] = entity.Name
+	respData["metadata"] = entity.Metadata
+	respData["merged_entity_ids"] = entity.MergedEntityIDs
+	respData["policies"] = entity.Policies
+	respData["disabled"] = entity.Disabled
+	respData["namespace_id"] = entity.NamespaceID
+
+	// Convert protobuf timestamp into RFC3339 format
+	respData["creation_time"] = ptypes.TimestampString(entity.CreationTime)
+	respData["last_update_time"] = ptypes.TimestampString(entity.LastUpdateTime)
+
+	// Convert each alias into a map and replace the time format in each
+	aliasesToReturn := make([]interface{}, len(entity.Aliases))
+	for aliasIdx, alias := range entity.Aliases {
+		aliasMap := map[string]interface{}{}
+		aliasMap["id"] = alias.ID
+		aliasMap["canonical_id"] = alias.CanonicalID
+		aliasMap["mount_accessor"] = alias.MountAccessor
+		aliasMap["metadata"] = alias.Metadata
+		aliasMap["name"] = alias.Name
+		aliasMap["merged_from_canonical_ids"] = alias.MergedFromCanonicalIDs
+		aliasMap["creation_time"] = ptypes.TimestampString(alias.CreationTime)
+		aliasMap["last_update_time"] = ptypes.TimestampString(alias.LastUpdateTime)
+
+		if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil {
+			aliasMap["mount_type"] = mountValidationResp.MountType
+			aliasMap["mount_path"] = mountValidationResp.MountPath
+		}
+
+		aliasesToReturn[aliasIdx] = aliasMap
+	}
+
+	// Add the aliases information to the response which has the correct time
+	// formats
+	respData["aliases"] = aliasesToReturn
+
+	addExtraEntityDataToResponse(entity, respData)
+
+	// Fetch the groups this entity belongs to and return their identifiers
+	groups, inheritedGroups, err := i.groupsByEntityID(entity.ID)
+	if err != nil {
+		return nil, err
+	}
+
+	groupIDs := make([]string, len(groups))
+	for i, group := range groups {
+		groupIDs[i] = group.ID
+	}
+	respData["direct_group_ids"] = groupIDs
+
+	inheritedGroupIDs := make([]string, len(inheritedGroups))
+	for i, group := range inheritedGroups {
+		inheritedGroupIDs[i] = group.ID
+	}
+	respData["inherited_group_ids"] = inheritedGroupIDs
+
+	respData["group_ids"] = append(groupIDs, inheritedGroupIDs...)
+
+	return &logical.Response{
+		Data: respData,
+	}, nil
+}
+
+// pathEntityIDDelete deletes the entity for a given entity ID
+func (i *IdentityStore) pathEntityIDDelete() framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+		entityID := d.Get("id").(string)
+		if entityID == "" {
+			return logical.ErrorResponse("missing entity id"), nil
+		}
+
+		i.lock.Lock()
+		defer i.lock.Unlock()
+
+		// Create a MemDB transaction to delete entity
+		txn := i.db.Txn(true)
+		defer txn.Abort()
+
+		// Fetch the entity using its ID
+		entity, err := i.MemDBEntityByIDInTxn(txn, entityID, true)
+		if err != nil {
+			return nil, err
+		}
+		if entity == nil {
+			return nil, nil
+		}
+
+		err = i.handleEntityDeleteCommon(ctx, txn, entity)
+		if err != nil {
+			return nil, err
+		}
+
+		txn.Commit()
+
+		return nil, nil
+	}
+}
+
+// pathEntityNameDelete deletes the entity for a given entity name
+func (i *IdentityStore) pathEntityNameDelete() framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+		entityName := d.Get("name").(string)
+		if entityName == "" {
+			return logical.ErrorResponse("missing entity name"), nil
+		}
+
+		i.lock.Lock()
+		defer i.lock.Unlock()
+
+		// Create a MemDB transaction to delete entity
+		txn := i.db.Txn(true)
+		defer txn.Abort()
+
+		// Fetch the entity using its name
+		entity, err := i.MemDBEntityByNameInTxn(ctx, txn, entityName, true)
+		if err != nil {
+			return nil, err
+		}
+		// If there is no entity for the name, do nothing
+		if entity == nil {
+			return nil, nil
+		}
+
+		ns, err := namespace.FromContext(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if entity.NamespaceID != ns.ID {
+			return nil, nil
+		}
+
+		err = i.handleEntityDeleteCommon(ctx, txn, entity)
+		if err != nil {
+			return nil, err
+		}
+
+		txn.Commit()
+
+		return nil, nil
+	}
+}
+
+func (i *IdentityStore) handleEntityDeleteCommon(ctx context.Context, txn *memdb.Txn, entity *identity.Entity) error {
+	ns, err := namespace.FromContext(ctx)
+	if err != nil {
+		return err
+	}
+	if entity.NamespaceID != ns.ID {
+		return nil
+	}
+
+	// Remove entity ID as a member from all the groups it belongs to, both
+	// internal and external
+	groups, err := i.MemDBGroupsByMemberEntityIDInTxn(txn, entity.ID, true, false)
+	if err != nil {
+		return err
+	}
+
+	for _, group := range groups {
+		group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, entity.ID)
+		err = i.UpsertGroupInTxn(ctx, txn, group, true)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Delete all the aliases in the entity and the respective indexes
+	err =
i.deleteAliasesInEntityInTxn(txn, entity, entity.Aliases) + if err != nil { + return err + } + + // Delete the entity using the same transaction + err = i.MemDBDeleteEntityByIDInTxn(txn, entity.ID) + if err != nil { + return err + } + + // Delete the entity from storage + err = i.entityPacker.DeleteItem(ctx, entity.ID) + if err != nil { + return err + } + + return nil +} + +func (i *IdentityStore) pathEntityIDList() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return i.handlePathEntityListCommon(ctx, req, d, true) + } +} + +func (i *IdentityStore) pathEntityNameList() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return i.handlePathEntityListCommon(ctx, req, d, false) + } +} + +// handlePathEntityListCommon lists the IDs or names of all the valid entities +// in the identity store +func (i *IdentityStore) handlePathEntityListCommon(ctx context.Context, req *logical.Request, d *framework.FieldData, byID bool) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + ws := memdb.NewWatchSet() + + txn := i.db.Txn(false) + + iter, err := txn.Get(entitiesTable, "namespace_id", ns.ID) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch iterator for entities in memdb: {{err}}", err) + } + + ws.Add(iter.WatchCh()) + + var keys []string + entityInfo := map[string]interface{}{} + + type mountInfo struct { + MountType string + MountPath string + } + mountAccessorMap := map[string]mountInfo{} + + for { + raw := iter.Next() + if raw == nil { + break + } + entity := raw.(*identity.Entity) + if byID { + keys = append(keys, entity.ID) + } else { + keys = append(keys, entity.Name) + } + entityInfoEntry := map[string]interface{}{ + "name": entity.Name, + } + if len(entity.Aliases) > 0 { + aliasList := make([]interface{}, 0, len(entity.Aliases)) + for _, alias := range entity.Aliases { + entry := map[string]interface{}{ + "id": alias.ID, + "name": alias.Name, + "mount_accessor": alias.MountAccessor, + } + + mi, ok := mountAccessorMap[alias.MountAccessor] + if ok { + entry["mount_type"] = mi.MountType + entry["mount_path"] = mi.MountPath + } else { + mi = mountInfo{} + if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil { + mi.MountType = mountValidationResp.MountType + mi.MountPath = mountValidationResp.MountPath + entry["mount_type"] = mi.MountType + entry["mount_path"] = mi.MountPath + } + mountAccessorMap[alias.MountAccessor] = mi + } + + aliasList = append(aliasList, entry) + } + entityInfoEntry["aliases"] = aliasList + } + entityInfo[entity.ID] = entityInfoEntry + } + + return logical.ListResponseWithInfo(keys, entityInfo), nil +} + +func (i *IdentityStore) mergeEntity(ctx context.Context, txn *memdb.Txn, toEntity *identity.Entity, fromEntityIDs []string, force, grabLock, mergePolicies, persist bool) (error, error) { + if grabLock { + i.lock.Lock() + defer i.lock.Unlock() + } + + if toEntity == nil { + return errors.New("entity id to merge to is invalid"), nil + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + if toEntity.NamespaceID != ns.ID { + return errors.New("entity id to merge into does not belong to the request's namespace"), nil + } + + // Merge the MFA secrets + for _, fromEntityID := range fromEntityIDs { + if fromEntityID == toEntity.ID { + return 
errors.New("to_entity_id should not be present in from_entity_ids"), nil + } + + fromEntity, err := i.MemDBEntityByID(fromEntityID, false) + if err != nil { + return nil, err + } + + if fromEntity == nil { + return errors.New("entity id to merge from is invalid"), nil + } + + if fromEntity.NamespaceID != toEntity.NamespaceID { + return errors.New("entity id to merge from does not belong to this namespace"), nil + } + + for configID, configSecret := range fromEntity.MFASecrets { + _, ok := toEntity.MFASecrets[configID] + if ok && !force { + return nil, fmt.Errorf("conflicting MFA config ID %q in entity ID %q", configID, fromEntity.ID) + } else { + if toEntity.MFASecrets == nil { + toEntity.MFASecrets = make(map[string]*mfa.Secret) + } + toEntity.MFASecrets[configID] = configSecret + } + } + } + + isPerfSecondaryOrStandby := i.core.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) || i.core.perfStandby + for _, fromEntityID := range fromEntityIDs { + if fromEntityID == toEntity.ID { + return errors.New("to_entity_id should not be present in from_entity_ids"), nil + } + + fromEntity, err := i.MemDBEntityByID(fromEntityID, false) + if err != nil { + return nil, err + } + + if fromEntity == nil { + return errors.New("entity id to merge from is invalid"), nil + } + + if fromEntity.NamespaceID != toEntity.NamespaceID { + return errors.New("entity id to merge from does not belong to this namespace"), nil + } + + for _, alias := range fromEntity.Aliases { + // Set the desired canonical ID + alias.CanonicalID = toEntity.ID + + alias.MergedFromCanonicalIDs = append(alias.MergedFromCanonicalIDs, fromEntity.ID) + + err = i.MemDBUpsertAliasInTxn(txn, alias, false) + if err != nil { + return nil, errwrap.Wrapf("failed to update alias during merge: {{err}}", err) + } + + // Add the alias to the desired entity + toEntity.Aliases = append(toEntity.Aliases, alias) + } + + // If told to, merge policies + if mergePolicies { + toEntity.Policies = strutil.MergeSlices(toEntity.Policies, fromEntity.Policies) + } + + // If the entity from which we are merging from was already a merged + // entity, transfer over the Merged set to the entity we are + // merging into. + toEntity.MergedEntityIDs = append(toEntity.MergedEntityIDs, fromEntity.MergedEntityIDs...) + + // Add the entity from which we are merging from to the list of entities + // the entity we are merging into is composed of. 
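+ //
+ // Bookkeeping sketch (IDs illustrative): merging B, which had earlier
+ // absorbed C, into A yields
+ //
+ //	A.MergedEntityIDs == [..., "C", "B"]
+ //
+ // so A records the complete chain of entity IDs it has absorbed.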
+ toEntity.MergedEntityIDs = append(toEntity.MergedEntityIDs, fromEntity.ID) + + // Delete the entity which we are merging from in MemDB using the same transaction + err = i.MemDBDeleteEntityByIDInTxn(txn, fromEntity.ID) + if err != nil { + return nil, err + } + + if persist && !isPerfSecondaryOrStandby { + // Delete the entity which we are merging from in storage + err = i.entityPacker.DeleteItem(ctx, fromEntity.ID) + if err != nil { + return nil, err + } + } + } + + // Update MemDB with changes to the entity we are merging to + err = i.MemDBUpsertEntityInTxn(txn, toEntity) + if err != nil { + return nil, err + } + + if persist && !isPerfSecondaryOrStandby { + // Persist the entity which we are merging to + toEntityAsAny, err := ptypes.MarshalAny(toEntity) + if err != nil { + return nil, err + } + item := &storagepacker.Item{ + ID: toEntity.ID, + Message: toEntityAsAny, + } + + err = i.entityPacker.PutItem(ctx, item) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +var entityHelp = map[string][2]string{ + "entity": { + "Create a new entity", + "", + }, + "entity-id": { + "Update, read or delete an entity using entity ID", + "", + }, + "entity-name": { + "Update, read or delete an entity using entity name", + "", + }, + "entity-id-list": { + "List all the entity IDs", + "", + }, + "entity-name-list": { + "List all the entity names", + "", + }, + "entity-merge-id": { + "Merge two or more entities together", + "", + }, +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_group_aliases.go b/vendor/github.com/hashicorp/vault/vault/identity_store_group_aliases.go new file mode 100644 index 00000000..3e1cfc48 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store_group_aliases.go @@ -0,0 +1,367 @@ +package vault + +import ( + "context" + "fmt" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func groupAliasPaths(i *IdentityStore) []*framework.Path { + return []*framework.Path{ + { + Pattern: "group-alias$", + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the group alias.", + }, + "name": { + Type: framework.TypeString, + Description: "Alias of the group.", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this alias belongs to.", + }, + "canonical_id": { + Type: framework.TypeString, + Description: "ID of the group to which this is an alias.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathGroupAliasRegister(), + }, + + HelpSynopsis: strings.TrimSpace(groupAliasHelp["group-alias"][0]), + HelpDescription: strings.TrimSpace(groupAliasHelp["group-alias"][1]), + }, + { + Pattern: "group-alias/id/" + framework.GenericNameRegex("id"), + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the group alias.", + }, + "name": { + Type: framework.TypeString, + Description: "Alias of the group.", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this alias belongs to.", + }, + "canonical_id": { + Type: framework.TypeString, + Description: "ID of the group to which this is an alias.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: 
i.pathGroupAliasIDUpdate(), + logical.ReadOperation: i.pathGroupAliasIDRead(), + logical.DeleteOperation: i.pathGroupAliasIDDelete(), + }, + + HelpSynopsis: strings.TrimSpace(groupAliasHelp["group-alias-by-id"][0]), + HelpDescription: strings.TrimSpace(groupAliasHelp["group-alias-by-id"][1]), + }, + { + Pattern: "group-alias/id/?$", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathGroupAliasIDList(), + }, + + HelpSynopsis: strings.TrimSpace(groupAliasHelp["group-alias-id-list"][0]), + HelpDescription: strings.TrimSpace(groupAliasHelp["group-alias-id-list"][1]), + }, + } +} + +func (i *IdentityStore) pathGroupAliasRegister() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + _, ok := d.GetOk("id") + if ok { + return i.pathGroupAliasIDUpdate()(ctx, req, d) + } + + i.groupLock.Lock() + defer i.groupLock.Unlock() + + return i.handleGroupAliasUpdateCommon(ctx, req, d, nil) + } +} + +func (i *IdentityStore) pathGroupAliasIDUpdate() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupAliasID := d.Get("id").(string) + if groupAliasID == "" { + return logical.ErrorResponse("empty group alias ID"), nil + } + + i.groupLock.Lock() + defer i.groupLock.Unlock() + + groupAlias, err := i.MemDBAliasByID(groupAliasID, true, true) + if err != nil { + return nil, err + } + if groupAlias == nil { + return logical.ErrorResponse("invalid group alias ID"), nil + } + + return i.handleGroupAliasUpdateCommon(ctx, req, d, groupAlias) + } +} + +// NOTE: Currently we don't allow by-factors modification of group aliases the +// way we do with entities. As a result if a groupAlias is defined here we know +// that this is an update, where they provided an ID parameter. 
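+//
+// Request-shape sketch (values illustrative, not upstream docs; note that an
+// alias may only be attached to an external group):
+//
+//	// create: POST identity/group-alias
+//	// {"name": "devops", "mount_accessor": "auth_ldap_1234",
+//	//  "canonical_id": "<external-group-id>"}
+//	// update: POST identity/group-alias/id/<alias_id>, same fields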
+func (i *IdentityStore) handleGroupAliasUpdateCommon(ctx context.Context, req *logical.Request, d *framework.FieldData, groupAlias *identity.Alias) (*logical.Response, error) { + var newGroup, previousGroup *identity.Group + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + if groupAlias == nil { + groupAlias = &identity.Alias{ + CreationTime: ptypes.TimestampNow(), + NamespaceID: ns.ID, + } + groupAlias.LastUpdateTime = groupAlias.CreationTime + } else { + if ns.ID != groupAlias.NamespaceID { + return logical.ErrorResponse("existing alias not in the same namespace as request"), logical.ErrPermissionDenied + } + groupAlias.LastUpdateTime = ptypes.TimestampNow() + if groupAlias.CreationTime == nil { + groupAlias.CreationTime = groupAlias.LastUpdateTime + } + } + + // Get group alias name + name := d.Get("name").(string) + if name == "" { + return logical.ErrorResponse("missing alias name"), nil + } + + mountAccessor := d.Get("mount_accessor").(string) + if mountAccessor == "" { + return logical.ErrorResponse("missing mount_accessor"), nil + } + + canonicalID := d.Get("canonical_id").(string) + + if groupAlias.Name == name && groupAlias.MountAccessor == mountAccessor && (canonicalID == "" || groupAlias.CanonicalID == canonicalID) { + // Nothing to do, be idempotent + return nil, nil + } + + // Explicitly correct for previous versions that persisted this + groupAlias.MountType = "" + + // Canonical ID handling + { + if canonicalID != "" { + newGroup, err = i.MemDBGroupByID(canonicalID, true) + if err != nil { + return nil, err + } + if newGroup == nil { + return logical.ErrorResponse("invalid group ID given in 'canonical_id'"), nil + } + if newGroup.Type != groupTypeExternal { + return logical.ErrorResponse("alias can't be set on an internal group"), nil + } + if newGroup.NamespaceID != groupAlias.NamespaceID { + return logical.ErrorResponse("group referenced with 'canonical_id' not in the same namespace as alias"), logical.ErrPermissionDenied + } + groupAlias.CanonicalID = canonicalID + } + } + + // Validate name/accessor whether new or update + { + mountEntry := i.core.router.MatchingMountByAccessor(mountAccessor) + if mountEntry == nil { + return logical.ErrorResponse(fmt.Sprintf("invalid mount accessor %q", mountAccessor)), nil + } + if mountEntry.Local { + return logical.ErrorResponse(fmt.Sprintf("mount accessor %q is a local mount", mountAccessor)), nil + } + if mountEntry.NamespaceID != groupAlias.NamespaceID { + return logical.ErrorResponse("mount referenced via 'mount_accessor' not in the same namespace as alias"), logical.ErrPermissionDenied + } + + groupAliasByFactors, err := i.MemDBAliasByFactors(mountEntry.Accessor, name, false, true) + if err != nil { + return nil, err + } + // This check will still work for the new case too since it won't have + // an ID yet + if groupAliasByFactors != nil && groupAliasByFactors.ID != groupAlias.ID { + return logical.ErrorResponse("combination of mount and group alias name is already in use"), nil + } + + groupAlias.Name = name + groupAlias.MountAccessor = mountAccessor + } + + switch groupAlias.ID { + case "": + // It's a new alias + if newGroup == nil { + // If this is a new alias being tied to a non-existent group, + // create a new group for it + newGroup = &identity.Group{ + Type: groupTypeExternal, + } + } + + default: + // Fetch the group, if any, to which the alias is tied to + previousGroup, err = i.MemDBGroupByAliasID(groupAlias.ID, true) + if err != nil { + return nil, err + } + if previousGroup == 
nil {
+			return nil, fmt.Errorf("group alias is not associated with a group")
+		}
+		if previousGroup.NamespaceID != groupAlias.NamespaceID {
+			return logical.ErrorResponse("previous group found for alias not in the same namespace as alias"), logical.ErrPermissionDenied
+		}
+
+		if newGroup == nil || newGroup.ID == previousGroup.ID {
+			// If newGroup is nil they didn't specify a canonical ID, so they
+			// aren't trying to update it; set the existing group as the "new"
+			// one. If it's the same ID they specified the same canonical ID,
+			// so follow the same behavior.
+			newGroup = previousGroup
+			previousGroup = nil
+		} else {
+			// The alias is moving, so nil out the previous group alias
+			previousGroup.Alias = nil
+		}
+	}
+
+	newGroup.Alias = groupAlias
+	err = i.sanitizeAndUpsertGroup(ctx, newGroup, previousGroup, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"id":           groupAlias.ID,
+			"canonical_id": newGroup.ID,
+		},
+	}, nil
+}
+
+// pathGroupAliasIDRead returns the properties of an alias for a given
+// alias ID
+func (i *IdentityStore) pathGroupAliasIDRead() framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+		groupAliasID := d.Get("id").(string)
+		if groupAliasID == "" {
+			return logical.ErrorResponse("empty group alias id"), nil
+		}
+
+		groupAlias, err := i.MemDBAliasByID(groupAliasID, false, true)
+		if err != nil {
+			return nil, err
+		}
+
+		return i.handleAliasReadCommon(ctx, groupAlias)
+	}
+}
+
+// pathGroupAliasIDDelete deletes the group's alias for a given group alias ID
+func (i *IdentityStore) pathGroupAliasIDDelete() framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+		groupAliasID := d.Get("id").(string)
+		if groupAliasID == "" {
+			return logical.ErrorResponse("missing group alias ID"), nil
+		}
+
+		i.groupLock.Lock()
+		defer i.groupLock.Unlock()
+
+		txn := i.db.Txn(true)
+		defer txn.Abort()
+
+		alias, err := i.MemDBAliasByIDInTxn(txn, groupAliasID, false, true)
+		if err != nil {
+			return nil, err
+		}
+
+		if alias == nil {
+			return nil, nil
+		}
+
+		ns, err := namespace.FromContext(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if ns.ID != alias.NamespaceID {
+			return logical.ErrorResponse("request namespace is not the same as the group alias namespace"), logical.ErrPermissionDenied
+		}
+
+		group, err := i.MemDBGroupByAliasIDInTxn(txn, alias.ID, true)
+		if err != nil {
+			return nil, err
+		}
+
+		// If there is no group tied to a valid alias, something is wrong
+		if group == nil {
+			return nil, fmt.Errorf("alias not associated to a group")
+		}
+
+		// Delete group alias in memdb
+		err = i.MemDBDeleteAliasByIDInTxn(txn, group.Alias.ID, true)
+		if err != nil {
+			return nil, err
+		}
+
+		// Delete the alias
+		group.Alias = nil
+
+		err = i.UpsertGroupInTxn(ctx, txn, group, true)
+		if err != nil {
+			return nil, err
+		}
+
+		txn.Commit()
+
+		return nil, nil
+	}
+}
+
+// pathGroupAliasIDList lists the IDs of all the valid group aliases in the
+// identity store
+func (i *IdentityStore) pathGroupAliasIDList() framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+		return i.handleAliasListCommon(ctx, true)
+	}
+}
+
+var groupAliasHelp = map[string][2]string{
+	"group-alias": {
+		"Creates a new group alias, or updates an existing one.",
+		"",
+	},
+	"group-alias-by-id": {
+		"Update, read
or delete a group alias using ID.", + "", + }, + "group-alias-id-list": { + "List all the group alias IDs.", + "", + }, +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_groups.go b/vendor/github.com/hashicorp/vault/vault/identity_store_groups.go new file mode 100644 index 00000000..074a4ba4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store_groups.go @@ -0,0 +1,548 @@ +package vault + +import ( + "context" + "fmt" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + groupTypeInternal = "internal" + groupTypeExternal = "external" +) + +func groupPathFields() map[string]*framework.FieldSchema { + return map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the group. If set, updates the corresponding existing group.", + }, + "type": { + Type: framework.TypeString, + Description: "Type of the group, 'internal' or 'external'. Defaults to 'internal'", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the group.", + }, + "metadata": { + Type: framework.TypeKVPairs, + Description: `Metadata to be associated with the group. +In CLI, this parameter can be repeated multiple times, and it all gets merged together. +For example: +vault metadata=key1=value1 metadata=key2=value2 + `, + }, + "policies": { + Type: framework.TypeCommaStringSlice, + Description: "Policies to be tied to the group.", + }, + "member_group_ids": { + Type: framework.TypeCommaStringSlice, + Description: "Group IDs to be assigned as group members.", + }, + "member_entity_ids": { + Type: framework.TypeCommaStringSlice, + Description: "Entity IDs to be assigned as group members.", + }, + } +} + +func groupPaths(i *IdentityStore) []*framework.Path { + return []*framework.Path{ + { + Pattern: "group$", + Fields: groupPathFields(), + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathGroupRegister(), + }, + + HelpSynopsis: strings.TrimSpace(groupHelp["register"][0]), + HelpDescription: strings.TrimSpace(groupHelp["register"][1]), + }, + { + Pattern: "group/id/" + framework.GenericNameRegex("id"), + Fields: groupPathFields(), + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathGroupIDUpdate(), + logical.ReadOperation: i.pathGroupIDRead(), + logical.DeleteOperation: i.pathGroupIDDelete(), + }, + + HelpSynopsis: strings.TrimSpace(groupHelp["group-by-id"][0]), + HelpDescription: strings.TrimSpace(groupHelp["group-by-id"][1]), + }, + { + Pattern: "group/id/?$", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathGroupIDList(), + }, + + HelpSynopsis: strings.TrimSpace(groupHelp["group-id-list"][0]), + HelpDescription: strings.TrimSpace(groupHelp["group-id-list"][1]), + }, + { + Pattern: "group/name/(?P.+)", + Fields: groupPathFields(), + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathGroupNameUpdate(), + logical.ReadOperation: i.pathGroupNameRead(), + logical.DeleteOperation: i.pathGroupNameDelete(), + }, + + HelpSynopsis: strings.TrimSpace(groupHelp["group-by-name"][0]), + HelpDescription: strings.TrimSpace(groupHelp["group-by-name"][1]), + }, + { + Pattern: "group/name/?$", + Callbacks: 
map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathGroupNameList(), + }, + + HelpSynopsis: strings.TrimSpace(groupHelp["group-name-list"][0]), + HelpDescription: strings.TrimSpace(groupHelp["group-name-list"][1]), + }, + } +} + +func (i *IdentityStore) pathGroupRegister() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + _, ok := d.GetOk("id") + if ok { + return i.pathGroupIDUpdate()(ctx, req, d) + } + + i.groupLock.Lock() + defer i.groupLock.Unlock() + + return i.handleGroupUpdateCommon(ctx, req, d, nil) + } +} + +func (i *IdentityStore) pathGroupIDUpdate() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupID := d.Get("id").(string) + if groupID == "" { + return logical.ErrorResponse("empty group ID"), nil + } + + i.groupLock.Lock() + defer i.groupLock.Unlock() + + group, err := i.MemDBGroupByID(groupID, true) + if err != nil { + return nil, err + } + if group == nil { + return logical.ErrorResponse("invalid group ID"), nil + } + + return i.handleGroupUpdateCommon(ctx, req, d, group) + } +} + +func (i *IdentityStore) pathGroupNameUpdate() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupName := d.Get("name").(string) + if groupName == "" { + return logical.ErrorResponse("empty group name"), nil + } + + i.groupLock.Lock() + defer i.groupLock.Unlock() + + group, err := i.MemDBGroupByName(ctx, groupName, true) + if err != nil { + return nil, err + } + return i.handleGroupUpdateCommon(ctx, req, d, group) + } +} + +func (i *IdentityStore) handleGroupUpdateCommon(ctx context.Context, req *logical.Request, d *framework.FieldData, group *identity.Group) (*logical.Response, error) { + var newGroup bool + if group == nil { + group = new(identity.Group) + newGroup = true + } + + // Update the policies if supplied + policiesRaw, ok := d.GetOk("policies") + if ok { + group.Policies = policiesRaw.([]string) + } + + if strutil.StrListContains(group.Policies, "root") { + return logical.ErrorResponse("policies cannot contain root"), nil + } + + groupTypeRaw, ok := d.GetOk("type") + if ok { + groupType := groupTypeRaw.(string) + if group.Type != "" && groupType != group.Type { + return logical.ErrorResponse(fmt.Sprintf("group type cannot be changed")), nil + } + + group.Type = groupType + } + + // If group type is not set, default to internal type + if group.Type == "" { + group.Type = groupTypeInternal + } + + if group.Type != groupTypeInternal && group.Type != groupTypeExternal { + return logical.ErrorResponse(fmt.Sprintf("invalid group type %q", group.Type)), nil + } + + // Get the name + groupName := d.Get("name").(string) + if groupName != "" { + // Check if there is a group already existing for the given name + groupByName, err := i.MemDBGroupByName(ctx, groupName, false) + if err != nil { + return nil, err + } + + // If this is a new group and if there already exists a group by this + // name, error out. If the name of an existing group is about to be + // modified into something which is already tied to a different group, + // error out. 
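+		// For example: a create request with a brand new name is allowed; a
+		// create request that reuses an existing group's name resolves to
+		// that group and becomes an update; renaming a group to a name held
+		// by a different group ID is rejected by the switch below.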
+ switch { + case groupByName == nil: + // Allowed + case group.ID == "": + group = groupByName + case group.ID != "" && groupByName.ID != group.ID: + return logical.ErrorResponse("group name is already in use"), nil + } + group.Name = groupName + } + + metadata, ok, err := d.GetOkErr("metadata") + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed to parse metadata: %v", err)), nil + } + if ok { + group.Metadata = metadata.(map[string]string) + } + + memberEntityIDsRaw, ok := d.GetOk("member_entity_ids") + if ok { + if group.Type == groupTypeExternal { + return logical.ErrorResponse("member entities can't be set manually for external groups"), nil + } + group.MemberEntityIDs = memberEntityIDsRaw.([]string) + } + + memberGroupIDsRaw, ok := d.GetOk("member_group_ids") + var memberGroupIDs []string + if ok { + if group.Type == groupTypeExternal { + return logical.ErrorResponse("member groups can't be set for external groups"), nil + } + memberGroupIDs = memberGroupIDsRaw.([]string) + } + + err = i.sanitizeAndUpsertGroup(ctx, group, nil, memberGroupIDs) + if err != nil { + return nil, err + } + + if !newGroup { + return nil, nil + } + + respData := map[string]interface{}{ + "id": group.ID, + "name": group.Name, + } + return &logical.Response{ + Data: respData, + }, nil +} + +func (i *IdentityStore) pathGroupIDRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupID := d.Get("id").(string) + if groupID == "" { + return logical.ErrorResponse("empty group id"), nil + } + + group, err := i.MemDBGroupByID(groupID, false) + if err != nil { + return nil, err + } + if group == nil { + return nil, nil + } + + return i.handleGroupReadCommon(ctx, group) + } +} + +func (i *IdentityStore) pathGroupNameRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupName := d.Get("name").(string) + if groupName == "" { + return logical.ErrorResponse("empty group name"), nil + } + + group, err := i.MemDBGroupByName(ctx, groupName, false) + if err != nil { + return nil, err + } + if group == nil { + return nil, nil + } + + return i.handleGroupReadCommon(ctx, group) + } +} + +func (i *IdentityStore) handleGroupReadCommon(ctx context.Context, group *identity.Group) (*logical.Response, error) { + if group == nil { + return nil, nil + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + if ns.ID != group.NamespaceID { + return logical.ErrorResponse("request namespace is not the same as the group namespace"), logical.ErrPermissionDenied + } + + respData := map[string]interface{}{} + respData["id"] = group.ID + respData["name"] = group.Name + respData["policies"] = group.Policies + respData["member_entity_ids"] = group.MemberEntityIDs + respData["parent_group_ids"] = group.ParentGroupIDs + respData["metadata"] = group.Metadata + respData["creation_time"] = ptypes.TimestampString(group.CreationTime) + respData["last_update_time"] = ptypes.TimestampString(group.LastUpdateTime) + respData["modify_index"] = group.ModifyIndex + respData["type"] = group.Type + respData["namespace_id"] = group.NamespaceID + + aliasMap := map[string]interface{}{} + if group.Alias != nil { + aliasMap["id"] = group.Alias.ID + aliasMap["canonical_id"] = group.Alias.CanonicalID + aliasMap["mount_accessor"] = group.Alias.MountAccessor + aliasMap["metadata"] = group.Alias.Metadata + aliasMap["name"] = group.Alias.Name 
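+		// merged_from_canonical_ids records canonical IDs captured when
+		// aliases are merged, if any such merge has ever touched this alias.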
+ aliasMap["merged_from_canonical_ids"] = group.Alias.MergedFromCanonicalIDs + aliasMap["creation_time"] = ptypes.TimestampString(group.Alias.CreationTime) + aliasMap["last_update_time"] = ptypes.TimestampString(group.Alias.LastUpdateTime) + + if mountValidationResp := i.core.router.validateMountByAccessor(group.Alias.MountAccessor); mountValidationResp != nil { + aliasMap["mount_path"] = mountValidationResp.MountPath + aliasMap["mount_type"] = mountValidationResp.MountType + } + } + + respData["alias"] = aliasMap + + var memberGroupIDs []string + memberGroups, err := i.MemDBGroupsByParentGroupID(group.ID, false) + if err != nil { + return nil, err + } + for _, memberGroup := range memberGroups { + memberGroupIDs = append(memberGroupIDs, memberGroup.ID) + } + + respData["member_group_ids"] = memberGroupIDs + + return &logical.Response{ + Data: respData, + }, nil +} + +func (i *IdentityStore) pathGroupIDDelete() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupID := d.Get("id").(string) + if groupID == "" { + return logical.ErrorResponse("empty group ID"), nil + } + + return i.handleGroupDeleteCommon(ctx, groupID, true) + } +} + +func (i *IdentityStore) pathGroupNameDelete() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupName := d.Get("name").(string) + if groupName == "" { + return logical.ErrorResponse("empty group name"), nil + } + + return i.handleGroupDeleteCommon(ctx, groupName, false) + } +} + +func (i *IdentityStore) handleGroupDeleteCommon(ctx context.Context, key string, byID bool) (*logical.Response, error) { + // Acquire the lock to modify the group storage entry + i.groupLock.Lock() + defer i.groupLock.Unlock() + + // Create a MemDB transaction to delete group + txn := i.db.Txn(true) + defer txn.Abort() + + var group *identity.Group + var err error + switch byID { + case true: + group, err = i.MemDBGroupByIDInTxn(txn, key, false) + if err != nil { + return nil, err + } + default: + group, err = i.MemDBGroupByNameInTxn(ctx, txn, key, false) + if err != nil { + return nil, err + } + } + if group == nil { + return nil, nil + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + if group.NamespaceID != ns.ID { + return logical.ErrorResponse("request namespace is not the same as the group namespace"), logical.ErrPermissionDenied + } + + // Delete group alias from memdb + if group.Type == groupTypeExternal && group.Alias != nil { + err = i.MemDBDeleteAliasByIDInTxn(txn, group.Alias.ID, true) + if err != nil { + return nil, err + } + } + + // Delete the group using the same transaction + err = i.MemDBDeleteGroupByIDInTxn(txn, group.ID) + if err != nil { + return nil, err + } + + // Delete the group from storage + err = i.groupPacker.DeleteItem(ctx, group.ID) + if err != nil { + return nil, err + } + + // Committing the transaction *after* successfully deleting group + txn.Commit() + + return nil, nil +} + +// pathGroupIDList lists the IDs of all the groups in the identity store +func (i *IdentityStore) pathGroupIDList() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return i.handleGroupListCommon(ctx, true) + } +} + +// pathGroupNameList lists the names of all the groups in the identity store +func (i *IdentityStore) pathGroupNameList() framework.OperationFunc { + return 
func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return i.handleGroupListCommon(ctx, false) + } +} + +func (i *IdentityStore) handleGroupListCommon(ctx context.Context, byID bool) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + txn := i.db.Txn(false) + + iter, err := txn.Get(groupsTable, "namespace_id", ns.ID) + if err != nil { + return nil, errwrap.Wrapf("failed to lookup groups using namespace ID: {{err}}", err) + } + + var keys []string + groupInfo := map[string]interface{}{} + + type mountInfo struct { + MountType string + MountPath string + } + mountAccessorMap := map[string]mountInfo{} + + for entry := iter.Next(); entry != nil; entry = iter.Next() { + group := entry.(*identity.Group) + + if byID { + keys = append(keys, group.ID) + } else { + keys = append(keys, group.Name) + } + + groupInfoEntry := map[string]interface{}{ + "name": group.Name, + "num_member_entities": len(group.MemberEntityIDs), + "num_parent_groups": len(group.ParentGroupIDs), + } + if group.Alias != nil { + entry := map[string]interface{}{ + "id": group.Alias.ID, + "name": group.Alias.Name, + "mount_accessor": group.Alias.MountAccessor, + } + + mi, ok := mountAccessorMap[group.Alias.MountAccessor] + if ok { + entry["mount_type"] = mi.MountType + entry["mount_path"] = mi.MountPath + } else { + mi = mountInfo{} + if mountValidationResp := i.core.router.validateMountByAccessor(group.Alias.MountAccessor); mountValidationResp != nil { + mi.MountType = mountValidationResp.MountType + mi.MountPath = mountValidationResp.MountPath + entry["mount_type"] = mi.MountType + entry["mount_path"] = mi.MountPath + } + mountAccessorMap[group.Alias.MountAccessor] = mi + } + + groupInfoEntry["alias"] = entry + } + groupInfo[group.ID] = groupInfoEntry + } + + return logical.ListResponseWithInfo(keys, groupInfo), nil +} + +var groupHelp = map[string][2]string{ + "register": { + "Create a new group.", + "", + }, + "group-by-id": { + "Update or delete an existing group using its ID.", + "", + }, + "group-id-list": { + "List all the group IDs.", + "", + }, +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_oidc.go b/vendor/github.com/hashicorp/vault/vault/identity_store_oidc.go new file mode 100644 index 00000000..8804ad74 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store_oidc.go @@ -0,0 +1,1662 @@ +package vault + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/base62" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/patrickmn/go-cache" + "golang.org/x/crypto/ed25519" + "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" +) + +type oidcConfig struct { + Issuer string `json:"issuer"` + + // effectiveIssuer is a calculated field and will be either Issuer (if + // that's set) or the Vault instance's api_addr. 
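+	// getOIDCConfig then appends the versioned, namespaced OIDC mount path,
+	// so a typical effective issuer in the root namespace looks like (host
+	// is illustrative):
+	//
+	//	https://vault.example.com:8200/v1/identity/oidc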
+ effectiveIssuer string +} + +type expireableKey struct { + KeyID string `json:"key_id"` + ExpireAt time.Time `json:"expire_at"` +} + +type namedKey struct { + name string + Algorithm string `json:"signing_algorithm"` + VerificationTTL time.Duration `json:"verification_ttl"` + RotationPeriod time.Duration `json:"rotation_period"` + KeyRing []*expireableKey `json:"key_ring"` + SigningKey *jose.JSONWebKey `json:"signing_key"` + NextRotation time.Time `json:"next_rotation"` + AllowedClientIDs []string `json:"allowed_client_ids"` +} + +type role struct { + TokenTTL time.Duration `json:"token_ttl"` + Key string `json:"key"` + Template string `json:"template"` + ClientID string `json:"client_id"` +} + +// idToken contains the required OIDC fields. +// +// Templated claims will be merged into the final output. Those claims may +// include top-level keys, but those keys may not overwrite any of the +// required OIDC fields. +type idToken struct { + Issuer string `json:"iss"` // api_addr or custom Issuer + Namespace string `json:"namespace"` // Namespace of issuer + Subject string `json:"sub"` // Entity ID + Audience string `json:"aud"` // role ID will be used here. + Expiry int64 `json:"exp"` // Expiration, as determined by the role. + IssuedAt int64 `json:"iat"` // Time of token creation +} + +// discovery contains a subset of the required elements of OIDC discovery needed +// for JWT verification libraries to use the .well-known endpoint. +// +// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata +type discovery struct { + Issuer string `json:"issuer"` + Keys string `json:"jwks_uri"` + ResponseTypes []string `json:"response_types_supported"` + Subjects []string `json:"subject_types_supported"` + IDTokenAlgs []string `json:"id_token_signing_alg_values_supported"` +} + +// oidcCache is a thin wrapper around go-cache to partition by namespace +type oidcCache struct { + c *cache.Cache +} + +var errNilNamespace = errors.New("nil namespace in oidc cache request") + +const ( + issuerPath = "identity/oidc" + oidcTokensPrefix = "oidc_tokens/" + oidcConfigStorageKey = oidcTokensPrefix + "config/" + namedKeyConfigPath = oidcTokensPrefix + "named_keys/" + publicKeysConfigPath = oidcTokensPrefix + "public_keys/" + roleConfigPath = oidcTokensPrefix + "roles/" +) + +var requiredClaims = []string{"iat", "aud", "exp", "iss", "sub", "namespace"} +var supportedAlgs = []string{ + string(jose.RS256), + string(jose.RS384), + string(jose.RS512), + string(jose.ES256), + string(jose.ES384), + string(jose.ES512), + string(jose.EdDSA), +} + +// pseudo-namespace for cache items that don't belong to any real namespace. +var noNamespace = &namespace.Namespace{ID: "__NO_NAMESPACE"} + +func oidcPaths(i *IdentityStore) []*framework.Path { + return []*framework.Path{ + { + Pattern: "oidc/config/?$", + Fields: map[string]*framework.FieldSchema{ + "issuer": { + Type: framework.TypeString, + Description: "Issuer URL to be used in the iss claim of the token. 
If not set, Vault's api_addr will be used.",
+				},
+			},
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ReadOperation:   i.pathOIDCReadConfig,
+				logical.UpdateOperation: i.pathOIDCUpdateConfig,
+			},
+			HelpSynopsis:    "OIDC configuration",
+			HelpDescription: "Update OIDC configuration in the identity backend",
+		},
+		{
+			Pattern: "oidc/key/" + framework.GenericNameRegex("name"),
+			Fields: map[string]*framework.FieldSchema{
+				"name": {
+					Type:        framework.TypeString,
+					Description: "Name of the key",
+				},
+
+				"rotation_period": {
+					Type:        framework.TypeDurationSecond,
+					Description: "How often to generate a new keypair.",
+					Default:     "24h",
+				},
+
+				"verification_ttl": {
+					Type:        framework.TypeDurationSecond,
+					Description: "Controls how long the public portion of a key will be available for verification after being rotated.",
+					Default:     "24h",
+				},
+
+				"algorithm": {
+					Type:        framework.TypeString,
+					Description: "Signing algorithm to use. This will default to RS256.",
+					Default:     "RS256",
+				},
+
+				"allowed_client_ids": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "Comma separated string or array of role client ids allowed to use this key for signing. If empty no roles are allowed. If \"*\" all roles are allowed.",
+				},
+			},
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.CreateOperation: i.pathOIDCCreateUpdateKey,
+				logical.UpdateOperation: i.pathOIDCCreateUpdateKey,
+				logical.ReadOperation:   i.pathOIDCReadKey,
+				logical.DeleteOperation: i.pathOIDCDeleteKey,
+			},
+			ExistenceCheck:  i.pathOIDCKeyExistenceCheck,
+			HelpSynopsis:    "CRUD operations for OIDC keys.",
+			HelpDescription: "Create, Read, Update, and Delete OIDC named keys.",
+		},
+		{
+			Pattern: "oidc/key/" + framework.GenericNameRegex("name") + "/rotate/?$",
+			Fields: map[string]*framework.FieldSchema{
+				"name": {
+					Type:        framework.TypeString,
+					Description: "Name of the key",
+				},
+				"verification_ttl": {
+					Type:        framework.TypeDurationSecond,
+					Description: "Controls how long the public portion of a key will be available for verification after being rotated. Setting verification_ttl here will override the verification_ttl set on the key.",
+				},
+			},
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.UpdateOperation: i.pathOIDCRotateKey,
+			},
+			HelpSynopsis:    "Rotate a named OIDC key.",
+			HelpDescription: "Manually rotate a named OIDC key. Rotating a named key will cause a new underlying signing key to be generated. The public portion of the underlying rotated signing key will continue to live for the verification_ttl duration.",
+		},
+		{
+			Pattern: "oidc/key/?$",
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ListOperation: i.pathOIDCListKey,
+			},
+			HelpSynopsis:    "List OIDC keys",
+			HelpDescription: "List all named OIDC keys",
+		},
+		{
+			Pattern: "oidc/.well-known/openid-configuration/?$",
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ReadOperation: i.pathOIDCDiscovery,
+			},
+			HelpSynopsis:    "Query OIDC configurations",
+			HelpDescription: "Query this path to retrieve the configured OIDC Issuer and Keys endpoints, response types, subject types, and signing algorithms used by the OIDC backend.",
+		},
+		{
+			Pattern: "oidc/.well-known/keys/?$",
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ReadOperation: i.pathOIDCReadPublicKeys,
+			},
+			HelpSynopsis:    "Retrieve public keys",
+			HelpDescription: "Query this path to retrieve the public portion of keys used to sign OIDC tokens.
Clients can use this to validate the authenticity of the OIDC token claims.", + }, + { + Pattern: "oidc/token/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: i.pathOIDCGenerateToken, + }, + HelpSynopsis: "Generate an OIDC token", + HelpDescription: "Generate an OIDC token against a configured role. The vault token used to call this path must have a corresponding entity.", + }, + { + Pattern: "oidc/role/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role", + }, + "key": { + Type: framework.TypeString, + Description: "The OIDC key to use for generating tokens. The specified key must already exist.", + }, + "template": { + Type: framework.TypeString, + Description: "The template string to use for generating tokens. This may be in string-ified JSON or base64 format.", + }, + "ttl": { + Type: framework.TypeDurationSecond, + Description: "TTL of the tokens generated against the role.", + Default: "24h", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathOIDCCreateUpdateRole, + logical.CreateOperation: i.pathOIDCCreateUpdateRole, + logical.ReadOperation: i.pathOIDCReadRole, + logical.DeleteOperation: i.pathOIDCDeleteRole, + }, + ExistenceCheck: i.pathOIDCRoleExistenceCheck, + HelpSynopsis: "CRUD operations on OIDC Roles", + HelpDescription: "Create, Read, Update, and Delete OIDC Roles. OIDC tokens are generated against roles which can be configured to determine how OIDC tokens are generated.", + }, + { + Pattern: "oidc/role/?$", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathOIDCListRole, + }, + HelpSynopsis: "List configured OIDC roles", + HelpDescription: "List all configured OIDC roles in the identity backend.", + }, + { + Pattern: "oidc/introspect/?$", + Fields: map[string]*framework.FieldSchema{ + "token": { + Type: framework.TypeString, + Description: "Token to verify", + }, + "client_id": { + Type: framework.TypeString, + Description: "Optional client_id to verify", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathOIDCIntrospect, + }, + HelpSynopsis: "Verify the authenticity of an OIDC token", + HelpDescription: "Use this path to verify the authenticity of an OIDC token and whether the associated entity is active and enabled.", + }, + } +} + +func (i *IdentityStore) pathOIDCReadConfig(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + c, err := i.getOIDCConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + + if c == nil { + return nil, nil + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "issuer": c.Issuer, + }, + } + + if i.core.redirectAddr == "" && c.Issuer == "" { + resp.AddWarning(`Both "issuer" and Vault's "api_addr" are empty. 
` + + `The issuer claim in generated tokens will not be network reachable.`) + } + + return resp, nil +} + +func (i *IdentityStore) pathOIDCUpdateConfig(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + var resp *logical.Response + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + issuerRaw, ok := d.GetOk("issuer") + if !ok { + return nil, nil + } + + issuer := issuerRaw.(string) + + if issuer != "" { + // verify that issuer is the correct format: + // - http or https + // - host name + // - optional port + // - nothing more + valid := false + if u, err := url.Parse(issuer); err == nil { + u2 := url.URL{ + Scheme: u.Scheme, + Host: u.Host, + } + valid = (*u == u2) && + (u.Scheme == "http" || u.Scheme == "https") && + u.Host != "" + } + + if !valid { + return logical.ErrorResponse( + "invalid issuer, which must include only a scheme, host, " + + "and optional port (e.g. https://example.com:8200)"), nil + } + + resp = &logical.Response{ + Warnings: []string{`If "issuer" is set explicitly, all tokens must be ` + + `validated against that address, including those issued by secondary ` + + `clusters. Setting issuer to "" will restore the default behavior of ` + + `using the cluster's api_addr as the issuer.`}, + } + } + + c := oidcConfig{ + Issuer: issuer, + } + + entry, err := logical.StorageEntryJSON(oidcConfigStorageKey, c) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + if err := i.oidcCache.Flush(ns); err != nil { + return nil, err + } + + return resp, nil +} + +func (i *IdentityStore) getOIDCConfig(ctx context.Context, s logical.Storage) (*oidcConfig, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + v, ok, err := i.oidcCache.Get(ns, "config") + if err != nil { + return nil, err + } + + if ok { + return v.(*oidcConfig), nil + } + + var c oidcConfig + entry, err := s.Get(ctx, oidcConfigStorageKey) + if err != nil { + return nil, err + } + + if entry != nil { + if err := entry.DecodeJSON(&c); err != nil { + return nil, err + } + } + + c.effectiveIssuer = c.Issuer + if c.effectiveIssuer == "" { + c.effectiveIssuer = i.core.redirectAddr + } + + c.effectiveIssuer += "/v1/" + ns.Path + issuerPath + + if err := i.oidcCache.SetDefault(ns, "config", &c); err != nil { + return nil, err + } + + return &c, nil +} + +// handleOIDCCreateKey is used to create a new named key or update an existing one +func (i *IdentityStore) pathOIDCCreateUpdateKey(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + name := d.Get("name").(string) + + i.oidcLock.Lock() + defer i.oidcLock.Unlock() + + var key namedKey + if req.Operation == logical.UpdateOperation { + entry, err := req.Storage.Get(ctx, namedKeyConfigPath+name) + if err != nil { + return nil, err + } + if entry != nil { + if err := entry.DecodeJSON(&key); err != nil { + return nil, err + } + } + } + + if rotationPeriodRaw, ok := d.GetOk("rotation_period"); ok { + key.RotationPeriod = time.Duration(rotationPeriodRaw.(int)) * time.Second + } else if req.Operation == logical.CreateOperation { + key.RotationPeriod = time.Duration(d.Get("rotation_period").(int)) * time.Second + } + + if key.RotationPeriod < 1*time.Minute { + return logical.ErrorResponse("rotation_period must be at least one minute"), nil + } + + if verificationTTLRaw, ok := 
d.GetOk("verification_ttl"); ok { + key.VerificationTTL = time.Duration(verificationTTLRaw.(int)) * time.Second + } else if req.Operation == logical.CreateOperation { + key.VerificationTTL = time.Duration(d.Get("verification_ttl").(int)) * time.Second + } + + if key.VerificationTTL > 10*key.RotationPeriod { + return logical.ErrorResponse("verification_ttl cannot be longer than 10x rotation_period"), nil + } + + if allowedClientIDsRaw, ok := d.GetOk("allowed_client_ids"); ok { + key.AllowedClientIDs = allowedClientIDsRaw.([]string) + } else if req.Operation == logical.CreateOperation { + key.AllowedClientIDs = d.Get("allowed_client_ids").([]string) + } + + prevAlgorithm := key.Algorithm + if algorithm, ok := d.GetOk("algorithm"); ok { + key.Algorithm = algorithm.(string) + } else if req.Operation == logical.CreateOperation { + key.Algorithm = d.Get("algorithm").(string) + } + + if !strutil.StrListContains(supportedAlgs, key.Algorithm) { + return logical.ErrorResponse("unknown signing algorithm %q", key.Algorithm), nil + } + + // Update next rotation time if it is unset or now earlier than previously set. + nextRotation := time.Now().Add(key.RotationPeriod) + if key.NextRotation.IsZero() || nextRotation.Before(key.NextRotation) { + key.NextRotation = nextRotation + } + + // generate keys if creating a new key or changing algorithms + if key.Algorithm != prevAlgorithm { + signingKey, err := generateKeys(key.Algorithm) + if err != nil { + return nil, err + } + + key.SigningKey = signingKey + key.KeyRing = append(key.KeyRing, &expireableKey{KeyID: signingKey.Public().KeyID}) + + if err := saveOIDCPublicKey(ctx, req.Storage, signingKey.Public()); err != nil { + return nil, err + } + } + + if err := i.oidcCache.Flush(ns); err != nil { + return nil, err + } + + // store named key + entry, err := logical.StorageEntryJSON(namedKeyConfigPath+name, key) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +// handleOIDCReadKey is used to read an existing key +func (i *IdentityStore) pathOIDCReadKey(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + i.oidcLock.RLock() + defer i.oidcLock.RUnlock() + + entry, err := req.Storage.Get(ctx, namedKeyConfigPath+name) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var storedNamedKey namedKey + if err := entry.DecodeJSON(&storedNamedKey); err != nil { + return nil, err + } + return &logical.Response{ + Data: map[string]interface{}{ + "rotation_period": int64(storedNamedKey.RotationPeriod.Seconds()), + "verification_ttl": int64(storedNamedKey.VerificationTTL.Seconds()), + "algorithm": storedNamedKey.Algorithm, + "allowed_client_ids": storedNamedKey.AllowedClientIDs, + }, + }, nil +} + +// handleOIDCDeleteKey is used to delete a key +func (i *IdentityStore) pathOIDCDeleteKey(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + targetKeyName := d.Get("name").(string) + + i.oidcLock.Lock() + + // it is an error to delete a key that is actively referenced by a role + roleNames, err := req.Storage.List(ctx, roleConfigPath) + if err != nil { + return nil, err + } + + var role *role + rolesReferencingTargetKeyName := make([]string, 0) + for _, roleName := range roleNames { + entry, err := req.Storage.Get(ctx, roleConfigPath+roleName) + if 
err != nil { + return nil, err + } + if entry != nil { + if err := entry.DecodeJSON(&role); err != nil { + return nil, err + } + if role.Key == targetKeyName { + rolesReferencingTargetKeyName = append(rolesReferencingTargetKeyName, roleName) + } + } + } + + if len(rolesReferencingTargetKeyName) > 0 { + errorMessage := fmt.Sprintf("unable to delete key %q because it is currently referenced by these roles: %s", + targetKeyName, strings.Join(rolesReferencingTargetKeyName, ", ")) + i.oidcLock.Unlock() + return logical.ErrorResponse(errorMessage), logical.ErrInvalidRequest + } + + // key can safely be deleted now + err = req.Storage.Delete(ctx, namedKeyConfigPath+targetKeyName) + if err != nil { + return nil, err + } + + i.oidcLock.Unlock() + + _, err = i.expireOIDCPublicKeys(ctx, req.Storage) + if err != nil { + return nil, err + } + + if err := i.oidcCache.Flush(ns); err != nil { + return nil, err + } + + return nil, nil +} + +// handleOIDCListKey is used to list named keys +func (i *IdentityStore) pathOIDCListKey(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + i.oidcLock.RLock() + defer i.oidcLock.RUnlock() + + keys, err := req.Storage.List(ctx, namedKeyConfigPath) + if err != nil { + return nil, err + } + return logical.ListResponse(keys), nil +} + +// pathOIDCRotateKey is used to manually trigger a rotation on the named key +func (i *IdentityStore) pathOIDCRotateKey(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + name := d.Get("name").(string) + + i.oidcLock.Lock() + defer i.oidcLock.Unlock() + + // load the named key and perform a rotation + entry, err := req.Storage.Get(ctx, namedKeyConfigPath+name) + if err != nil { + return nil, err + } + if entry == nil { + return logical.ErrorResponse("no named key found at %q", name), logical.ErrInvalidRequest + } + + var storedNamedKey namedKey + if err := entry.DecodeJSON(&storedNamedKey); err != nil { + return nil, err + } + storedNamedKey.name = name + + // call rotate with an appropriate overrideTTL where < 0 means no override + verificationTTLOverride := -1 * time.Second + + if ttlRaw, ok := d.GetOk("verification_ttl"); ok { + verificationTTLOverride = time.Duration(ttlRaw.(int)) * time.Second + } + + if err := storedNamedKey.rotate(ctx, req.Storage, verificationTTLOverride); err != nil { + return nil, err + } + + if err := i.oidcCache.Flush(ns); err != nil { + return nil, err + } + + return nil, nil +} + +func (i *IdentityStore) pathOIDCKeyExistenceCheck(ctx context.Context, req *logical.Request, d *framework.FieldData) (bool, error) { + name := d.Get("name").(string) + + i.oidcLock.RLock() + defer i.oidcLock.RUnlock() + + entry, err := req.Storage.Get(ctx, namedKeyConfigPath+name) + if err != nil { + return false, err + } + + return entry != nil, nil +} + +// handleOIDCGenerateSignToken generates and signs an OIDC token +func (i *IdentityStore) pathOIDCGenerateToken(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + roleName := d.Get("name").(string) + + role, err := i.getOIDCRole(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse("role %q not found", roleName), nil + } + + var key *namedKey + + keyRaw, found, err := i.oidcCache.Get(ns, "namedKeys/"+role.Key) + if err != 
nil { + return nil, err + } + + if found { + key = keyRaw.(*namedKey) + } else { + entry, _ := req.Storage.Get(ctx, namedKeyConfigPath+role.Key) + if entry == nil { + return logical.ErrorResponse("key %q not found", role.Key), nil + } + + if err := entry.DecodeJSON(&key); err != nil { + return nil, err + } + + if err := i.oidcCache.SetDefault(ns, "namedKeys/"+role.Key, key); err != nil { + return nil, err + } + } + // Validate that the role is allowed to sign with its key (the key could have been updated) + if !strutil.StrListContains(key.AllowedClientIDs, "*") && !strutil.StrListContains(key.AllowedClientIDs, role.ClientID) { + return logical.ErrorResponse("the key %q does not list the client ID of the role %q as an allowed client ID", role.Key, roleName), nil + } + + // generate an OIDC token from entity data + if req.EntityID == "" { + return logical.ErrorResponse("no entity associated with the request's token"), nil + } + + config, err := i.getOIDCConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + + now := time.Now() + idToken := idToken{ + Issuer: config.effectiveIssuer, + Namespace: ns.ID, + Subject: req.EntityID, + Audience: role.ClientID, + Expiry: now.Add(role.TokenTTL).Unix(), + IssuedAt: now.Unix(), + } + + e, err := i.MemDBEntityByID(req.EntityID, true) + if err != nil { + return nil, err + } + if e == nil { + return nil, fmt.Errorf("error loading entity ID %q", req.EntityID) + } + + groups, inheritedGroups, err := i.groupsByEntityID(e.ID) + if err != nil { + return nil, err + } + + groups = append(groups, inheritedGroups...) + + payload, err := idToken.generatePayload(i.Logger(), role.Template, e, groups) + if err != nil { + i.Logger().Warn("error populating OIDC token template", "error", err) + } + + signedIdToken, err := key.signPayload(payload) + if err != nil { + return nil, errwrap.Wrapf("error signing OIDC token: {{err}}", err) + } + + return &logical.Response{ + Data: map[string]interface{}{ + "token": signedIdToken, + "client_id": role.ClientID, + "ttl": int64(role.TokenTTL.Seconds()), + }, + }, nil +} + +func (tok *idToken) generatePayload(logger hclog.Logger, template string, entity *identity.Entity, groups []*identity.Group) ([]byte, error) { + output := map[string]interface{}{ + "iss": tok.Issuer, + "namespace": tok.Namespace, + "sub": tok.Subject, + "aud": tok.Audience, + "exp": tok.Expiry, + "iat": tok.IssuedAt, + } + + // Parse and integrate the populated role template. Structural errors with the template _should_ + // be caught during role configuration. Error found during runtime will be logged, but they will + // not block generation of the basic ID token. They should not be returned to the requester. + _, populatedTemplate, err := identity.PopulateString(identity.PopulateStringInput{ + Mode: identity.JSONTemplating, + String: template, + Entity: entity, + Groups: groups, + // namespace? 
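+		//
+		// As a sketch (using Vault's identity templating syntax; the claim
+		// name is illustrative): a role template such as
+		//	{"groups": {{identity.entity.groups.names}}}
+		// is populated here and merged into the base claims below, provided
+		// it does not try to set a required claim such as iss, sub, or aud.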
+ }) + + if err != nil { + logger.Warn("error populating OIDC token template", "template", template, "error", err) + } + + if populatedTemplate != "" { + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(populatedTemplate), &parsed); err != nil { + logger.Warn("error parsing OIDC template", "template", template, "err", err) + } + + for k, v := range parsed { + if !strutil.StrListContains(requiredClaims, k) { + output[k] = v + } else { + logger.Warn("invalid top level OIDC template key", "template", template, "key", k) + } + } + } + + payload, err := json.Marshal(output) + if err != nil { + return nil, err + } + + return payload, nil +} + +func (k *namedKey) signPayload(payload []byte) (string, error) { + signingKey := jose.SigningKey{Key: k.SigningKey, Algorithm: jose.SignatureAlgorithm(k.Algorithm)} + signer, err := jose.NewSigner(signingKey, &jose.SignerOptions{}) + if err != nil { + return "", err + } + + signature, err := signer.Sign(payload) + if err != nil { + return "", err + } + + signedIdToken, err := signature.CompactSerialize() + if err != nil { + return "", err + } + + return signedIdToken, nil +} + +func (i *IdentityStore) pathOIDCRoleExistenceCheck(ctx context.Context, req *logical.Request, d *framework.FieldData) (bool, error) { + role, err := i.getOIDCRole(ctx, req.Storage, d.Get("name").(string)) + if err != nil { + return false, err + } + + return role != nil, nil +} + +// handleOIDCCreateRole is used to create a new role or update an existing one +func (i *IdentityStore) pathOIDCCreateUpdateRole(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + name := d.Get("name").(string) + + var role role + if req.Operation == logical.UpdateOperation { + entry, err := req.Storage.Get(ctx, roleConfigPath+name) + if err != nil { + return nil, err + } + if entry != nil { + if err := entry.DecodeJSON(&role); err != nil { + return nil, err + } + } + } + + if key, ok := d.GetOk("key"); ok { + role.Key = key.(string) + } else if req.Operation == logical.CreateOperation { + role.Key = d.Get("key").(string) + } + + if template, ok := d.GetOk("template"); ok { + role.Template = template.(string) + } else if req.Operation == logical.CreateOperation { + role.Template = d.Get("template").(string) + } + + // Attempt to decode as base64 and use that if it works + if decoded, err := base64.StdEncoding.DecodeString(role.Template); err == nil { + role.Template = string(decoded) + } + + // Validate that template can be parsed and results in valid JSON + if role.Template != "" { + _, populatedTemplate, err := identity.PopulateString(identity.PopulateStringInput{ + Mode: identity.JSONTemplating, + String: role.Template, + Entity: new(identity.Entity), + Groups: make([]*identity.Group, 0), + // namespace? + }) + + if err != nil { + return logical.ErrorResponse("error parsing template: %s", err.Error()), nil + } + + var tmp map[string]interface{} + if err := json.Unmarshal([]byte(populatedTemplate), &tmp); err != nil { + return logical.ErrorResponse("error parsing template JSON: %s", err.Error()), nil + } + + for key := range tmp { + if strutil.StrListContains(requiredClaims, key) { + return logical.ErrorResponse(`top level key %q not allowed. 
Restricted keys: %s`, + key, strings.Join(requiredClaims, ", ")), nil + } + } + } + + if ttl, ok := d.GetOk("ttl"); ok { + role.TokenTTL = time.Duration(ttl.(int)) * time.Second + } else if req.Operation == logical.CreateOperation { + role.TokenTTL = time.Duration(d.Get("ttl").(int)) * time.Second + } + + // create role path + if role.ClientID == "" { + clientID, err := base62.Random(26) + if err != nil { + return nil, err + } + role.ClientID = clientID + } + + // store role (which was either just created or updated) + entry, err := logical.StorageEntryJSON(roleConfigPath+name, role) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + if err := i.oidcCache.Flush(ns); err != nil { + return nil, err + } + + return nil, nil +} + +// handleOIDCReadRole is used to read an existing role +func (i *IdentityStore) pathOIDCReadRole(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + role, err := i.getOIDCRole(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "client_id": role.ClientID, + "key": role.Key, + "template": role.Template, + "ttl": int64(role.TokenTTL.Seconds()), + }, + }, nil +} + +func (i *IdentityStore) getOIDCRole(ctx context.Context, s logical.Storage, roleName string) (*role, error) { + entry, err := s.Get(ctx, roleConfigPath+roleName) + if err != nil { + return nil, err + } + + if entry == nil { + return nil, nil + } + + var role role + if err := entry.DecodeJSON(&role); err != nil { + return nil, err + } + + return &role, nil +} + +// handleOIDCDeleteRole is used to delete a role if it exists +func (i *IdentityStore) pathOIDCDeleteRole(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + err := req.Storage.Delete(ctx, roleConfigPath+name) + if err != nil { + return nil, err + } + return nil, nil +} + +// handleOIDCListRole is used to list stored a roles +func (i *IdentityStore) pathOIDCListRole(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + roles, err := req.Storage.List(ctx, roleConfigPath) + if err != nil { + return nil, err + } + return logical.ListResponse(roles), nil +} + +func (i *IdentityStore) pathOIDCDiscovery(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + var data []byte + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + v, ok, err := i.oidcCache.Get(ns, "discoveryResponse") + if err != nil { + return nil, err + } + + if ok { + data = v.([]byte) + } else { + c, err := i.getOIDCConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + + disc := discovery{ + Issuer: c.effectiveIssuer, + Keys: c.effectiveIssuer + "/.well-known/keys", + ResponseTypes: []string{"id_token"}, + Subjects: []string{"public"}, + IDTokenAlgs: supportedAlgs, + } + + data, err = json.Marshal(disc) + if err != nil { + return nil, err + } + + if err := i.oidcCache.SetDefault(ns, "discoveryResponse", data); err != nil { + return nil, err + } + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPStatusCode: 200, + logical.HTTPRawBody: data, + logical.HTTPContentType: "application/json", + logical.HTTPRawCacheControl: "max-age=3600", + }, + } + + return resp, nil +} + +// pathOIDCReadPublicKeys is 
used to retrieve all public keys so that clients can +// verify the validity of a signed OIDC token. +func (i *IdentityStore) pathOIDCReadPublicKeys(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + var data []byte + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + v, ok, err := i.oidcCache.Get(ns, "jwksResponse") + if err != nil { + return nil, err + } + + if ok { + data = v.([]byte) + } else { + jwks, err := i.generatePublicJWKS(ctx, req.Storage) + if err != nil { + return nil, err + } + + data, err = json.Marshal(jwks) + if err != nil { + return nil, err + } + + if err := i.oidcCache.SetDefault(ns, "jwksResponse", data); err != nil { + return nil, err + } + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPStatusCode: 200, + logical.HTTPRawBody: data, + logical.HTTPContentType: "application/json", + }, + } + + // set a Cache-Control header only if there are keys, if there aren't keys + // then nextRun should not be used to set Cache-Control header because it chooses + // a time in the future that isn't based on key rotation/expiration values + keys, err := listOIDCPublicKeys(ctx, req.Storage) + if err != nil { + return nil, err + } + if len(keys) > 0 { + v, ok, err := i.oidcCache.Get(noNamespace, "nextRun") + if err != nil { + return nil, err + } + + if ok { + now := time.Now() + expireAt := v.(time.Time) + if expireAt.After(now) { + expireInSeconds := expireAt.Sub(time.Now()).Seconds() + expireInString := fmt.Sprintf("max-age=%.0f", expireInSeconds) + resp.Data[logical.HTTPRawCacheControl] = expireInString + } + } + } + + return resp, nil +} + +func (i *IdentityStore) pathOIDCIntrospect(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + var claims jwt.Claims + + // helper for preparing the non-standard introspection response + introspectionResp := func(errorMsg string) (*logical.Response, error) { + response := map[string]interface{}{ + "active": true, + } + + if errorMsg != "" { + response["active"] = false + response["error"] = errorMsg + } + + data, err := json.Marshal(response) + if err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPStatusCode: 200, + logical.HTTPRawBody: data, + logical.HTTPContentType: "application/json", + }, + } + + return resp, nil + } + + rawIDToken := d.Get("token").(string) + clientID := d.Get("client_id").(string) + + // validate basic JWT structure + parsedJWT, err := jwt.ParseSigned(rawIDToken) + if err != nil { + return introspectionResp(fmt.Sprintf("error parsing token: %s", err.Error())) + } + + // validate signature + jwks, err := i.generatePublicJWKS(ctx, req.Storage) + if err != nil { + return nil, err + } + + var valid bool + for _, key := range jwks.Keys { + if err := parsedJWT.Claims(key, &claims); err == nil { + valid = true + break + } + } + + if !valid { + return introspectionResp("unable to validate the token signature") + } + + // validate claims + c, err := i.getOIDCConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + + expected := jwt.Expected{ + Issuer: c.effectiveIssuer, + Time: time.Now(), + } + + if clientID != "" { + expected.Audience = []string{clientID} + } + + if claimsErr := claims.Validate(expected); claimsErr != nil { + return introspectionResp(fmt.Sprintf("error validating claims: %s", claimsErr.Error())) + } + + // validate entity exists and is active + entity, err := 
i.MemDBEntityByID(claims.Subject, true) + if err != nil { + return nil, err + } + if entity == nil { + return introspectionResp("entity was not found") + } else if entity.Disabled { + return introspectionResp("entity is disabled") + } + + return introspectionResp("") +} + +// rotate performs a key rotation on a namedKey. The key's configured +// verification_ttl can be overridden with an overrideVerificationTTL value >= 0; +// a negative value leaves the configured verification_ttl in effect. +func (k *namedKey) rotate(ctx context.Context, s logical.Storage, overrideVerificationTTL time.Duration) error { + verificationTTL := k.VerificationTTL + + if overrideVerificationTTL >= 0 { + verificationTTL = overrideVerificationTTL + } + + // generate new key + signingKey, err := generateKeys(k.Algorithm) + if err != nil { + return err + } + if err := saveOIDCPublicKey(ctx, s, signingKey.Public()); err != nil { + return err + } + + now := time.Now() + + // set the previous public key's expiry time + for _, key := range k.KeyRing { + if key.KeyID == k.SigningKey.KeyID { + key.ExpireAt = now.Add(verificationTTL) + break + } + } + k.SigningKey = signingKey + k.KeyRing = append(k.KeyRing, &expireableKey{KeyID: signingKey.KeyID}) + k.NextRotation = now.Add(k.RotationPeriod) + + // store named key (it was modified when rotate was called on it) + entry, err := logical.StorageEntryJSON(namedKeyConfigPath+k.name, k) + if err != nil { + return err + } + if err := s.Put(ctx, entry); err != nil { + return err + } + + return nil +} + +// generateKeys returns a new signing key for the given algorithm as a +// jose.JSONWebKey (a private key with key ID, algorithm, and use set; the +// public half is embedded) +func generateKeys(algorithm string) (*jose.JSONWebKey, error) { + var key interface{} + var err error + + switch algorithm { + case "RS256", "RS384", "RS512": + // 2048 bits is recommended by RSA Laboratories as a minimum post 2015 + if key, err = rsa.GenerateKey(rand.Reader, 2048); err != nil { + return nil, err + } + case "ES256", "ES384", "ES512": + var curve elliptic.Curve + + switch algorithm { + case "ES256": + curve = elliptic.P256() + case "ES384": + curve = elliptic.P384() + case "ES512": + curve = elliptic.P521() + } + + if key, err = ecdsa.GenerateKey(curve, rand.Reader); err != nil { + return nil, err + } + case "EdDSA": + _, key, err = ed25519.GenerateKey(rand.Reader) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown algorithm %q", algorithm) + } + + id, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + jwk := &jose.JSONWebKey{ + Key: key, + KeyID: id, + Algorithm: algorithm, + Use: "sig", + } + + return jwk, nil +} + +func saveOIDCPublicKey(ctx context.Context, s logical.Storage, key jose.JSONWebKey) error { + entry, err := logical.StorageEntryJSON(publicKeysConfigPath+key.KeyID, key) + if err != nil { + return err + } + if err := s.Put(ctx, entry); err != nil { + return err + } + + return nil +} + +func loadOIDCPublicKey(ctx context.Context, s logical.Storage, keyID string) (*jose.JSONWebKey, error) { + entry, err := s.Get(ctx, publicKeysConfigPath+keyID) + if err != nil { + return nil, err + } + + var key jose.JSONWebKey + if err := entry.DecodeJSON(&key); err != nil { + return nil, err + } + + return &key, nil +} + +func listOIDCPublicKeys(ctx context.Context, s logical.Storage) ([]string, error) { + keys, err := s.List(ctx, publicKeysConfigPath) + if err != nil { + return nil, err + } + + return keys, nil +} + +func (i *IdentityStore) generatePublicJWKS(ctx context.Context, s logical.Storage) (*jose.JSONWebKeySet, error) { + ns, err := namespace.FromContext(ctx) + if err
!= nil { + return nil, err + } + + jwksRaw, ok, err := i.oidcCache.Get(ns, "jwks") + if err != nil { + return nil, err + } + + if ok { + return jwksRaw.(*jose.JSONWebKeySet), nil + } + + if _, err := i.expireOIDCPublicKeys(ctx, s); err != nil { + return nil, err + } + + keyIDs, err := listOIDCPublicKeys(ctx, s) + if err != nil { + return nil, err + } + + jwks := &jose.JSONWebKeySet{ + Keys: make([]jose.JSONWebKey, 0, len(keyIDs)), + } + + for _, keyID := range keyIDs { + key, err := loadOIDCPublicKey(ctx, s, keyID) + if err != nil { + return nil, err + } + jwks.Keys = append(jwks.Keys, *key) + } + + if err := i.oidcCache.SetDefault(ns, "jwks", jwks); err != nil { + return nil, err + } + + return jwks, nil +} + +func (i *IdentityStore) expireOIDCPublicKeys(ctx context.Context, s logical.Storage) (time.Time, error) { + var didUpdate bool + + i.oidcLock.Lock() + defer i.oidcLock.Unlock() + + ns, err := namespace.FromContext(ctx) + if err != nil { + return time.Time{}, err + } + + // nextExpiration will be the soonest expiration time of all keys. Initialize + // here to a relatively distant time. + nextExpiration := time.Now().Add(24 * time.Hour) + now := time.Now() + + publicKeyIDs, err := listOIDCPublicKeys(ctx, s) + if err != nil { + return now, err + } + + namedKeys, err := s.List(ctx, namedKeyConfigPath) + if err != nil { + return now, err + } + + usedKeys := make([]string, 0, 2*len(namedKeys)) + + for _, k := range namedKeys { + entry, err := s.Get(ctx, namedKeyConfigPath+k) + if err != nil { + return now, err + } + + var key namedKey + if err := entry.DecodeJSON(&key); err != nil { + return now, err + } + + // Remove any expired keys from the keyring. + keyRing := key.KeyRing + var keyringUpdated bool + + for i := 0; i < len(keyRing); i++ { + k := keyRing[i] + if !k.ExpireAt.IsZero() && k.ExpireAt.Before(now) { + keyRing[i] = keyRing[len(keyRing)-1] + keyRing = keyRing[:len(keyRing)-1] + + keyringUpdated = true + i-- + continue + } + + // Save a remaining key's next expiration if it is the earliest we've + // seen (for use by the periodicFunc for scheduling). + if !k.ExpireAt.IsZero() && k.ExpireAt.Before(nextExpiration) { + nextExpiration = k.ExpireAt + } + + // Mark the KeyID as in use so it doesn't get deleted in the next step + usedKeys = append(usedKeys, k.KeyID) + } + + // Persist any keyring updates if necessary + if keyringUpdated { + key.KeyRing = keyRing + entry, err := logical.StorageEntryJSON(entry.Key, key) + if err != nil { + i.Logger().Error("error updating key", "key", key.name, "error", err) + } + + if err := s.Put(ctx, entry); err != nil { + i.Logger().Error("error saving key", "key", key.name, "error", err) + + } + didUpdate = true + } + } + + // Delete any public key that is no longer referenced by some named key's + // keyring, i.e. keys whose expired entries were pruned above. + for _, keyID := range publicKeyIDs { + if !strutil.StrListContains(usedKeys, keyID) { + didUpdate = true + if err := s.Delete(ctx, publicKeysConfigPath+keyID); err != nil { + i.Logger().Error("error deleting OIDC public key", "key_id", keyID, "error", err) + nextExpiration = now + } + i.Logger().Debug("deleted OIDC public key", "key_id", keyID) + } + } + + if didUpdate { + if err := i.oidcCache.Flush(ns); err != nil { + i.Logger().Error("error flushing oidc cache", "error", err) + } + } + + return nextExpiration, nil +} + +func (i *IdentityStore) oidcKeyRotation(ctx context.Context, s logical.Storage) (time.Time, error) { + // soonestRotation will be the soonest rotation time of all keys.
Initialize + // here to a relatively distant time. + now := time.Now() + soonestRotation := now.Add(24 * time.Hour) + + i.oidcLock.Lock() + defer i.oidcLock.Unlock() + + keys, err := s.List(ctx, namedKeyConfigPath) + if err != nil { + return now, err + } + + for _, k := range keys { + entry, err := s.Get(ctx, namedKeyConfigPath+k) + if err != nil { + return now, err + } + + if entry == nil { + continue + } + + var key namedKey + if err := entry.DecodeJSON(&key); err != nil { + return now, err + } + key.name = k + + // Track the earliest future key rotation we've seen. + if now.Before(key.NextRotation) && key.NextRotation.Before(soonestRotation) { + soonestRotation = key.NextRotation + } + + // Key that is due to be rotated. + if now.After(key.NextRotation) { + i.Logger().Debug("rotating OIDC key", "key", key.name) + if err := key.rotate(ctx, s, -1); err != nil { + return now, err + } + + // Possibly save the new rotation time + if key.NextRotation.Before(soonestRotation) { + soonestRotation = key.NextRotation + } + } + } + + return soonestRotation, nil +} + +// oidcPeriodicFunc is invoked by the backend's periodicFunc and runs regular +// key rotations and expiration actions. +func (i *IdentityStore) oidcPeriodicFunc(ctx context.Context) { + var nextRun time.Time + now := time.Now() + + v, ok, err := i.oidcCache.Get(noNamespace, "nextRun") + if err != nil { + i.Logger().Error("error reading oidc cache", "err", err) + return + } + + if ok { + nextRun = v.(time.Time) + } + + // The condition here is for performance, not precise timing. The actions can + // be run at any time safely, but there is no need to invoke them (which + // might be somewhat expensive if there are many roles/keys) if we're not + // past any rotation/expiration TTLs. + if now.After(nextRun) { + // Initialize to a fairly distant next run time. This will be brought in + // based on key rotation times.
+ nextRun = now.Add(24 * time.Hour) + + for _, ns := range i.listNamespaces() { + nsPath := ns.Path + + s := i.core.router.MatchingStorageByAPIPath(ctx, nsPath+"identity/oidc") + + if s == nil { + continue + } + + nextRotation, err := i.oidcKeyRotation(ctx, s) + if err != nil { + i.Logger().Warn("error rotating OIDC keys", "err", err) + } + + nextExpiration, err := i.expireOIDCPublicKeys(ctx, s) + if err != nil { + i.Logger().Warn("error expiring OIDC public keys", "err", err) + } + + if err := i.oidcCache.Flush(ns); err != nil { + i.Logger().Error("error flushing oidc cache", "err", err) + } + + // re-run at the soonest expiration or rotation time + if nextRotation.Before(nextRun) { + nextRun = nextRotation + } + + if nextExpiration.Before(nextRun) { + nextRun = nextExpiration + } + } + if err := i.oidcCache.SetDefault(noNamespace, "nextRun", nextRun); err != nil { + i.Logger().Error("error setting oidc cache", "err", err) + } + } +} + +func newOIDCCache() *oidcCache { + return &oidcCache{ + c: cache.New(cache.NoExpiration, cache.NoExpiration), + } +} + +func (c *oidcCache) nskey(ns *namespace.Namespace, key string) string { + return fmt.Sprintf("v0:%s:%s", ns.ID, key) +} + +func (c *oidcCache) Get(ns *namespace.Namespace, key string) (interface{}, bool, error) { + if ns == nil { + return nil, false, errNilNamespace + } + v, found := c.c.Get(c.nskey(ns, key)) + return v, found, nil +} + +func (c *oidcCache) SetDefault(ns *namespace.Namespace, key string, obj interface{}) error { + if ns == nil { + return errNilNamespace + } + c.c.SetDefault(c.nskey(ns, key), obj) + + return nil +} + +func (c *oidcCache) Flush(ns *namespace.Namespace) error { + if ns == nil { + return errNilNamespace + } + + // Remove all items from the provided namespace as well as the shared, "no namespace" section. 
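+ // For example (illustrative cache keys, not part of this change): flushing + // namespace "ns1" would remove "v0:ns1:jwks" as well as shared entries such + // as "v0:<noNamespace.ID>:nextRun", since noNamespace.ID is always included + // in the target list below.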
+ for itemKey := range c.c.Items() { + if isTargetNamespacedKey(itemKey, []string{noNamespace.ID, ns.ID}) { + c.c.Delete(itemKey) + } + } + + return nil +} + +// isTargetNamespacedKey returns true for a properly constructed namespaced key +// (<version>:<nsID>:<key>) where <nsID> matches any targeted nsID +func isTargetNamespacedKey(nskey string, nsTargets []string) bool { + split := strings.Split(nskey, ":") + return len(split) >= 3 && strutil.StrListContains(nsTargets, split[1]) +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_oidc_util.go b/vendor/github.com/hashicorp/vault/vault/identity_store_oidc_util.go new file mode 100644 index 00000000..5152aaeb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store_oidc_util.go @@ -0,0 +1,11 @@ +// +build !enterprise + +package vault + +import ( + "github.com/hashicorp/vault/helper/namespace" +) + +func (i *IdentityStore) listNamespaces() []*namespace.Namespace { + return []*namespace.Namespace{namespace.RootNamespace} +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_schema.go b/vendor/github.com/hashicorp/vault/vault/identity_store_schema.go new file mode 100644 index 00000000..6ef28d0d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store_schema.go @@ -0,0 +1,215 @@ +package vault + +import ( + "fmt" + + memdb "github.com/hashicorp/go-memdb" +) + +const ( + entitiesTable = "entities" + entityAliasesTable = "entity_aliases" + groupsTable = "groups" + groupAliasesTable = "group_aliases" +) + +func identityStoreSchema(lowerCaseName bool) *memdb.DBSchema { + iStoreSchema := &memdb.DBSchema{ + Tables: make(map[string]*memdb.TableSchema), + } + + schemas := []func(bool) *memdb.TableSchema{ + entitiesTableSchema, + aliasesTableSchema, + groupsTableSchema, + groupAliasesTableSchema, + } + + for _, schemaFunc := range schemas { + schema := schemaFunc(lowerCaseName) + if _, ok := iStoreSchema.Tables[schema.Name]; ok { + panic(fmt.Sprintf("duplicate table name: %s", schema.Name)) + } + iStoreSchema.Tables[schema.Name] = schema + } + + return iStoreSchema +} + +func aliasesTableSchema(lowerCaseName bool) *memdb.TableSchema { + return &memdb.TableSchema{ + Name: entityAliasesTable, + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + "factors": &memdb.IndexSchema{ + Name: "factors", + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "MountAccessor", + }, + &memdb.StringFieldIndex{ + Field: "Name", + Lowercase: lowerCaseName, + }, + }, + }, + }, + "namespace_id": &memdb.IndexSchema{ + Name: "namespace_id", + Indexer: &memdb.StringFieldIndex{ + Field: "NamespaceID", + }, + }, + }, + } +} + +func entitiesTableSchema(lowerCaseName bool) *memdb.TableSchema { + return &memdb.TableSchema{ + Name: entitiesTable, + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + "name": &memdb.IndexSchema{ + Name: "name", + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "NamespaceID", + }, + &memdb.StringFieldIndex{ + Field: "Name", + Lowercase: lowerCaseName, + }, + }, + }, + }, + "merged_entity_ids": &memdb.IndexSchema{ + Name: "merged_entity_ids", + Unique: true, + AllowMissing: true, + Indexer: &memdb.StringSliceFieldIndex{ + Field: "MergedEntityIDs", + }, + }, + "bucket_key":
&memdb.IndexSchema{ + Name: "bucket_key", + Indexer: &memdb.StringFieldIndex{ + Field: "BucketKey", + }, + }, + "namespace_id": &memdb.IndexSchema{ + Name: "namespace_id", + Indexer: &memdb.StringFieldIndex{ + Field: "NamespaceID", + }, + }, + }, + } +} + +func groupsTableSchema(lowerCaseName bool) *memdb.TableSchema { + return &memdb.TableSchema{ + Name: groupsTable, + Indexes: map[string]*memdb.IndexSchema{ + "id": { + Name: "id", + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + "name": { + Name: "name", + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "NamespaceID", + }, + &memdb.StringFieldIndex{ + Field: "Name", + Lowercase: lowerCaseName, + }, + }, + }, + }, + "member_entity_ids": { + Name: "member_entity_ids", + AllowMissing: true, + Indexer: &memdb.StringSliceFieldIndex{ + Field: "MemberEntityIDs", + }, + }, + "parent_group_ids": { + Name: "parent_group_ids", + AllowMissing: true, + Indexer: &memdb.StringSliceFieldIndex{ + Field: "ParentGroupIDs", + }, + }, + "bucket_key": &memdb.IndexSchema{ + Name: "bucket_key", + Indexer: &memdb.StringFieldIndex{ + Field: "BucketKey", + }, + }, + "namespace_id": &memdb.IndexSchema{ + Name: "namespace_id", + Indexer: &memdb.StringFieldIndex{ + Field: "NamespaceID", + }, + }, + }, + } +} + +func groupAliasesTableSchema(lowerCaseName bool) *memdb.TableSchema { + return &memdb.TableSchema{ + Name: groupAliasesTable, + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + "factors": &memdb.IndexSchema{ + Name: "factors", + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "MountAccessor", + }, + &memdb.StringFieldIndex{ + Field: "Name", + Lowercase: lowerCaseName, + }, + }, + }, + }, + "namespace_id": &memdb.IndexSchema{ + Name: "namespace_id", + Indexer: &memdb.StringFieldIndex{ + Field: "NamespaceID", + }, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_structs.go b/vendor/github.com/hashicorp/vault/vault/identity_store_structs.go new file mode 100644 index 00000000..c0f1f97a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store_structs.go @@ -0,0 +1,93 @@ +package vault + +import ( + "regexp" + "sync" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/storagepacker" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // Storage prefixes + entityPrefix = "entity/" +) + +var ( + // metaKeyFormatRegEx checks if a metadata key string is valid + metaKeyFormatRegEx = regexp.MustCompile(`^[a-zA-Z0-9=/+_-]+$`).MatchString +) + +const ( + // The meta key prefix reserved for Vault's internal use + metaKeyReservedPrefix = "vault-" + + // The maximum number of metadata key pairs allowed to be registered + metaMaxKeyPairs = 64 + + // The maximum allowed length of a metadata key + metaKeyMaxLength = 128 + + // The maximum allowed length of a metadata value + metaValueMaxLength = 512 +) + +// IdentityStore is composed of its own storage view and a MemDB which +// maintains active in-memory replicas of the storage contents indexed by +// multiple fields. 
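+// The MemDB replica is rebuilt from storage on load (see loadEntities and +// loadGroups in identity_store_util.go); reads are then served from MemDB, +// while writes update MemDB and the storage packers within the same logical +// operation.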
+type IdentityStore struct { + // IdentityStore is a secret backend in Vault + *framework.Backend + + // view is the storage sub-view where all the artifacts of identity store + // gets persisted + view logical.Storage + + // db is the in-memory database where the storage artifacts gets replicated + // to enable richer queries based on multiple indexes. + db *memdb.MemDB + + // locks to make sure things are consistent + lock sync.RWMutex + oidcLock sync.RWMutex + + // groupLock is used to protect modifications to group entries + groupLock sync.RWMutex + + // oidcCache stores common response data as well as when the periodic func needs + // to run. This is conservatively managed, and most writes to the OIDC endpoints + // will invalidate the cache. + oidcCache *oidcCache + + // logger is the server logger copied over from core + logger log.Logger + + // entityPacker is used to pack multiple entity storage entries into 256 + // buckets + entityPacker *storagepacker.StoragePacker + + // groupPacker is used to pack multiple group storage entries into 256 + // buckets + groupPacker *storagepacker.StoragePacker + + // core is the pointer to Vault's core + core *Core + + // disableLowerCaseNames indicates whether or not identity artifacts are + // operated case insensitively + disableLowerCasedNames bool +} + +type groupDiff struct { + New []*identity.Group + Deleted []*identity.Group + Unmodified []*identity.Group +} + +type casesensitivity struct { + DisableLowerCasedNames bool `json:"disable_lower_cased_names"` +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_upgrade.go b/vendor/github.com/hashicorp/vault/vault/identity_store_upgrade.go new file mode 100644 index 00000000..2c28925d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store_upgrade.go @@ -0,0 +1,168 @@ +package vault + +import ( + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func upgradePaths(i *IdentityStore) []*framework.Path { + return []*framework.Path{ + { + Pattern: "persona$", + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the persona", + }, + "entity_id": { + Type: framework.TypeString, + Description: "Entity ID to which this persona belongs to", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this persona belongs to", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the persona", + }, + "metadata": { + Type: framework.TypeKVPairs, + Description: `Metadata to be associated with the persona. +In CLI, this parameter can be repeated multiple times, and it all gets merged together. 
+For example: +vault metadata=key1=value1 metadata=key2=value2 +`, + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.handleEntityUpdateCommon(), + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias"][1]), + }, + { + Pattern: "persona/id/" + framework.GenericNameRegex("id"), + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the persona", + }, + "entity_id": { + Type: framework.TypeString, + Description: "Entity ID to which this persona should be tied to", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this persona belongs to", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the persona", + }, + "metadata": { + Type: framework.TypeKVPairs, + Description: `Metadata to be associated with the persona. +In CLI, this parameter can be repeated multiple times, and it all gets merged together. +For example: +vault metadata=key1=value1 metadata=key2=value2 +`, + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.handleEntityUpdateCommon(), + logical.ReadOperation: i.pathAliasIDRead(), + logical.DeleteOperation: i.pathAliasIDDelete(), + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias-id"][1]), + }, + { + Pattern: "persona/id/?$", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathAliasIDList(), + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id-list"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias-id-list"][1]), + }, + { + Pattern: "alias$", + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the alias", + }, + "entity_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias belongs to. This field is deprecated in favor of 'canonical_id'.", + }, + "canonical_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias belongs to", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this alias belongs to", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the alias", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.handleAliasCreateUpdate(), + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias"][1]), + }, + + { + Pattern: "alias/id/" + framework.GenericNameRegex("id"), + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the alias", + }, + "entity_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias should be tied to. 
This field is deprecated in favor of 'canonical_id'.", + }, + "canonical_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias should be tied to", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this alias belongs to", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the alias", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.handleAliasCreateUpdate(), + logical.ReadOperation: i.pathAliasIDRead(), + logical.DeleteOperation: i.pathAliasIDDelete(), + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias-id"][1]), + }, + { + Pattern: "alias/id/?$", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathAliasIDList(), + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id-list"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias-id-list"][1]), + }, + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_util.go b/vendor/github.com/hashicorp/vault/vault/identity_store_util.go new file mode 100644 index 00000000..6da3638e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/identity_store_util.go @@ -0,0 +1,2085 @@ +package vault + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/errwrap" + memdb "github.com/hashicorp/go-memdb" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/identity/mfa" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/storagepacker" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +var ( + errDuplicateIdentityName = errors.New("duplicate identity name") +) + +func (c *Core) SetLoadCaseSensitiveIdentityStore(caseSensitive bool) { + c.loadCaseSensitiveIdentityStore = caseSensitive +} + +func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { + if c.identityStore == nil { + c.logger.Warn("identity store is not setup, skipping loading") + return nil + } + + loadFunc := func(context.Context) error { + err := c.identityStore.loadEntities(ctx) + if err != nil { + return err + } + return c.identityStore.loadGroups(ctx) + } + + if !c.loadCaseSensitiveIdentityStore { + // Load everything when memdb is set to operate on lower cased names + err := loadFunc(ctx) + switch { + case err == nil: + // If it succeeds, all is well + return nil + case err != nil && !errwrap.Contains(err, errDuplicateIdentityName.Error()): + return err + } + } + + c.identityStore.logger.Warn("enabling case sensitive identity names") + + // Set identity store to operate on case sensitive identity names + c.identityStore.disableLowerCasedNames = true + + // Swap the memdb instance by the one which operates on case sensitive + // names, hence obviating the need to unload anything that's already + // loaded. 
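+ // In other words, loading is two-pass (a sketch of the overall flow here): + // first attempt the default lower-cased load; if that fails with + // errDuplicateIdentityName, reset MemDB with a case-sensitive schema and + // load again.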
if err := c.identityStore.resetDB(ctx); err != nil { + return err + } + + // Attempt to load identity artifacts once more after memdb is reset to + // accept case sensitive names + return loadFunc(ctx) +} + +func (i *IdentityStore) sanitizeName(name string) string { + if i.disableLowerCasedNames { + return name + } + return strings.ToLower(name) +} + +func (i *IdentityStore) loadGroups(ctx context.Context) error { + i.logger.Debug("identity loading groups") + existing, err := i.groupPacker.View().List(ctx, groupBucketsPrefix) + if err != nil { + return errwrap.Wrapf("failed to scan for groups: {{err}}", err) + } + i.logger.Debug("groups collected", "num_existing", len(existing)) + + for _, key := range existing { + bucket, err := i.groupPacker.GetBucket(groupBucketsPrefix + key) + if err != nil { + return err + } + + if bucket == nil { + continue + } + + for _, item := range bucket.Items { + group, err := i.parseGroupFromBucketItem(item) + if err != nil { + return err + } + if group == nil { + continue + } + + ns, err := NamespaceByID(ctx, group.NamespaceID, i.core) + if err != nil { + return err + } + if ns == nil { + // Remove dangling groups + if !(i.core.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) || i.core.perfStandby) { + // Group's namespace doesn't exist anymore but the group + // from the namespace still exists. + i.logger.Warn("deleting group and any of its existing aliases", "name", group.Name, "namespace_id", group.NamespaceID) + err = i.groupPacker.DeleteItem(ctx, group.ID) + if err != nil { + return err + } + } + continue + } + nsCtx := namespace.ContextWithNamespace(context.Background(), ns) + + // Ensure that there are no groups with duplicate names + groupByName, err := i.MemDBGroupByName(nsCtx, group.Name, false) + if err != nil { + return err + } + if groupByName != nil { + i.logger.Warn(errDuplicateIdentityName.Error(), "group_name", group.Name, "conflicting_group_name", groupByName.Name, "action", "merge the contents of duplicated groups into one and delete the other") + if !i.disableLowerCasedNames { + return errDuplicateIdentityName + } + } + + if i.logger.IsDebug() { + i.logger.Debug("loading group", "name", group.Name, "id", group.ID) + } + + txn := i.db.Txn(true) + + // Before pull#5786, entity memberships in groups were not getting + // updated when respective entities were deleted. This is here to + // check that the entity IDs in the group are indeed valid, and if + // not remove them.
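+ // For example (hypothetical IDs, not part of this change): if entity "e1" + // was deleted but still appears in group.MemberEntityIDs, it is pruned here + // and the group is re-persisted via UpsertGroupInTxn below.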
+ persist := false + for _, memberEntityID := range group.MemberEntityIDs { + entity, err := i.MemDBEntityByID(memberEntityID, false) + if err != nil { + return err + } + if entity == nil { + persist = true + group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, memberEntityID) + } + } + + err = i.UpsertGroupInTxn(ctx, txn, group, persist) + if err != nil { + txn.Abort() + return errwrap.Wrapf("failed to update group in memdb: {{err}}", err) + } + + txn.Commit() + } + } + + if i.logger.IsInfo() { + i.logger.Info("groups restored") + } + + return nil +} + +func (i *IdentityStore) loadEntities(ctx context.Context) error { + // Accumulate existing entities + i.logger.Debug("loading entities") + existing, err := i.entityPacker.View().List(ctx, storagepacker.StoragePackerBucketsPrefix) + if err != nil { + return errwrap.Wrapf("failed to scan for entities: {{err}}", err) + } + i.logger.Debug("entities collected", "num_existing", len(existing)) + + // Make the channels used for the worker pool + broker := make(chan string) + quit := make(chan bool) + + // Buffer these channels to prevent deadlocks + errs := make(chan error, len(existing)) + result := make(chan *storagepacker.Bucket, len(existing)) + + // Use a wait group + wg := &sync.WaitGroup{} + + // Create 64 workers to distribute work to + for j := 0; j < consts.ExpirationRestoreWorkerCount; j++ { + wg.Add(1) + go func() { + defer wg.Done() + + for { + select { + case key, ok := <-broker: + // broker has been closed, we are done + if !ok { + return + } + + bucket, err := i.entityPacker.GetBucket(storagepacker.StoragePackerBucketsPrefix + key) + if err != nil { + errs <- err + continue + } + + // Write results out to the result channel + result <- bucket + + // quit early + case <-quit: + return + } + } + }() + } + + // Distribute the collected keys to the workers in a go routine + wg.Add(1) + go func() { + defer wg.Done() + for j, key := range existing { + if j%500 == 0 { + i.logger.Debug("entities loading", "progress", j) + } + + select { + case <-quit: + return + + default: + broker <- key + } + } + + // Close the broker, causing worker routines to exit + close(broker) + }() + + // Restore each key by pulling from the result chan + for j := 0; j < len(existing); j++ { + select { + case err := <-errs: + // Close all go routines + close(quit) + + return err + + case bucket := <-result: + // If there is no entry, nothing to restore + if bucket == nil { + continue + } + + for _, item := range bucket.Items { + entity, err := i.parseEntityFromBucketItem(ctx, item) + if err != nil { + return err + } + if entity == nil { + continue + } + + ns, err := NamespaceByID(ctx, entity.NamespaceID, i.core) + if err != nil { + return err + } + if ns == nil { + // Remove dangling entities + if !(i.core.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) || i.core.perfStandby) { + // Entity's namespace doesn't exist anymore but the + // entity from the namespace still exists. 
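+ // (The replication guard here means this cleanup is only performed on + // nodes that are allowed to write, not on perf standbys or performance + // secondaries.)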
+ i.logger.Warn("deleting entity and its any existing aliases", "name", entity.Name, "namespace_id", entity.NamespaceID) + err = i.entityPacker.DeleteItem(ctx, entity.ID) + if err != nil { + return err + } + } + continue + } + nsCtx := namespace.ContextWithNamespace(context.Background(), ns) + + // Ensure that there are no entities with duplicate names + entityByName, err := i.MemDBEntityByName(nsCtx, entity.Name, false) + if err != nil { + return nil + } + if entityByName != nil { + i.logger.Warn(errDuplicateIdentityName.Error(), "entity_name", entity.Name, "conflicting_entity_name", entityByName.Name, "action", "merge the duplicate entities into one") + if !i.disableLowerCasedNames { + return errDuplicateIdentityName + } + } + + // Only update MemDB and don't hit the storage again + err = i.upsertEntity(nsCtx, entity, nil, false) + if err != nil { + return errwrap.Wrapf("failed to update entity in MemDB: {{err}}", err) + } + } + } + } + + // Let all go routines finish + wg.Wait() + + if i.logger.IsInfo() { + i.logger.Info("entities restored") + } + + return nil +} + +// upsertEntityInTxn either creates or updates an existing entity. The +// operations will be updated in both MemDB and storage. If 'persist' is set to +// false, then storage will not be updated. When an alias is transferred from +// one entity to another, both the source and destination entities should get +// updated, in which case, callers should send in both entity and +// previousEntity. +func (i *IdentityStore) upsertEntityInTxn(ctx context.Context, txn *memdb.Txn, entity *identity.Entity, previousEntity *identity.Entity, persist bool) error { + var err error + + if txn == nil { + return errors.New("txn is nil") + } + + if entity == nil { + return errors.New("entity is nil") + } + + if entity.NamespaceID == "" { + entity.NamespaceID = namespace.RootNamespaceID + } + + if previousEntity != nil && previousEntity.NamespaceID != entity.NamespaceID { + return errors.New("entity and previous entity are not in the same namespace") + } + + aliasFactors := make([]string, len(entity.Aliases)) + + for index, alias := range entity.Aliases { + // Verify that alias is not associated to a different one already + aliasByFactors, err := i.MemDBAliasByFactors(alias.MountAccessor, alias.Name, false, false) + if err != nil { + return err + } + + if alias.NamespaceID == "" { + alias.NamespaceID = namespace.RootNamespaceID + } + + switch { + case aliasByFactors == nil: + // Not found, no merging needed, just check namespace + if alias.NamespaceID != entity.NamespaceID { + return errors.New("alias and entity are not in the same namespace") + } + + case aliasByFactors.CanonicalID == entity.ID: + // Lookup found the same entity, so it's already attached to the + // right place + if aliasByFactors.NamespaceID != entity.NamespaceID { + return errors.New("alias from factors and entity are not in the same namespace") + } + + case previousEntity != nil && aliasByFactors.CanonicalID == previousEntity.ID: + // previousEntity isn't upserted yet so may still contain the old + // alias reference in memdb if it was just changed; validate + // whether or not it's _actually_ still tied to the entity + var found bool + for _, prevEntAlias := range previousEntity.Aliases { + if prevEntAlias.ID == alias.ID { + found = true + break + } + } + // If we didn't find the alias still tied to previousEntity, we + // shouldn't use the merging logic and should bail + if !found { + break + } + + // Otherwise it's still tied to previousEntity and fall through + // into 
merging. We don't need a namespace check here as existing + // checks when creating the aliases should ensure that all line up. + fallthrough + + default: + i.logger.Warn("alias is already tied to a different entity; these entities are being merged", "alias_id", alias.ID, "other_entity_id", aliasByFactors.CanonicalID, "entity_aliases", entity.Aliases, "alias_by_factors", aliasByFactors) + + respErr, intErr := i.mergeEntity(ctx, txn, entity, []string{aliasByFactors.CanonicalID}, true, false, true, persist) + switch { + case respErr != nil: + return respErr + case intErr != nil: + return intErr + } + + // The entity and aliases will be loaded into memdb and persisted + // as a result of the merge so we are done here + return nil + } + + if strutil.StrListContains(aliasFactors, i.sanitizeName(alias.Name)+alias.MountAccessor) { + i.logger.Warn(errDuplicateIdentityName.Error(), "alias_name", alias.Name, "mount_accessor", alias.MountAccessor, "entity_name", entity.Name, "action", "delete one of the duplicate aliases") + if !i.disableLowerCasedNames { + return errDuplicateIdentityName + } + } + + // Insert or update alias in MemDB using the transaction created above + err = i.MemDBUpsertAliasInTxn(txn, alias, false) + if err != nil { + return err + } + + aliasFactors[index] = i.sanitizeName(alias.Name) + alias.MountAccessor + } + + // If previous entity is set, update it in MemDB and persist it + if previousEntity != nil { + err = i.MemDBUpsertEntityInTxn(txn, previousEntity) + if err != nil { + return err + } + + if persist { + // Persist the previous entity object + marshaledPreviousEntity, err := ptypes.MarshalAny(previousEntity) + if err != nil { + return err + } + err = i.entityPacker.PutItem(ctx, &storagepacker.Item{ + ID: previousEntity.ID, + Message: marshaledPreviousEntity, + }) + if err != nil { + return err + } + } + } + + // Insert or update entity in MemDB using the transaction created above + err = i.MemDBUpsertEntityInTxn(txn, entity) + if err != nil { + return err + } + + if persist { + entityAsAny, err := ptypes.MarshalAny(entity) + if err != nil { + return err + } + item := &storagepacker.Item{ + ID: entity.ID, + Message: entityAsAny, + } + + // Persist the entity object + err = i.entityPacker.PutItem(ctx, item) + if err != nil { + return err + } + } + + return nil +} + +// upsertEntity either creates or updates an existing entity. The operations +// will be updated in both MemDB and storage. If 'persist' is set to false, +// then storage will not be updated. When an alias is transferred from one +// entity to another, both the source and destination entities should get +// updated, in which case, callers should send in both entity and +// previousEntity. 
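+// A minimal call-site sketch (hypothetical variables, not part of this change): +// +//	// move an alias from prevEntity to entity, persisting both: +//	err := i.upsertEntity(ctx, entity, prevEntity, true)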
+func (i *IdentityStore) upsertEntity(ctx context.Context, entity *identity.Entity, previousEntity *identity.Entity, persist bool) error { + + // Create a MemDB transaction to update both alias and entity + txn := i.db.Txn(true) + defer txn.Abort() + + err := i.upsertEntityInTxn(ctx, txn, entity, previousEntity, persist) + if err != nil { + return err + } + + txn.Commit() + + return nil +} + +func (i *IdentityStore) MemDBUpsertAliasInTxn(txn *memdb.Txn, alias *identity.Alias, groupAlias bool) error { + if txn == nil { + return fmt.Errorf("nil txn") + } + + if alias == nil { + return fmt.Errorf("alias is nil") + } + + if alias.NamespaceID == "" { + alias.NamespaceID = namespace.RootNamespaceID + } + + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + aliasRaw, err := txn.First(tableName, "id", alias.ID) + if err != nil { + return errwrap.Wrapf("failed to lookup alias from memdb using alias ID: {{err}}", err) + } + + if aliasRaw != nil { + err = txn.Delete(tableName, aliasRaw) + if err != nil { + return errwrap.Wrapf("failed to delete alias from memdb: {{err}}", err) + } + } + + if err := txn.Insert(tableName, alias); err != nil { + return errwrap.Wrapf("failed to update alias into memdb: {{err}}", err) + } + + return nil +} + +func (i *IdentityStore) MemDBAliasByIDInTxn(txn *memdb.Txn, aliasID string, clone bool, groupAlias bool) (*identity.Alias, error) { + if aliasID == "" { + return nil, fmt.Errorf("missing alias ID") + } + + if txn == nil { + return nil, fmt.Errorf("txn is nil") + } + + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + aliasRaw, err := txn.First(tableName, "id", aliasID) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch alias from memdb using alias ID: {{err}}", err) + } + + if aliasRaw == nil { + return nil, nil + } + + alias, ok := aliasRaw.(*identity.Alias) + if !ok { + return nil, fmt.Errorf("failed to declare the type of fetched alias") + } + + if clone { + return alias.Clone() + } + + return alias, nil +} + +func (i *IdentityStore) MemDBAliasByID(aliasID string, clone bool, groupAlias bool) (*identity.Alias, error) { + if aliasID == "" { + return nil, fmt.Errorf("missing alias ID") + } + + txn := i.db.Txn(false) + + return i.MemDBAliasByIDInTxn(txn, aliasID, clone, groupAlias) +} + +func (i *IdentityStore) MemDBAliasByFactors(mountAccessor, aliasName string, clone bool, groupAlias bool) (*identity.Alias, error) { + if aliasName == "" { + return nil, fmt.Errorf("missing alias name") + } + + if mountAccessor == "" { + return nil, fmt.Errorf("missing mount accessor") + } + + txn := i.db.Txn(false) + + return i.MemDBAliasByFactorsInTxn(txn, mountAccessor, aliasName, clone, groupAlias) +} + +func (i *IdentityStore) MemDBAliasByFactorsInTxn(txn *memdb.Txn, mountAccessor, aliasName string, clone bool, groupAlias bool) (*identity.Alias, error) { + if txn == nil { + return nil, fmt.Errorf("nil txn") + } + + if aliasName == "" { + return nil, fmt.Errorf("missing alias name") + } + + if mountAccessor == "" { + return nil, fmt.Errorf("missing mount accessor") + } + + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + aliasRaw, err := txn.First(tableName, "factors", mountAccessor, aliasName) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch alias from memdb using factors: {{err}}", err) + } + + if aliasRaw == nil { + return nil, nil + } + + alias, ok := aliasRaw.(*identity.Alias) + if !ok { + return nil, fmt.Errorf("failed to declare 
the type of fetched alias") + } + + if clone { + return alias.Clone() + } + + return alias, nil +} + +func (i *IdentityStore) MemDBDeleteAliasByIDInTxn(txn *memdb.Txn, aliasID string, groupAlias bool) error { + if aliasID == "" { + return nil + } + + if txn == nil { + return fmt.Errorf("txn is nil") + } + + alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, groupAlias) + if err != nil { + return err + } + + if alias == nil { + return nil + } + + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + err = txn.Delete(tableName, alias) + if err != nil { + return errwrap.Wrapf("failed to delete alias from memdb: {{err}}", err) + } + + return nil +} + +func (i *IdentityStore) MemDBAliases(ws memdb.WatchSet, groupAlias bool) (memdb.ResultIterator, error) { + txn := i.db.Txn(false) + + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + iter, err := txn.Get(tableName, "id") + if err != nil { + return nil, err + } + + ws.Add(iter.WatchCh()) + + return iter, nil +} + +func (i *IdentityStore) MemDBUpsertEntityInTxn(txn *memdb.Txn, entity *identity.Entity) error { + if txn == nil { + return fmt.Errorf("nil txn") + } + + if entity == nil { + return fmt.Errorf("entity is nil") + } + + if entity.NamespaceID == "" { + entity.NamespaceID = namespace.RootNamespaceID + } + + entityRaw, err := txn.First(entitiesTable, "id", entity.ID) + if err != nil { + return errwrap.Wrapf("failed to lookup entity from memdb using entity id: {{err}}", err) + } + + if entityRaw != nil { + err = txn.Delete(entitiesTable, entityRaw) + if err != nil { + return errwrap.Wrapf("failed to delete entity from memdb: {{err}}", err) + } + } + + if err := txn.Insert(entitiesTable, entity); err != nil { + return errwrap.Wrapf("failed to update entity into memdb: {{err}}", err) + } + + return nil +} + +func (i *IdentityStore) MemDBEntityByIDInTxn(txn *memdb.Txn, entityID string, clone bool) (*identity.Entity, error) { + if entityID == "" { + return nil, fmt.Errorf("missing entity id") + } + + if txn == nil { + return nil, fmt.Errorf("txn is nil") + } + + entityRaw, err := txn.First(entitiesTable, "id", entityID) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch entity from memdb using entity id: {{err}}", err) + } + + if entityRaw == nil { + return nil, nil + } + + entity, ok := entityRaw.(*identity.Entity) + if !ok { + return nil, fmt.Errorf("failed to declare the type of fetched entity") + } + + if clone { + return entity.Clone() + } + + return entity, nil +} + +func (i *IdentityStore) MemDBEntityByID(entityID string, clone bool) (*identity.Entity, error) { + if entityID == "" { + return nil, fmt.Errorf("missing entity id") + } + + txn := i.db.Txn(false) + + return i.MemDBEntityByIDInTxn(txn, entityID, clone) +} + +func (i *IdentityStore) MemDBEntityByName(ctx context.Context, entityName string, clone bool) (*identity.Entity, error) { + if entityName == "" { + return nil, fmt.Errorf("missing entity name") + } + + txn := i.db.Txn(false) + + return i.MemDBEntityByNameInTxn(ctx, txn, entityName, clone) +} + +func (i *IdentityStore) MemDBEntityByNameInTxn(ctx context.Context, txn *memdb.Txn, entityName string, clone bool) (*identity.Entity, error) { + if entityName == "" { + return nil, fmt.Errorf("missing entity name") + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + entityRaw, err := txn.First(entitiesTable, "name", ns.ID, entityName) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch entity 
from memdb using entity name: {{err}}", err) + } + + if entityRaw == nil { + return nil, nil + } + + entity, ok := entityRaw.(*identity.Entity) + if !ok { + return nil, fmt.Errorf("failed to declare the type of fetched entity") + } + + if clone { + return entity.Clone() + } + + return entity, nil +} + +func (i *IdentityStore) MemDBEntitiesByBucketKeyInTxn(txn *memdb.Txn, bucketKey string) ([]*identity.Entity, error) { + if txn == nil { + return nil, fmt.Errorf("nil txn") + } + + if bucketKey == "" { + return nil, fmt.Errorf("empty bucket key") + } + + entitiesIter, err := txn.Get(entitiesTable, "bucket_key", bucketKey) + if err != nil { + return nil, errwrap.Wrapf("failed to lookup entities using bucket entry key hash: {{err}}", err) + } + + var entities []*identity.Entity + for entity := entitiesIter.Next(); entity != nil; entity = entitiesIter.Next() { + entities = append(entities, entity.(*identity.Entity)) + } + + return entities, nil +} + +func (i *IdentityStore) MemDBEntityByMergedEntityID(mergedEntityID string, clone bool) (*identity.Entity, error) { + if mergedEntityID == "" { + return nil, fmt.Errorf("missing merged entity id") + } + + txn := i.db.Txn(false) + + entityRaw, err := txn.First(entitiesTable, "merged_entity_ids", mergedEntityID) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch entity from memdb using merged entity id: {{err}}", err) + } + + if entityRaw == nil { + return nil, nil + } + + entity, ok := entityRaw.(*identity.Entity) + if !ok { + return nil, fmt.Errorf("failed to declare the type of fetched entity") + } + + if clone { + return entity.Clone() + } + + return entity, nil +} + +func (i *IdentityStore) MemDBEntityByAliasIDInTxn(txn *memdb.Txn, aliasID string, clone bool) (*identity.Entity, error) { + if aliasID == "" { + return nil, fmt.Errorf("missing alias ID") + } + + if txn == nil { + return nil, fmt.Errorf("txn is nil") + } + + alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, false) + if err != nil { + return nil, err + } + + if alias == nil { + return nil, nil + } + + return i.MemDBEntityByIDInTxn(txn, alias.CanonicalID, clone) +} + +func (i *IdentityStore) MemDBEntityByAliasID(aliasID string, clone bool) (*identity.Entity, error) { + if aliasID == "" { + return nil, fmt.Errorf("missing alias ID") + } + + txn := i.db.Txn(false) + + return i.MemDBEntityByAliasIDInTxn(txn, aliasID, clone) +} + +func (i *IdentityStore) MemDBDeleteEntityByID(entityID string) error { + if entityID == "" { + return nil + } + + txn := i.db.Txn(true) + defer txn.Abort() + + err := i.MemDBDeleteEntityByIDInTxn(txn, entityID) + if err != nil { + return err + } + + txn.Commit() + + return nil +} + +func (i *IdentityStore) MemDBDeleteEntityByIDInTxn(txn *memdb.Txn, entityID string) error { + if entityID == "" { + return nil + } + + if txn == nil { + return fmt.Errorf("txn is nil") + } + + entity, err := i.MemDBEntityByIDInTxn(txn, entityID, false) + if err != nil { + return err + } + + if entity == nil { + return nil + } + + err = txn.Delete(entitiesTable, entity) + if err != nil { + return errwrap.Wrapf("failed to delete entity from memdb: {{err}}", err) + } + + return nil +} + +func (i *IdentityStore) sanitizeAlias(ctx context.Context, alias *identity.Alias) error { + var err error + + if alias == nil { + return fmt.Errorf("alias is nil") + } + + // Alias must always be tied to a canonical object + if alias.CanonicalID == "" { + return fmt.Errorf("missing canonical ID") + } + + // Alias must have a name + if alias.Name == "" { + return fmt.Errorf("missing 
alias name %q", alias.Name) + } + + // Alias metadata should always be map[string]string + err = validateMetadata(alias.Metadata) + if err != nil { + return errwrap.Wrapf("invalid alias metadata: {{err}}", err) + } + + // Create an ID if there isn't one already + if alias.ID == "" { + alias.ID, err = uuid.GenerateUUID() + if err != nil { + return fmt.Errorf("failed to generate alias ID") + } + } + + if alias.NamespaceID == "" { + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + alias.NamespaceID = ns.ID + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + if ns.ID != alias.NamespaceID { + return errors.New("alias belongs to a different namespace") + } + + // Set the creation and last update times + if alias.CreationTime == nil { + alias.CreationTime = ptypes.TimestampNow() + alias.LastUpdateTime = alias.CreationTime + } else { + alias.LastUpdateTime = ptypes.TimestampNow() + } + + return nil +} + +func (i *IdentityStore) sanitizeEntity(ctx context.Context, entity *identity.Entity) error { + var err error + + if entity == nil { + return fmt.Errorf("entity is nil") + } + + // Create an ID if there isn't one already + if entity.ID == "" { + entity.ID, err = uuid.GenerateUUID() + if err != nil { + return fmt.Errorf("failed to generate entity id") + } + + // Set the storage bucket key in entity + entity.BucketKey = i.entityPacker.BucketKey(entity.ID) + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + if entity.NamespaceID == "" { + entity.NamespaceID = ns.ID + } + if ns.ID != entity.NamespaceID { + return errors.New("entity does not belong to this namespace") + } + + // Create a name if there isn't one already + if entity.Name == "" { + entity.Name, err = i.generateName(ctx, "entity") + if err != nil { + return fmt.Errorf("failed to generate entity name") + } + } + + // Entity metadata should always be map[string]string + err = validateMetadata(entity.Metadata) + if err != nil { + return errwrap.Wrapf("invalid entity metadata: {{err}}", err) + } + + // Set the creation and last update times + if entity.CreationTime == nil { + entity.CreationTime = ptypes.TimestampNow() + entity.LastUpdateTime = entity.CreationTime + } else { + entity.LastUpdateTime = ptypes.TimestampNow() + } + + // Ensure that MFASecrets is non-nil at any time. This is useful when MFA + // secret generation procedures try to append MFA info to entity. 
+ if entity.MFASecrets == nil { + entity.MFASecrets = make(map[string]*mfa.Secret) + } + + return nil +} + +func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *identity.Group, previousGroup *identity.Group, memberGroupIDs []string) error { + var err error + + if group == nil { + return fmt.Errorf("group is nil") + } + + // Create an ID if there isn't one already + if group.ID == "" { + group.ID, err = uuid.GenerateUUID() + if err != nil { + return fmt.Errorf("failed to generate group id") + } + + // Set the hash value of the storage bucket key in group + group.BucketKey = i.groupPacker.BucketKey(group.ID) + } + + if group.NamespaceID == "" { + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + group.NamespaceID = ns.ID + } + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + if ns.ID != group.NamespaceID { + return errors.New("group does not belong to this namespace") + } + + // Create a name if there isn't one already + if group.Name == "" { + group.Name, err = i.generateName(ctx, "group") + if err != nil { + return fmt.Errorf("failed to generate group name") + } + } + + // Group metadata should always be map[string]string + err = validateMetadata(group.Metadata) + if err != nil { + return errwrap.Wrapf("invalid group metadata: {{err}}", err) + } + + // Set the creation and last update times + if group.CreationTime == nil { + group.CreationTime = ptypes.TimestampNow() + group.LastUpdateTime = group.CreationTime + } else { + group.LastUpdateTime = ptypes.TimestampNow() + } + + // Remove duplicate entity IDs and check if all IDs are valid + group.MemberEntityIDs = strutil.RemoveDuplicates(group.MemberEntityIDs, false) + for _, entityID := range group.MemberEntityIDs { + entity, err := i.MemDBEntityByID(entityID, false) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("failed to validate entity ID %q: {{err}}", entityID), err) + } + if entity == nil { + return fmt.Errorf("invalid entity ID %q", entityID) + } + } + + txn := i.db.Txn(true) + defer txn.Abort() + + var currentMemberGroupIDs []string + var currentMemberGroups []*identity.Group + + // If there are no member group IDs supplied, then it shouldn't be + // processed. If an empty set of member group IDs are supplied, then it + // should be processed. Hence the nil check instead of the length check. + if memberGroupIDs == nil { + goto ALIAS + } + + memberGroupIDs = strutil.RemoveDuplicates(memberGroupIDs, false) + + // For those group member IDs that are removed from the list, remove current + // group ID as their respective ParentGroupID.
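+ // For example (hypothetical IDs, not part of this change): if this group + // currently has member groups {A, B} but memberGroupIDs only lists {A}, + // then this group's ID is removed from B's ParentGroupIDs below.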
+ + // Get the current member group IDs for this group + currentMemberGroups, err = i.MemDBGroupsByParentGroupID(group.ID, false) + if err != nil { + return err + } + for _, currentMemberGroup := range currentMemberGroups { + currentMemberGroupIDs = append(currentMemberGroupIDs, currentMemberGroup.ID) + } + + // Update parent group IDs in the removed members + for _, currentMemberGroupID := range currentMemberGroupIDs { + if strutil.StrListContains(memberGroupIDs, currentMemberGroupID) { + continue + } + + currentMemberGroup, err := i.MemDBGroupByID(currentMemberGroupID, true) + if err != nil { + return err + } + if currentMemberGroup == nil { + return fmt.Errorf("invalid member group ID %q", currentMemberGroupID) + } + + // Remove group ID from the parent group IDs + currentMemberGroup.ParentGroupIDs = strutil.StrListDelete(currentMemberGroup.ParentGroupIDs, group.ID) + + err = i.UpsertGroupInTxn(ctx, txn, currentMemberGroup, true) + if err != nil { + return err + } + } + + // After the group lock is held, make membership updates to all the + // relevant groups + for _, memberGroupID := range memberGroupIDs { + memberGroup, err := i.MemDBGroupByID(memberGroupID, true) + if err != nil { + return err + } + if memberGroup == nil { + return fmt.Errorf("invalid member group ID %q", memberGroupID) + } + + // Skip if memberGroupID is already a member of group.ID + if strutil.StrListContains(memberGroup.ParentGroupIDs, group.ID) { + continue + } + + // Ensure that adding memberGroupID does not lead to cyclic + // relationships + // Detect self loop + if group.ID == memberGroupID { + return fmt.Errorf("member group ID %q is same as the ID of the group", group.ID) + } + + groupByID, err := i.MemDBGroupByID(group.ID, true) + if err != nil { + return err + } + + // If groupByID is nil, the group doesn't exist yet, and it's okay to + // add any group as its member group. + if groupByID != nil { + // If adding the memberGroupID to groupID creates a cycle, then groupID must + // be a hop in that loop. Start a DFS traversal from memberGroupID and see if + // it reaches back to groupID. If it does, then it's a loop. + + // Create a visited set + visited := make(map[string]bool) + cycleDetected, err := i.detectCycleDFS(visited, groupByID.ID, memberGroupID) + if err != nil { + return fmt.Errorf("failed to perform cyclic relationship detection for member group ID %q", memberGroupID) + } + if cycleDetected { + return fmt.Errorf("cyclic relationship detected for member group ID %q", memberGroupID) + } + } + + memberGroup.ParentGroupIDs = append(memberGroup.ParentGroupIDs, group.ID) + + // Technically this is not an upsert but only an update; only the + // method name says upsert. + err = i.UpsertGroupInTxn(ctx, txn, memberGroup, true) + if err != nil { + // Ideally we would want to revert the whole operation in case of + // errors while persisting in member groups. But there is no + // storage transaction support yet. When we do have it, this will need + // an update. + return err + } + } + +ALIAS: + // Sanitize the group alias + if group.Alias != nil { + group.Alias.CanonicalID = group.ID + err = i.sanitizeAlias(ctx, group.Alias) + if err != nil { + return err + } + } + + // If previousGroup is not nil, we are moving the alias from the previous + // group to the new one. As a result we need to upsert both in the context + // of this same transaction.
+	if previousGroup != nil {
+		err = i.UpsertGroupInTxn(ctx, txn, previousGroup, true)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = i.UpsertGroupInTxn(ctx, txn, group, true)
+	if err != nil {
+		return err
+	}
+
+	txn.Commit()
+
+	return nil
+}
+
+func (i *IdentityStore) deleteAliasesInEntityInTxn(txn *memdb.Txn, entity *identity.Entity, aliases []*identity.Alias) error {
+	if entity == nil {
+		return fmt.Errorf("entity is nil")
+	}
+
+	if txn == nil {
+		return fmt.Errorf("txn is nil")
+	}
+
+	var remainList []*identity.Alias
+	var removeList []*identity.Alias
+
+	// Classify each of the entity's aliases exactly once. (Appending to
+	// remainList inside a loop over the aliases to delete would add duplicate
+	// entries whenever more than one alias is being deleted.)
+	for _, alias := range entity.Aliases {
+		marked := false
+		for _, item := range aliases {
+			if alias.ID == item.ID {
+				marked = true
+				break
+			}
+		}
+		if marked {
+			removeList = append(removeList, alias)
+		} else {
+			remainList = append(remainList, alias)
+		}
+	}
+
+	// Remove identity indices from the aliases table for those that need to
+	// be removed
+	for _, alias := range removeList {
+		err := i.MemDBDeleteAliasByIDInTxn(txn, alias.ID, false)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Update the entity with the remaining items
+	entity.Aliases = remainList
+
+	return nil
+}
+
+// validateMetadata validates a set of metadata key/value pairs
+func validateMetadata(meta map[string]string) error {
+	if len(meta) > metaMaxKeyPairs {
+		return fmt.Errorf("metadata cannot contain more than %d key/value pairs", metaMaxKeyPairs)
+	}
+
+	for key, value := range meta {
+		if err := validateMetaPair(key, value); err != nil {
+			return errwrap.Wrapf(fmt.Sprintf("failed to load metadata pair (%q, %q): {{err}}", key, value), err)
+		}
+	}
+
+	return nil
+}
+
+// validateMetaPair checks that the given key/value pair is in a valid format
+func validateMetaPair(key, value string) error {
+	if key == "" {
+		return fmt.Errorf("key cannot be blank")
+	}
+	if !metaKeyFormatRegEx(key) {
+		return fmt.Errorf("key contains invalid characters")
+	}
+	if len(key) > metaKeyMaxLength {
+		return fmt.Errorf("key is too long (limit: %d characters)", metaKeyMaxLength)
+	}
+	if strings.HasPrefix(key, metaKeyReservedPrefix) {
+		return fmt.Errorf("key prefix %q is reserved for internal use", metaKeyReservedPrefix)
+	}
+	if len(value) > metaValueMaxLength {
+		return fmt.Errorf("value is too long (limit: %d characters)", metaValueMaxLength)
+	}
+	return nil
+}
+
+func (i *IdentityStore) MemDBGroupByNameInTxn(ctx context.Context, txn *memdb.Txn, groupName string, clone bool) (*identity.Group, error) {
+	if groupName == "" {
+		return nil, fmt.Errorf("missing group name")
+	}
+
+	if txn == nil {
+		return nil, fmt.Errorf("txn is nil")
+	}
+
+	ns, err := namespace.FromContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	groupRaw, err := txn.First(groupsTable, "name", ns.ID, groupName)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to fetch group from memdb using group name: {{err}}", err)
+	}
+
+	if groupRaw == nil {
+		return nil, nil
+	}
+
+	group, ok := groupRaw.(*identity.Group)
+	if !ok {
+		return nil, fmt.Errorf("failed to declare the type of fetched group")
+	}
+
+	if clone {
+		return group.Clone()
+	}
+
+	return group, nil
+}
+
+func (i *IdentityStore) MemDBGroupByName(ctx context.Context, groupName string, clone bool) (*identity.Group, error) {
+	if groupName == "" {
+		return nil, fmt.Errorf("missing group name")
+	}
+
+	txn := i.db.Txn(false)
+
+	return i.MemDBGroupByNameInTxn(ctx, txn, groupName, clone)
+}
+
+func (i *IdentityStore) UpsertGroup(ctx context.Context, group *identity.Group, persist bool) error {
+	txn := i.db.Txn(true)
+	defer txn.Abort()
+
+	err := i.UpsertGroupInTxn(ctx, txn, group, 
true) + if err != nil { + return err + } + + txn.Commit() + + return nil +} + +func (i *IdentityStore) UpsertGroupInTxn(ctx context.Context, txn *memdb.Txn, group *identity.Group, persist bool) error { + var err error + + if txn == nil { + return fmt.Errorf("txn is nil") + } + + if group == nil { + return fmt.Errorf("group is nil") + } + + // Increment the modify index of the group + group.ModifyIndex++ + + // Clear the old alias from memdb + groupClone, err := i.MemDBGroupByID(group.ID, true) + if err != nil { + return err + } + if groupClone != nil && groupClone.Alias != nil { + err = i.MemDBDeleteAliasByIDInTxn(txn, groupClone.Alias.ID, true) + if err != nil { + return err + } + } + + // Add the new alias to memdb + if group.Alias != nil { + err = i.MemDBUpsertAliasInTxn(txn, group.Alias, true) + if err != nil { + return err + } + } + + // Insert or update group in MemDB using the transaction created above + err = i.MemDBUpsertGroupInTxn(txn, group) + if err != nil { + return err + } + + if persist { + groupAsAny, err := ptypes.MarshalAny(group) + if err != nil { + return err + } + + item := &storagepacker.Item{ + ID: group.ID, + Message: groupAsAny, + } + + sent, err := sendGroupUpgrade(i, group) + if err != nil { + return err + } + if !sent { + if err := i.groupPacker.PutItem(ctx, item); err != nil { + return err + } + } + } + + return nil +} + +func (i *IdentityStore) MemDBUpsertGroupInTxn(txn *memdb.Txn, group *identity.Group) error { + if txn == nil { + return fmt.Errorf("nil txn") + } + + if group == nil { + return fmt.Errorf("group is nil") + } + + if group.NamespaceID == "" { + group.NamespaceID = namespace.RootNamespaceID + } + + groupRaw, err := txn.First(groupsTable, "id", group.ID) + if err != nil { + return errwrap.Wrapf("failed to lookup group from memdb using group id: {{err}}", err) + } + + if groupRaw != nil { + err = txn.Delete(groupsTable, groupRaw) + if err != nil { + return errwrap.Wrapf("failed to delete group from memdb: {{err}}", err) + } + } + + if err := txn.Insert(groupsTable, group); err != nil { + return errwrap.Wrapf("failed to update group into memdb: {{err}}", err) + } + + return nil +} + +func (i *IdentityStore) MemDBDeleteGroupByIDInTxn(txn *memdb.Txn, groupID string) error { + if groupID == "" { + return nil + } + + if txn == nil { + return fmt.Errorf("txn is nil") + } + + group, err := i.MemDBGroupByIDInTxn(txn, groupID, false) + if err != nil { + return err + } + + if group == nil { + return nil + } + + err = txn.Delete("groups", group) + if err != nil { + return errwrap.Wrapf("failed to delete group from memdb: {{err}}", err) + } + + return nil +} + +func (i *IdentityStore) MemDBGroupByIDInTxn(txn *memdb.Txn, groupID string, clone bool) (*identity.Group, error) { + if groupID == "" { + return nil, fmt.Errorf("missing group ID") + } + + if txn == nil { + return nil, fmt.Errorf("txn is nil") + } + + groupRaw, err := txn.First(groupsTable, "id", groupID) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch group from memdb using group ID: {{err}}", err) + } + + if groupRaw == nil { + return nil, nil + } + + group, ok := groupRaw.(*identity.Group) + if !ok { + return nil, fmt.Errorf("failed to declare the type of fetched group") + } + + if clone { + return group.Clone() + } + + return group, nil +} + +func (i *IdentityStore) MemDBGroupByID(groupID string, clone bool) (*identity.Group, error) { + if groupID == "" { + return nil, fmt.Errorf("missing group ID") + } + + txn := i.db.Txn(false) + + return i.MemDBGroupByIDInTxn(txn, groupID, clone) 
+} + +func (i *IdentityStore) MemDBGroupsByParentGroupIDInTxn(txn *memdb.Txn, memberGroupID string, clone bool) ([]*identity.Group, error) { + if memberGroupID == "" { + return nil, fmt.Errorf("missing member group ID") + } + + groupsIter, err := txn.Get(groupsTable, "parent_group_ids", memberGroupID) + if err != nil { + return nil, errwrap.Wrapf("failed to lookup groups using member group ID: {{err}}", err) + } + + var groups []*identity.Group + for group := groupsIter.Next(); group != nil; group = groupsIter.Next() { + entry := group.(*identity.Group) + if clone { + entry, err = entry.Clone() + if err != nil { + return nil, err + } + } + groups = append(groups, entry) + } + + return groups, nil +} + +func (i *IdentityStore) MemDBGroupsByParentGroupID(memberGroupID string, clone bool) ([]*identity.Group, error) { + if memberGroupID == "" { + return nil, fmt.Errorf("missing member group ID") + } + + txn := i.db.Txn(false) + + return i.MemDBGroupsByParentGroupIDInTxn(txn, memberGroupID, clone) +} + +func (i *IdentityStore) MemDBGroupsByMemberEntityID(entityID string, clone bool, externalOnly bool) ([]*identity.Group, error) { + txn := i.db.Txn(false) + defer txn.Abort() + + return i.MemDBGroupsByMemberEntityIDInTxn(txn, entityID, clone, externalOnly) +} + +func (i *IdentityStore) MemDBGroupsByMemberEntityIDInTxn(txn *memdb.Txn, entityID string, clone bool, externalOnly bool) ([]*identity.Group, error) { + if entityID == "" { + return nil, fmt.Errorf("missing entity ID") + } + + groupsIter, err := txn.Get(groupsTable, "member_entity_ids", entityID) + if err != nil { + return nil, errwrap.Wrapf("failed to lookup groups using entity ID: {{err}}", err) + } + + var groups []*identity.Group + for group := groupsIter.Next(); group != nil; group = groupsIter.Next() { + entry := group.(*identity.Group) + if externalOnly && entry.Type == groupTypeInternal { + continue + } + if clone { + entry, err = entry.Clone() + if err != nil { + return nil, err + } + } + groups = append(groups, entry) + } + + return groups, nil +} + +func (i *IdentityStore) groupPoliciesByEntityID(entityID string) (map[string][]string, error) { + if entityID == "" { + return nil, fmt.Errorf("empty entity ID") + } + + groups, err := i.MemDBGroupsByMemberEntityID(entityID, false, false) + if err != nil { + return nil, err + } + + visited := make(map[string]bool) + policies := make(map[string][]string) + for _, group := range groups { + err := i.collectPoliciesReverseDFS(group, visited, policies) + if err != nil { + return nil, err + } + } + + return policies, nil +} + +func (i *IdentityStore) groupsByEntityID(entityID string) ([]*identity.Group, []*identity.Group, error) { + if entityID == "" { + return nil, nil, fmt.Errorf("empty entity ID") + } + + groups, err := i.MemDBGroupsByMemberEntityID(entityID, true, false) + if err != nil { + return nil, nil, err + } + + visited := make(map[string]bool) + var tGroups []*identity.Group + for _, group := range groups { + gGroups, err := i.collectGroupsReverseDFS(group, visited, nil) + if err != nil { + return nil, nil, err + } + tGroups = append(tGroups, gGroups...) 
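+		// Editorial aside (illustration, not upstream code):
+		// collectGroupsReverseDFS walks "upward" through ParentGroupIDs, so
+		// tGroups accumulates both the entity's direct groups and every
+		// ancestor group. For example, if the entity is a member of B and B's
+		// ParentGroupIDs contains A, the walk from B yields [B, A]; the diff
+		// below then reports B as Unmodified (direct membership) and A as New
+		// (inherited only).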
+ } + + // Remove duplicates + groupMap := make(map[string]*identity.Group) + for _, group := range tGroups { + groupMap[group.ID] = group + } + + tGroups = make([]*identity.Group, 0, len(groupMap)) + for _, group := range groupMap { + tGroups = append(tGroups, group) + } + + diff := diffGroups(groups, tGroups) + + // For sanity + // There should not be any group that gets deleted + if len(diff.Deleted) != 0 { + return nil, nil, fmt.Errorf("failed to diff group memberships") + } + + return diff.Unmodified, diff.New, nil +} + +func (i *IdentityStore) collectGroupsReverseDFS(group *identity.Group, visited map[string]bool, groups []*identity.Group) ([]*identity.Group, error) { + if group == nil { + return nil, fmt.Errorf("nil group") + } + + // If traversal for a groupID is performed before, skip it + if visited[group.ID] { + return groups, nil + } + visited[group.ID] = true + + groups = append(groups, group) + + // Traverse all the parent groups + for _, parentGroupID := range group.ParentGroupIDs { + parentGroup, err := i.MemDBGroupByID(parentGroupID, false) + if err != nil { + return nil, err + } + if parentGroup == nil { + continue + } + groups, err = i.collectGroupsReverseDFS(parentGroup, visited, groups) + if err != nil { + return nil, fmt.Errorf("failed to collect group at parent group ID %q", parentGroup.ID) + } + } + + return groups, nil +} + +func (i *IdentityStore) collectPoliciesReverseDFS(group *identity.Group, visited map[string]bool, policies map[string][]string) error { + if group == nil { + return fmt.Errorf("nil group") + } + + // If traversal for a groupID is performed before, skip it + if visited[group.ID] { + return nil + } + visited[group.ID] = true + + policies[group.NamespaceID] = append(policies[group.NamespaceID], group.Policies...) + + // Traverse all the parent groups + for _, parentGroupID := range group.ParentGroupIDs { + parentGroup, err := i.MemDBGroupByID(parentGroupID, false) + if err != nil { + return err + } + if parentGroup == nil { + continue + } + err = i.collectPoliciesReverseDFS(parentGroup, visited, policies) + if err != nil { + return fmt.Errorf("failed to collect policies at parent group ID %q", parentGroup.ID) + } + } + + return nil +} + +func (i *IdentityStore) detectCycleDFS(visited map[string]bool, startingGroupID, groupID string) (bool, error) { + // If the traversal reaches the startingGroupID, a loop is detected + if startingGroupID == groupID { + return true, nil + } + + // If traversal for a groupID is performed before, skip it + if visited[groupID] { + return false, nil + } + visited[groupID] = true + + group, err := i.MemDBGroupByID(groupID, true) + if err != nil { + return false, err + } + if group == nil { + return false, nil + } + + // Fetch all groups in which groupID is present as a ParentGroupID. In + // other words, find all the subgroups of groupID. 
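+	// Editorial aside (illustration, not upstream code): suppose A has
+	// subgroup B, and B has subgroup C. Adding A as a member group of C
+	// starts this DFS at A with startingGroupID = C; the walk descends
+	// A -> B -> C, reaches startingGroupID, and reports the cycle before the
+	// membership is committed.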
+ memberGroups, err := i.MemDBGroupsByParentGroupID(groupID, false) + if err != nil { + return false, err + } + + // DFS traverse the member groups + for _, memberGroup := range memberGroups { + cycleDetected, err := i.detectCycleDFS(visited, startingGroupID, memberGroup.ID) + if err != nil { + return false, fmt.Errorf("failed to perform cycle detection at member group ID %q", memberGroup.ID) + } + if cycleDetected { + return true, fmt.Errorf("cycle detected at member group ID %q", memberGroup.ID) + } + } + + return false, nil +} + +func (i *IdentityStore) memberGroupIDsByID(groupID string) ([]string, error) { + var memberGroupIDs []string + memberGroups, err := i.MemDBGroupsByParentGroupID(groupID, false) + if err != nil { + return nil, err + } + for _, memberGroup := range memberGroups { + memberGroupIDs = append(memberGroupIDs, memberGroup.ID) + } + return memberGroupIDs, nil +} + +func (i *IdentityStore) generateName(ctx context.Context, entryType string) (string, error) { + var name string +OUTER: + for { + randBytes, err := uuid.GenerateRandomBytes(4) + if err != nil { + return "", err + } + name = fmt.Sprintf("%s_%s", entryType, fmt.Sprintf("%08x", randBytes[0:4])) + + switch entryType { + case "entity": + entity, err := i.MemDBEntityByName(ctx, name, false) + if err != nil { + return "", err + } + if entity == nil { + break OUTER + } + case "group": + group, err := i.MemDBGroupByName(ctx, name, false) + if err != nil { + return "", err + } + if group == nil { + break OUTER + } + default: + return "", fmt.Errorf("unrecognized type %q", entryType) + } + } + + return name, nil +} + +func (i *IdentityStore) MemDBGroupsByBucketKeyInTxn(txn *memdb.Txn, bucketKey string) ([]*identity.Group, error) { + if txn == nil { + return nil, fmt.Errorf("nil txn") + } + + if bucketKey == "" { + return nil, fmt.Errorf("empty bucket key") + } + + groupsIter, err := txn.Get(groupsTable, "bucket_key", bucketKey) + if err != nil { + return nil, errwrap.Wrapf("failed to lookup groups using bucket entry key hash: {{err}}", err) + } + + var groups []*identity.Group + for group := groupsIter.Next(); group != nil; group = groupsIter.Next() { + groups = append(groups, group.(*identity.Group)) + } + + return groups, nil +} + +func (i *IdentityStore) MemDBGroupByAliasIDInTxn(txn *memdb.Txn, aliasID string, clone bool) (*identity.Group, error) { + if aliasID == "" { + return nil, fmt.Errorf("missing alias ID") + } + + if txn == nil { + return nil, fmt.Errorf("txn is nil") + } + + alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, true) + if err != nil { + return nil, err + } + + if alias == nil { + return nil, nil + } + + return i.MemDBGroupByIDInTxn(txn, alias.CanonicalID, clone) +} + +func (i *IdentityStore) MemDBGroupByAliasID(aliasID string, clone bool) (*identity.Group, error) { + if aliasID == "" { + return nil, fmt.Errorf("missing alias ID") + } + + txn := i.db.Txn(false) + + return i.MemDBGroupByAliasIDInTxn(txn, aliasID, clone) +} + +func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(ctx context.Context, entityID string, groupAliases []*logical.Alias) ([]*logical.Alias, error) { + i.logger.Debug("refreshing external group memberships", "entity_id", entityID, "group_aliases", groupAliases) + if entityID == "" { + return nil, fmt.Errorf("empty entity ID") + } + + i.groupLock.Lock() + defer i.groupLock.Unlock() + + txn := i.db.Txn(true) + defer txn.Abort() + + oldGroups, err := i.MemDBGroupsByMemberEntityIDInTxn(txn, entityID, true, true) + if err != nil { + return nil, err + } + + 
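+	// Editorial aside (assumption, not stated in the upstream source): the
+	// group aliases passed in come from a single login and so are expected to
+	// share one mount accessor, which is why taking it from the first alias
+	// below is representative. It is used to avoid pruning the entity from
+	// external groups that belong to a different auth mount.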
mountAccessor := ""
+	if len(groupAliases) != 0 {
+		mountAccessor = groupAliases[0].MountAccessor
+	}
+
+	var newGroups []*identity.Group
+	var validAliases []*logical.Alias
+	for _, alias := range groupAliases {
+		aliasByFactors, err := i.MemDBAliasByFactors(alias.MountAccessor, alias.Name, true, true)
+		if err != nil {
+			return nil, err
+		}
+		if aliasByFactors == nil {
+			continue
+		}
+		mappingGroup, err := i.MemDBGroupByAliasID(aliasByFactors.ID, true)
+		if err != nil {
+			return nil, err
+		}
+		if mappingGroup == nil {
+			return nil, fmt.Errorf("group unavailable for a valid alias ID %q", aliasByFactors.ID)
+		}
+
+		newGroups = append(newGroups, mappingGroup)
+		validAliases = append(validAliases, alias)
+	}
+
+	diff := diffGroups(oldGroups, newGroups)
+
+	// Add the entity ID to all the new groups
+	for _, group := range diff.New {
+		if group.Type != groupTypeExternal {
+			continue
+		}
+
+		i.logger.Debug("adding member entity ID to external group", "member_entity_id", entityID, "group_id", group.ID)
+
+		group.MemberEntityIDs = append(group.MemberEntityIDs, entityID)
+
+		err = i.UpsertGroupInTxn(ctx, txn, group, true)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Remove the entity ID from all the deleted groups
+	for _, group := range diff.Deleted {
+		if group.Type != groupTypeExternal {
+			continue
+		}
+
+		// If the external group is from a different mount, don't remove the
+		// entity ID from it.
+		if mountAccessor != "" && group.Alias != nil && group.Alias.MountAccessor != mountAccessor {
+			continue
+		}
+
+		i.logger.Debug("removing member entity ID from external group", "member_entity_id", entityID, "group_id", group.ID)
+
+		group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, entityID)
+
+		err = i.UpsertGroupInTxn(ctx, txn, group, true)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	txn.Commit()
+
+	return validAliases, nil
+}
+
+// diffGroups is used to diff two sets of groups
+func diffGroups(old, new []*identity.Group) *groupDiff {
+	diff := &groupDiff{}
+
+	existing := make(map[string]*identity.Group)
+	for _, group := range old {
+		existing[group.ID] = group
+	}
+
+	for _, group := range new {
+		// Check if the entry in new is present in the old
+		_, ok := existing[group.ID]
+
+		// If it's not present, it's a new entry
+		if !ok {
+			diff.New = append(diff.New, group)
+			continue
+		}
+
+		// If it's present, it's unmodified
+		diff.Unmodified = append(diff.Unmodified, group)
+
+		// By deleting the unmodified entries from the old set, we can
+		// determine the stale ones from what remains.
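+		// Editorial example (not upstream code): with old = {A, B} and
+		// new = {B, C}, B lands in Unmodified, C lands in New, and A is left
+		// behind in `existing`, so it ends up in Deleted below.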
+ delete(existing, group.ID) + } + + // Any remaining entries must have been deleted + for _, me := range existing { + diff.Deleted = append(diff.Deleted, me) + } + + return diff +} + +func (i *IdentityStore) handleAliasListCommon(ctx context.Context, groupAlias bool) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + ws := memdb.NewWatchSet() + + txn := i.db.Txn(false) + + iter, err := txn.Get(tableName, "namespace_id", ns.ID) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch iterator for aliases in memdb: {{err}}", err) + } + + ws.Add(iter.WatchCh()) + + var aliasIDs []string + aliasInfo := map[string]interface{}{} + + type mountInfo struct { + MountType string + MountPath string + } + mountAccessorMap := map[string]mountInfo{} + + for { + raw := iter.Next() + if raw == nil { + break + } + alias := raw.(*identity.Alias) + aliasIDs = append(aliasIDs, alias.ID) + aliasInfoEntry := map[string]interface{}{ + "name": alias.Name, + "canonical_id": alias.CanonicalID, + "mount_accessor": alias.MountAccessor, + } + + mi, ok := mountAccessorMap[alias.MountAccessor] + if ok { + aliasInfoEntry["mount_type"] = mi.MountType + aliasInfoEntry["mount_path"] = mi.MountPath + } else { + mi = mountInfo{} + if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil { + mi.MountType = mountValidationResp.MountType + mi.MountPath = mountValidationResp.MountPath + aliasInfoEntry["mount_type"] = mi.MountType + aliasInfoEntry["mount_path"] = mi.MountPath + } + mountAccessorMap[alias.MountAccessor] = mi + } + + aliasInfo[alias.ID] = aliasInfoEntry + } + + return logical.ListResponseWithInfo(aliasIDs, aliasInfo), nil +} + +func (i *IdentityStore) countEntities() (int, error) { + txn := i.db.Txn(false) + + iter, err := txn.Get(entitiesTable, "id") + if err != nil { + return -1, err + } + + count := 0 + val := iter.Next() + for val != nil { + count++ + val = iter.Next() + } + + return count, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/init.go b/vendor/github.com/hashicorp/vault/vault/init.go new file mode 100644 index 00000000..fe0207e8 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/init.go @@ -0,0 +1,460 @@ +package vault + +import ( + "context" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "net/url" + "sync/atomic" + + "github.com/hashicorp/vault/physical/raft" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/hashicorp/vault/shamir" + "github.com/hashicorp/vault/vault/seal" + shamirseal "github.com/hashicorp/vault/vault/seal/shamir" +) + +// InitParams keeps the init function from being littered with too many +// params, that's it! +type InitParams struct { + BarrierConfig *SealConfig + RecoveryConfig *SealConfig + RootTokenPGPKey string + // LegacyShamirSeal should only be used in test code, we don't want to + // give the user a way to create legacy shamir seals. + LegacyShamirSeal bool +} + +// InitResult is used to provide the key parts back after +// they are generated as part of the initialization. 
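+// Editorial note (hedged summary of the code that follows): SecretShares
+// carries the barrier unseal shares (or, for Shamir-backed auto-seals, the
+// seal key shares), RecoveryShares is populated only when the seal supports
+// recovery keys, and each share is PGP-encrypted first when PGP keys were
+// supplied in the request.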
+type InitResult struct { + SecretShares [][]byte + RecoveryShares [][]byte + RootToken string +} + +var ( + initPTFunc = func(c *Core) func() { return nil } + initInProgress uint32 +) + +func (c *Core) InitializeRecovery(ctx context.Context) error { + if !c.recoveryMode { + return nil + } + + raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return nil + } + + parsedClusterAddr, err := url.Parse(c.ClusterAddr()) + if err != nil { + return err + } + + c.postRecoveryUnsealFuncs = append(c.postRecoveryUnsealFuncs, func() error { + return raftStorage.StartRecoveryCluster(context.Background(), raft.Peer{ + ID: raftStorage.NodeID(), + Address: parsedClusterAddr.Host, + }) + }) + + return nil +} + +// Initialized checks if the Vault is already initialized +func (c *Core) Initialized(ctx context.Context) (bool, error) { + // Check the barrier first + init, err := c.barrier.Initialized(ctx) + if err != nil { + c.logger.Error("barrier init check failed", "error", err) + return false, err + } + if !init { + c.logger.Info("security barrier not initialized") + return false, nil + } + + // Verify the seal configuration + sealConf, err := c.seal.BarrierConfig(ctx) + if err != nil { + return false, err + } + if sealConf == nil { + return false, fmt.Errorf("core: barrier reports initialized but no seal configuration found") + } + + return true, nil +} + +func (c *Core) generateShares(sc *SealConfig) ([]byte, [][]byte, error) { + // Generate a master key + masterKey, err := c.barrier.GenerateKey(c.secureRandomReader) + if err != nil { + return nil, nil, errwrap.Wrapf("key generation failed: {{err}}", err) + } + + // Return the master key if only a single key part is used + var unsealKeys [][]byte + if sc.SecretShares == 1 { + unsealKeys = append(unsealKeys, masterKey) + } else { + // Split the master key using the Shamir algorithm + shares, err := shamir.Split(masterKey, sc.SecretShares, sc.SecretThreshold) + if err != nil { + return nil, nil, errwrap.Wrapf("failed to generate barrier shares: {{err}}", err) + } + unsealKeys = shares + } + + // If we have PGP keys, perform the encryption + if len(sc.PGPKeys) > 0 { + hexEncodedShares := make([][]byte, len(unsealKeys)) + for i, _ := range unsealKeys { + hexEncodedShares[i] = []byte(hex.EncodeToString(unsealKeys[i])) + } + _, encryptedShares, err := pgpkeys.EncryptShares(hexEncodedShares, sc.PGPKeys) + if err != nil { + return nil, nil, err + } + unsealKeys = encryptedShares + } + + return masterKey, unsealKeys, nil +} + +// Initialize is used to initialize the Vault with the given +// configurations. +func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitResult, error) { + atomic.StoreUint32(&initInProgress, 1) + defer atomic.StoreUint32(&initInProgress, 0) + barrierConfig := initParams.BarrierConfig + recoveryConfig := initParams.RecoveryConfig + + // N.B. Although the core is capable of handling situations where some keys + // are stored and some aren't, in practice, replication + HSMs makes this + // extremely hard to reason about, to the point that it will probably never + // be supported. The reason is that each HSM needs to encode the master key + // separately, which means the shares must be generated independently, + // which means both that the shares will be different *AND* there would + // need to be a way to actually allow fetching of the generated keys by + // operators. 
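+	// Editorial example (not upstream code): with an auto-seal
+	// (StoredKeysSupportedGeneric), an init request asking for, say, 5 shares
+	// with a threshold of 3 is collapsed to 1/1 below, because the single
+	// master key is stored through the seal rather than handed back to
+	// operators as unseal shares.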
+ if c.SealAccess().StoredKeysSupported() == StoredKeysSupportedGeneric { + if len(barrierConfig.PGPKeys) > 0 { + return nil, fmt.Errorf("PGP keys not supported when storing shares") + } + barrierConfig.SecretShares = 1 + barrierConfig.SecretThreshold = 1 + if barrierConfig.StoredShares != 1 { + c.Logger().Warn("stored keys supported on init, forcing shares/threshold to 1") + } + } + + if initParams.LegacyShamirSeal { + barrierConfig.StoredShares = 0 + } else { + barrierConfig.StoredShares = 1 + } + + if len(barrierConfig.PGPKeys) > 0 && len(barrierConfig.PGPKeys) != barrierConfig.SecretShares { + return nil, fmt.Errorf("incorrect number of PGP keys") + } + + if c.SealAccess().RecoveryKeySupported() { + if len(recoveryConfig.PGPKeys) > 0 && len(recoveryConfig.PGPKeys) != recoveryConfig.SecretShares { + return nil, fmt.Errorf("incorrect number of PGP keys for recovery") + } + } + + if c.seal.RecoveryKeySupported() { + if recoveryConfig == nil { + return nil, fmt.Errorf("recovery configuration must be supplied") + } + + if recoveryConfig.SecretShares < 1 { + return nil, fmt.Errorf("recovery configuration must specify a positive number of shares") + } + + // Check if the seal configuration is valid + if err := recoveryConfig.Validate(); err != nil { + c.logger.Error("invalid recovery configuration", "error", err) + return nil, errwrap.Wrapf("invalid recovery configuration: {{err}}", err) + } + } + + // Check if the seal configuration is valid + if err := barrierConfig.Validate(); err != nil { + c.logger.Error("invalid seal configuration", "error", err) + return nil, errwrap.Wrapf("invalid seal configuration: {{err}}", err) + } + + // Avoid an initialization race + c.stateLock.Lock() + defer c.stateLock.Unlock() + + // Check if we are initialized + init, err := c.Initialized(ctx) + if err != nil { + return nil, err + } + if init { + return nil, ErrAlreadyInit + } + + // If we have clustered storage, set it up now + if raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend); ok { + parsedClusterAddr, err := url.Parse(c.ClusterAddr()) + if err != nil { + return nil, errwrap.Wrapf("error parsing cluster address: {{err}}", err) + } + if err := raftStorage.Bootstrap(ctx, []raft.Peer{ + { + ID: raftStorage.NodeID(), + Address: parsedClusterAddr.Host, + }, + }); err != nil { + return nil, errwrap.Wrapf("could not bootstrap clustered storage: {{err}}", err) + } + + if err := raftStorage.SetupCluster(ctx, raft.SetupOpts{ + StartAsLeader: true, + }); err != nil { + return nil, errwrap.Wrapf("could not start clustered storage: {{err}}", err) + } + + defer func() { + if err := raftStorage.TeardownCluster(nil); err != nil { + c.logger.Error("failed to stop raft storage", "error", err) + } + }() + } + + err = c.seal.Init(ctx) + if err != nil { + c.logger.Error("failed to initialize seal", "error", err) + return nil, errwrap.Wrapf("error initializing seal: {{err}}", err) + } + + initPTCleanup := initPTFunc(c) + if initPTCleanup != nil { + defer initPTCleanup() + } + + barrierKey, barrierKeyShares, err := c.generateShares(barrierConfig) + if err != nil { + c.logger.Error("error generating shares", "error", err) + return nil, err + } + + var sealKey []byte + var sealKeyShares [][]byte + if barrierConfig.StoredShares == 1 && c.seal.BarrierType() == seal.Shamir { + sealKey, sealKeyShares, err = c.generateShares(barrierConfig) + if err != nil { + c.logger.Error("error generating shares", "error", err) + return nil, err + } + } + + // Initialize the barrier + if err := c.barrier.Initialize(ctx, barrierKey, 
sealKey, c.secureRandomReader); err != nil { + c.logger.Error("failed to initialize barrier", "error", err) + return nil, errwrap.Wrapf("failed to initialize barrier: {{err}}", err) + } + if c.logger.IsInfo() { + c.logger.Info("security barrier initialized", "stored", barrierConfig.StoredShares, "shares", barrierConfig.SecretShares, "threshold", barrierConfig.SecretThreshold) + } + + // Unseal the barrier + if err := c.barrier.Unseal(ctx, barrierKey); err != nil { + c.logger.Error("failed to unseal barrier", "error", err) + return nil, errwrap.Wrapf("failed to unseal barrier: {{err}}", err) + } + + // Ensure the barrier is re-sealed + defer func() { + // Defers are LIFO so we need to run this here too to ensure the stop + // happens before sealing. preSeal also stops, so we just make the + // stopping safe against multiple calls. + if err := c.barrier.Seal(); err != nil { + c.logger.Error("failed to seal barrier", "error", err) + } + }() + + err = c.seal.SetBarrierConfig(ctx, barrierConfig) + if err != nil { + c.logger.Error("failed to save barrier configuration", "error", err) + return nil, errwrap.Wrapf("barrier configuration saving failed: {{err}}", err) + } + + results := &InitResult{ + SecretShares: [][]byte{}, + } + + // If we are storing shares, pop them out of the returned results and push + // them through the seal + switch c.seal.StoredKeysSupported() { + case StoredKeysSupportedShamirMaster: + keysToStore := [][]byte{barrierKey} + if err := c.seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(sealKey); err != nil { + c.logger.Error("failed to set seal key", "error", err) + return nil, errwrap.Wrapf("failed to set seal key: {{err}}", err) + } + if err := c.seal.SetStoredKeys(ctx, keysToStore); err != nil { + c.logger.Error("failed to store keys", "error", err) + return nil, errwrap.Wrapf("failed to store keys: {{err}}", err) + } + results.SecretShares = sealKeyShares + case StoredKeysSupportedGeneric: + keysToStore := [][]byte{barrierKey} + if err := c.seal.SetStoredKeys(ctx, keysToStore); err != nil { + c.logger.Error("failed to store keys", "error", err) + return nil, errwrap.Wrapf("failed to store keys: {{err}}", err) + } + default: + // We don't support initializing an old-style Shamir seal anymore, so + // this case is only reachable by tests. + results.SecretShares = barrierKeyShares + } + + // Perform initial setup + if err := c.setupCluster(ctx); err != nil { + c.logger.Error("cluster setup failed during init", "error", err) + return nil, err + } + + // Start tracking + if initPTCleanup != nil { + initPTCleanup() + } + + activeCtx, ctxCancel := context.WithCancel(namespace.RootContext(nil)) + if err := c.postUnseal(activeCtx, ctxCancel, standardUnsealStrategy{}); err != nil { + c.logger.Error("post-unseal setup failed during init", "error", err) + return nil, err + } + + // Save the configuration regardless, but only generate a key if it's not + // disabled. When using recovery keys they are stored in the barrier, so + // this must happen post-unseal. 
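+	// Editorial aside (restating the flow below): the recovery SealConfig is
+	// always persisted when the seal supports recovery keys, but recovery key
+	// shares are generated and returned only when SecretShares > 0.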
+ if c.seal.RecoveryKeySupported() { + err = c.seal.SetRecoveryConfig(ctx, recoveryConfig) + if err != nil { + c.logger.Error("failed to save recovery configuration", "error", err) + return nil, errwrap.Wrapf("recovery configuration saving failed: {{err}}", err) + } + + if recoveryConfig.SecretShares > 0 { + recoveryKey, recoveryUnsealKeys, err := c.generateShares(recoveryConfig) + if err != nil { + c.logger.Error("failed to generate recovery shares", "error", err) + return nil, err + } + + err = c.seal.SetRecoveryKey(ctx, recoveryKey) + if err != nil { + return nil, err + } + + results.RecoveryShares = recoveryUnsealKeys + } + } + + // Generate a new root token + rootToken, err := c.tokenStore.rootToken(ctx) + if err != nil { + c.logger.Error("root token generation failed", "error", err) + return nil, err + } + results.RootToken = rootToken.ID + c.logger.Info("root token generated") + + if initParams.RootTokenPGPKey != "" { + _, encryptedVals, err := pgpkeys.EncryptShares([][]byte{[]byte(results.RootToken)}, []string{initParams.RootTokenPGPKey}) + if err != nil { + c.logger.Error("root token encryption failed", "error", err) + return nil, err + } + results.RootToken = base64.StdEncoding.EncodeToString(encryptedVals[0]) + } + + if err := c.createRaftTLSKeyring(ctx); err != nil { + c.logger.Error("failed to create raft TLS keyring", "error", err) + return nil, err + } + + // Prepare to re-seal + if err := c.preSeal(); err != nil { + c.logger.Error("pre-seal teardown failed", "error", err) + return nil, err + } + + return results, nil +} + +// UnsealWithStoredKeys performs auto-unseal using stored keys. An error +// return value of "nil" implies the Vault instance is unsealed. +// +// Callers should attempt to retry any NonFatalErrors. Callers should +// not re-attempt fatal errors. +func (c *Core) UnsealWithStoredKeys(ctx context.Context) error { + c.unsealWithStoredKeysLock.Lock() + defer c.unsealWithStoredKeysLock.Unlock() + + if c.seal.BarrierType() == seal.Shamir { + return nil + } + + // Disallow auto-unsealing when migrating + if c.IsInSealMigration() { + return NewNonFatalError(errors.New("cannot auto-unseal during seal migration")) + } + + sealed := c.Sealed() + if !sealed { + c.Logger().Warn("attempted unseal with stored keys, but vault is already unsealed") + return nil + } + + c.Logger().Info("stored unseal keys supported, attempting fetch") + keys, err := c.seal.GetStoredKeys(ctx) + if err != nil { + return NewNonFatalError(errwrap.Wrapf("fetching stored unseal keys failed: {{err}}", err)) + } + + // This usually happens when auto-unseal is configured, but the servers have + // not been initialized yet. + if len(keys) == 0 { + return NewNonFatalError(errors.New("stored unseal keys are supported, but none were found")) + } + + unsealed := false + keysUsed := 0 + for _, key := range keys { + unsealed, err = c.Unseal(key) + if err != nil { + return NewNonFatalError(errwrap.Wrapf("unseal with stored key failed: {{err}}", err)) + } + keysUsed++ + if unsealed { + break + } + } + + if !unsealed { + // This most likely means that the user configured Vault to only store a + // subset of the required threshold of keys. We still consider this a + // "success", since trying again would yield the same result. 
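+	// Editorial example (not upstream code): a setup with a threshold of 3
+	// that stores only one unseal key will consume that key above, remain
+	// sealed, and log the warning below; retrying cannot change the outcome,
+	// which is why this is still treated as a non-error.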
+ c.Logger().Warn("vault still sealed after using stored unseal keys", "stored_keys_used", keysUsed) + } else { + c.Logger().Info("unsealed with stored keys", "stored_keys_used", keysUsed) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/keyring.go b/vendor/github.com/hashicorp/vault/vault/keyring.go new file mode 100644 index 00000000..9c488715 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/keyring.go @@ -0,0 +1,203 @@ +package vault + +import ( + "bytes" + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +// Keyring is used to manage multiple encryption keys used by +// the barrier. New keys can be installed and each has a sequential term. +// The term used to encrypt a key is prefixed to the key written out. +// All data is encrypted with the latest key, but storing the old keys +// allows for decryption of keys written previously. Along with the encryption +// keys, the keyring also tracks the master key. This is necessary so that +// when a new key is added to the keyring, we can encrypt with the master key +// and write out the new keyring. +type Keyring struct { + masterKey []byte + keys map[uint32]*Key + activeTerm uint32 +} + +// EncodedKeyring is used for serialization of the keyring +type EncodedKeyring struct { + MasterKey []byte + Keys []*Key +} + +// Key represents a single term, along with the key used. +type Key struct { + Term uint32 + Version int + Value []byte + InstallTime time.Time +} + +// Serialize is used to create a byte encoded key +func (k *Key) Serialize() ([]byte, error) { + return json.Marshal(k) +} + +// DeserializeKey is used to deserialize and return a new key +func DeserializeKey(buf []byte) (*Key, error) { + k := new(Key) + if err := jsonutil.DecodeJSON(buf, k); err != nil { + return nil, errwrap.Wrapf("deserialization failed: {{err}}", err) + } + return k, nil +} + +// NewKeyring creates a new keyring +func NewKeyring() *Keyring { + k := &Keyring{ + keys: make(map[uint32]*Key), + activeTerm: 0, + } + return k +} + +// Clone returns a new copy of the keyring +func (k *Keyring) Clone() *Keyring { + clone := &Keyring{ + masterKey: k.masterKey, + keys: make(map[uint32]*Key, len(k.keys)), + activeTerm: k.activeTerm, + } + for idx, key := range k.keys { + clone.keys[idx] = key + } + return clone +} + +// AddKey adds a new key to the keyring +func (k *Keyring) AddKey(key *Key) (*Keyring, error) { + // Ensure there is no conflict + if exist, ok := k.keys[key.Term]; ok { + if !bytes.Equal(key.Value, exist.Value) { + return nil, fmt.Errorf("conflicting key for term %d already installed", key.Term) + } + return k, nil + } + + // Add a time if none + if key.InstallTime.IsZero() { + key.InstallTime = time.Now() + } + + // Make a new keyring + clone := k.Clone() + + // Install the new key + clone.keys[key.Term] = key + + // Update the active term if newer + if key.Term > clone.activeTerm { + clone.activeTerm = key.Term + } + return clone, nil +} + +// RemoveKey removes a key from the keyring +func (k *Keyring) RemoveKey(term uint32) (*Keyring, error) { + // Ensure this is not the active key + if term == k.activeTerm { + return nil, fmt.Errorf("cannot remove active key") + } + + // Check if this term does not exist + if _, ok := k.keys[term]; !ok { + return k, nil + } + + // Delete the key + clone := k.Clone() + delete(clone.keys, term) + return clone, nil +} + +// ActiveTerm returns the currently active term +func (k *Keyring) ActiveTerm() uint32 { + 
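+	// Editorial aside (not upstream code): terms only grow; AddKey above
+	// bumps activeTerm when a newer term is installed and RemoveKey refuses
+	// to drop the active term, so this always names the key used for new
+	// encryptions.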
return k.activeTerm +} + +// ActiveKey returns the active encryption key, or nil +func (k *Keyring) ActiveKey() *Key { + return k.keys[k.activeTerm] +} + +// TermKey returns the key for the given term, or nil +func (k *Keyring) TermKey(term uint32) *Key { + return k.keys[term] +} + +// SetMasterKey is used to update the master key +func (k *Keyring) SetMasterKey(val []byte) *Keyring { + valCopy := make([]byte, len(val)) + copy(valCopy, val) + clone := k.Clone() + clone.masterKey = valCopy + return clone +} + +// MasterKey returns the master key +func (k *Keyring) MasterKey() []byte { + return k.masterKey +} + +// Serialize is used to create a byte encoded keyring +func (k *Keyring) Serialize() ([]byte, error) { + // Create the encoded entry + enc := EncodedKeyring{ + MasterKey: k.masterKey, + } + for _, key := range k.keys { + enc.Keys = append(enc.Keys, key) + } + + // JSON encode the keyring + buf, err := json.Marshal(enc) + return buf, err +} + +// DeserializeKeyring is used to deserialize and return a new keyring +func DeserializeKeyring(buf []byte) (*Keyring, error) { + // Deserialize the keyring + var enc EncodedKeyring + if err := jsonutil.DecodeJSON(buf, &enc); err != nil { + return nil, errwrap.Wrapf("deserialization failed: {{err}}", err) + } + + // Create a new keyring + k := NewKeyring() + k.masterKey = enc.MasterKey + for _, key := range enc.Keys { + k.keys[key.Term] = key + if key.Term > k.activeTerm { + k.activeTerm = key.Term + } + } + return k, nil +} + +// N.B.: +// Since Go 1.5 these are not reliable; see the documentation around the memzero +// function. These are best-effort. +func (k *Keyring) Zeroize(keysToo bool) { + if k == nil { + return + } + if k.masterKey != nil { + memzero(k.masterKey) + } + if !keysToo || k.keys == nil { + return + } + for _, key := range k.keys { + memzero(key.Value) + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go new file mode 100644 index 00000000..af8a21fb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go @@ -0,0 +1,240 @@ +package vault + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// CubbyholeBackendFactory constructs a new cubbyhole backend +func CubbyholeBackendFactory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := &CubbyholeBackend{} + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(cubbyholeHelp), + } + + b.Backend.Paths = append(b.Backend.Paths, b.paths()...) + + if conf == nil { + return nil, fmt.Errorf("configuration passed into backend is nil") + } + b.Backend.Setup(ctx, conf) + + return b, nil +} + +// CubbyholeBackend is used for storing secrets directly into the physical +// backend. The secrets are encrypted in the durable storage. +// This differs from kv in that every token has its own private +// storage view. The view is removed when the token expires. 
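+// Editorial example (hedged, derived from the handlers below): a write to
+// cubbyhole/foo is stored under "<client token>/foo" in this backend's
+// storage view, so two tokens writing the same path never see each other's
+// data, and revoke() clears the "<salted token>/" subtree when the token
+// expires.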
+type CubbyholeBackend struct { + *framework.Backend + + saltUUID string + storageView logical.Storage +} + +func (b *CubbyholeBackend) paths() []*framework.Path { + return []*framework.Path{ + { + Pattern: framework.MatchAllRegex("path"), + + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: "Specifies the path of the secret.", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handleRead, + Summary: "Retrieve the secret at the specified location.", + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handleWrite, + Summary: "Store a secret at the specified location.", + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.handleWrite, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.handleDelete, + Summary: "Deletes the secret at the specified location.", + }, + logical.ListOperation: &framework.PathOperation{ + Callback: b.handleList, + Summary: "List secret entries at the specified location.", + Description: "Folders are suffixed with /. The input must be a folder; list on a file will not return a value. The values themselves are not accessible via this command.", + }, + }, + + ExistenceCheck: b.handleExistenceCheck, + + HelpSynopsis: strings.TrimSpace(cubbyholeHelpSynopsis), + HelpDescription: strings.TrimSpace(cubbyholeHelpDescription), + }, + } +} + +func (b *CubbyholeBackend) revoke(ctx context.Context, saltedToken string) error { + if saltedToken == "" { + return fmt.Errorf("client token empty during revocation") + } + + if err := logical.ClearView(ctx, b.storageView.(*BarrierView).SubView(saltedToken+"/")); err != nil { + return err + } + + return nil +} + +func (b *CubbyholeBackend) handleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + out, err := req.Storage.Get(ctx, req.ClientToken+"/"+req.Path) + if err != nil { + return false, errwrap.Wrapf("existence check failed: {{err}}", err) + } + + return out != nil, nil +} + +func (b *CubbyholeBackend) handleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if req.ClientToken == "" { + return nil, fmt.Errorf("client token empty") + } + + path := data.Get("path").(string) + + // Read the path + out, err := req.Storage.Get(ctx, req.ClientToken+"/"+path) + if err != nil { + return nil, errwrap.Wrapf("read failed: {{err}}", err) + } + + // Fast-path the no data case + if out == nil { + return nil, nil + } + + // Decode the data + var rawData map[string]interface{} + if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil { + return nil, errwrap.Wrapf("json decoding failed: {{err}}", err) + } + + // Generate the response + resp := &logical.Response{ + Data: rawData, + } + + return resp, nil +} + +func (b *CubbyholeBackend) handleWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if req.ClientToken == "" { + return nil, fmt.Errorf("client token empty") + } + // Check that some fields are given + if len(req.Data) == 0 { + return nil, fmt.Errorf("missing data fields") + } + + path := data.Get("path").(string) + + // JSON encode the data + buf, err := json.Marshal(req.Data) + if err != nil { + return nil, errwrap.Wrapf("json encoding failed: {{err}}", err) + } + + // Write out a new key + entry := &logical.StorageEntry{ + Key: req.ClientToken + "/" + path, + Value: buf, + } + 
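+	// Editorial aside (not upstream code): req.Data is persisted as one JSON
+	// blob, e.g. {"password": "x"} marshals to `{"password":"x"}`; handleRead
+	// above decodes that same blob back into the response data.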
if req.WrapInfo != nil && req.WrapInfo.SealWrap { + entry.SealWrap = true + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, errwrap.Wrapf("failed to write: {{err}}", err) + } + + return nil, nil +} + +func (b *CubbyholeBackend) handleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if req.ClientToken == "" { + return nil, fmt.Errorf("client token empty") + } + + path := data.Get("path").(string) + + // Delete the key at the request path + if err := req.Storage.Delete(ctx, req.ClientToken+"/"+path); err != nil { + return nil, err + } + + return nil, nil +} + +func (b *CubbyholeBackend) handleList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if req.ClientToken == "" { + return nil, fmt.Errorf("client token empty") + } + + // Right now we only handle directories, so ensure it ends with / We also + // check if it's empty so we don't end up doing a listing on '//' + path := data.Get("path").(string) + if path != "" && !strings.HasSuffix(path, "/") { + path = path + "/" + } + + // List the keys at the prefix given by the request + keys, err := req.Storage.List(ctx, req.ClientToken+"/"+path) + if err != nil { + return nil, err + } + + // Strip the token + strippedKeys := make([]string, len(keys)) + for i, key := range keys { + strippedKeys[i] = strings.TrimPrefix(key, req.ClientToken+"/") + } + + // Generate the response + return logical.ListResponse(strippedKeys), nil +} + +const cubbyholeHelp = ` +The cubbyhole backend reads and writes arbitrary secrets to the backend. +The secrets are encrypted/decrypted by Vault: they are never stored +unencrypted in the backend and the backend never has an opportunity to +see the unencrypted value. + +This backend differs from the 'kv' backend in that it is namespaced +per-token. Tokens can only read and write their own values, with no +sharing possible (per-token cubbyholes). This can be useful for implementing +certain authentication workflows, as well as "scratch" areas for individual +clients. When the token is revoked, the entire set of stored values for that +token is also removed. +` + +const cubbyholeHelpSynopsis = ` +Pass-through secret storage to a token-specific cubbyhole in the storage +backend, allowing you to read/write arbitrary data into secret storage. +` + +const cubbyholeHelpDescription = ` +The cubbyhole backend reads and writes arbitrary data into secret storage, +encrypting it along the way. + +The view into the cubbyhole storage space is different for each token; it is +a per-token cubbyhole. When the token is revoked all values are removed. 
+`
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go b/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
new file mode 100644
index 00000000..aefdf19a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
@@ -0,0 +1,252 @@
+package vault
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/helper/parseutil"
+	"github.com/hashicorp/vault/sdk/helper/wrapping"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// PassthroughBackendFactory returns a PassthroughBackend
+// with leases switched off
+func PassthroughBackendFactory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	return LeaseSwitchedPassthroughBackend(ctx, conf, false)
+}
+
+// LeasedPassthroughBackendFactory returns a PassthroughBackend
+// with leases switched on
+func LeasedPassthroughBackendFactory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	return LeaseSwitchedPassthroughBackend(ctx, conf, true)
+}
+
+// LeaseSwitchedPassthroughBackend returns a PassthroughBackend
+// with leases switched on or off
+func LeaseSwitchedPassthroughBackend(ctx context.Context, conf *logical.BackendConfig, leases bool) (logical.Backend, error) {
+	var b PassthroughBackend
+	b.generateLeases = leases
+	b.Backend = &framework.Backend{
+		Help: strings.TrimSpace(passthroughHelp),
+
+		PathsSpecial: &logical.Paths{
+			SealWrapStorage: []string{
+				"*",
+			},
+		},
+
+		Paths: []*framework.Path{
+			{
+				Pattern: ".*",
+
+				Callbacks: map[logical.Operation]framework.OperationFunc{
+					logical.ReadOperation:   b.handleRead,
+					logical.CreateOperation: b.handleWrite,
+					logical.UpdateOperation: b.handleWrite,
+					logical.DeleteOperation: b.handleDelete,
+					logical.ListOperation:   b.handleList,
+				},
+
+				ExistenceCheck: b.handleExistenceCheck,
+
+				HelpSynopsis:    strings.TrimSpace(passthroughHelpSynopsis),
+				HelpDescription: strings.TrimSpace(passthroughHelpDescription),
+			},
+		},
+		BackendType: logical.TypeLogical,
+	}
+
+	b.Backend.Secrets = []*framework.Secret{
+		&framework.Secret{
+			Type: "kv",
+
+			Renew:  b.handleRead,
+			Revoke: b.handleRevoke,
+		},
+	}
+
+	if conf == nil {
+		return nil, fmt.Errorf("configuration passed into backend is nil")
+	}
+	b.Backend.Setup(ctx, conf)
+
+	return &b, nil
+}
+
+// PassthroughBackend is used for storing secrets directly into the physical
+// backend. The secrets are encrypted in the durable storage and custom TTL
+// information can be specified, but otherwise this backend doesn't do anything
+// fancy.
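+// Editorial note (hedged, derived from handleRead below): a "ttl" (or legacy
+// "lease") field in the stored data is parsed at read time and used as the
+// response TTL; when generateLeases is enabled it also marks the secret
+// renewable.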
+type PassthroughBackend struct { + *framework.Backend + generateLeases bool +} + +func (b *PassthroughBackend) handleRevoke(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // This is a no-op + return nil, nil +} + +func (b *PassthroughBackend) handleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + out, err := req.Storage.Get(ctx, req.Path) + if err != nil { + return false, errwrap.Wrapf("existence check failed: {{err}}", err) + } + + return out != nil, nil +} + +func (b *PassthroughBackend) handleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Read the path + out, err := req.Storage.Get(ctx, req.Path) + if err != nil { + return nil, errwrap.Wrapf("read failed: {{err}}", err) + } + + // Fast-path the no data case + if out == nil { + return nil, nil + } + + // Decode the data + var rawData map[string]interface{} + + if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil { + return nil, errwrap.Wrapf("json decoding failed: {{err}}", err) + } + + var resp *logical.Response + if b.generateLeases { + // Generate the response + resp = b.Secret("kv").Response(rawData, nil) + resp.Secret.Renewable = false + } else { + resp = &logical.Response{ + Secret: &logical.Secret{}, + Data: rawData, + } + } + + // Ensure seal wrapping is carried through if the response is + // response-wrapped + if out.SealWrap { + if resp.WrapInfo == nil { + resp.WrapInfo = &wrapping.ResponseWrapInfo{} + } + resp.WrapInfo.SealWrap = out.SealWrap + } + + // Check if there is a ttl key + ttlDuration := b.System().DefaultLeaseTTL() + ttlRaw, ok := rawData["ttl"] + if !ok { + ttlRaw, ok = rawData["lease"] + } + if ok { + dur, err := parseutil.ParseDurationSecond(ttlRaw) + if err == nil { + ttlDuration = dur + } + + if b.generateLeases { + resp.Secret.Renewable = true + } + } + + resp.Secret.TTL = ttlDuration + + return resp, nil +} + +func (b *PassthroughBackend) GeneratesLeases() bool { + return b.generateLeases +} + +func (b *PassthroughBackend) handleWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if req.Path == "" { + return logical.ErrorResponse("missing path"), nil + } + + // Check that some fields are given + if len(req.Data) == 0 { + return logical.ErrorResponse("missing data fields"), nil + } + + // JSON encode the data + buf, err := json.Marshal(req.Data) + if err != nil { + return nil, errwrap.Wrapf("json encoding failed: {{err}}", err) + } + + // Write out a new key + entry := &logical.StorageEntry{ + Key: req.Path, + Value: buf, + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, errwrap.Wrapf("failed to write: {{err}}", err) + } + + return nil, nil +} + +func (b *PassthroughBackend) handleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Delete the key at the request path + if err := req.Storage.Delete(ctx, req.Path); err != nil { + return nil, err + } + + return nil, nil +} + +func (b *PassthroughBackend) handleList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Right now we only handle directories, so ensure it ends with /; however, + // some physical backends may not handle the "/" case properly, so only add + // it if we're not listing the root + path := req.Path + if path != "" && !strings.HasSuffix(path, "/") { + path = path + "/" + } + + // 
List the keys at the prefix given by the request + keys, err := req.Storage.List(ctx, path) + if err != nil { + return nil, err + } + + // Generate the response + return logical.ListResponse(keys), nil +} + +const passthroughHelp = ` +The kv backend reads and writes arbitrary secrets to the backend. +The secrets are encrypted/decrypted by Vault: they are never stored +unencrypted in the backend and the backend never has an opportunity to +see the unencrypted value. + +TTLs can be set on a per-secret basis. These TTLs will be sent down +when that secret is read, and it is assumed that some outside process will +revoke and/or replace the secret at that path. +` + +const passthroughHelpSynopsis = ` +Pass-through secret storage to the storage backend, allowing you to +read/write arbitrary data into secret storage. +` + +const passthroughHelpDescription = ` +The pass-through backend reads and writes arbitrary data into secret storage, +encrypting it along the way. + +A TTL can be specified when writing with the "ttl" field. If given, the +duration of leases returned by this backend will be set to this value. This +can be used as a hint from the writer of a secret to the consumer of a secret +that the consumer should re-read the value before the TTL has expired. +However, any revocation must be handled by the user of this backend; the lease +duration does not affect the provided data in any way. +` diff --git a/vendor/github.com/hashicorp/vault/vault/logical_raw.go b/vendor/github.com/hashicorp/vault/vault/logical_raw.go new file mode 100644 index 00000000..3c73fc30 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/logical_raw.go @@ -0,0 +1,217 @@ +package vault + +import ( + "context" + "fmt" + "strings" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/compressutil" + "github.com/hashicorp/vault/sdk/logical" +) + +var ( + // protectedPaths cannot be accessed via the raw APIs. + // This is both for security and to prevent disrupting Vault. 
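+	// Editorial aside (not upstream code): the handlers below reject any raw
+	// request whose path carries one of these prefixes, so e.g. a raw read of
+	// the keyring path returns "cannot read '<path>'" with ErrInvalidRequest
+	// instead of exposing barrier key material.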
+ protectedPaths = []string{ + keyringPath, + // Changing the cluster info path can change the cluster ID which can be disruptive + coreLocalClusterInfoPath, + } +) + +type RawBackend struct { + *framework.Backend + barrier SecurityBarrier + logger log.Logger + checkRaw func(path string) error + recoveryMode bool +} + +func NewRawBackend(core *Core) *RawBackend { + r := &RawBackend{ + barrier: core.barrier, + logger: core.logger.Named("raw"), + checkRaw: func(path string) error { + return nil + }, + recoveryMode: core.recoveryMode, + } + r.Backend = &framework.Backend{ + Paths: rawPaths("sys/", r), + } + return r +} + +// handleRawRead is used to read directly from the barrier +func (b *RawBackend) handleRawRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + + if b.recoveryMode { + b.logger.Info("reading", "path", path) + } + + // Prevent access of protected paths + for _, p := range protectedPaths { + if strings.HasPrefix(path, p) { + err := fmt.Sprintf("cannot read '%s'", path) + return logical.ErrorResponse(err), logical.ErrInvalidRequest + } + } + + // Run additional checks if needed + if err := b.checkRaw(path); err != nil { + b.logger.Warn(err.Error(), "path", path) + return logical.ErrorResponse("cannot read '%s'", path), logical.ErrInvalidRequest + } + + entry, err := b.barrier.Get(ctx, path) + if err != nil { + return handleErrorNoReadOnlyForward(err) + } + if entry == nil { + return nil, nil + } + + // Run this through the decompression helper to see if it's been compressed. + // If the input contained the compression canary, `outputBytes` will hold + // the decompressed data. If the input was not compressed, then `outputBytes` + // will be nil. + outputBytes, _, err := compressutil.Decompress(entry.Value) + if err != nil { + return handleErrorNoReadOnlyForward(err) + } + + // `outputBytes` is nil if the input is uncompressed. In that case set it to the original input. 
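[Editorial aside: the decompression fallback in handleRawRead above is easier to see in isolation. A minimal round-trip sketch using the same sdk helper; as the comments above describe, Decompress returns nil output and notCompressed=true for input that lacks the compression canary:]

package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/compressutil"
)

func main() {
	plain := []byte(`{"k":"v"}`)

	// Compress prepends a one-byte canary identifying the compression type.
	packed, err := compressutil.Compress(plain, &compressutil.CompressionConfig{
		Type: compressutil.CompressionTypeGzip,
	})
	if err != nil {
		panic(err)
	}

	out, notCompressed, err := compressutil.Decompress(packed)
	if err != nil {
		panic(err)
	}
	fmt.Println(notCompressed, bytes.Equal(out, plain)) // false true

	// Input without a canary is reported as uncompressed and out is nil,
	// which is why handleRawRead falls back to entry.Value.
	out2, notCompressed2, _ := compressutil.Decompress(plain)
	fmt.Println(notCompressed2, out2 == nil) // true true
}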
+ if outputBytes == nil { + outputBytes = entry.Value + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "value": string(outputBytes), + }, + } + return resp, nil +} + +// handleRawWrite is used to write directly to the barrier +func (b *RawBackend) handleRawWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + + if b.recoveryMode { + b.logger.Info("writing", "path", path) + } + + // Prevent access of protected paths + for _, p := range protectedPaths { + if strings.HasPrefix(path, p) { + err := fmt.Sprintf("cannot write '%s'", path) + return logical.ErrorResponse(err), logical.ErrInvalidRequest + } + } + + value := data.Get("value").(string) + entry := &logical.StorageEntry{ + Key: path, + Value: []byte(value), + } + if err := b.barrier.Put(ctx, entry); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + return nil, nil +} + +// handleRawDelete is used to delete directly from the barrier +func (b *RawBackend) handleRawDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + + if b.recoveryMode { + b.logger.Info("deleting", "path", path) + } + + // Prevent access of protected paths + for _, p := range protectedPaths { + if strings.HasPrefix(path, p) { + err := fmt.Sprintf("cannot delete '%s'", path) + return logical.ErrorResponse(err), logical.ErrInvalidRequest + } + } + + if err := b.barrier.Delete(ctx, path); err != nil { + return handleErrorNoReadOnlyForward(err) + } + return nil, nil +} + +// handleRawList is used to list directly from the barrier +func (b *RawBackend) handleRawList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + if path != "" && !strings.HasSuffix(path, "/") { + path = path + "/" + } + + if b.recoveryMode { + b.logger.Info("listing", "path", path) + } + + // Prevent access of protected paths + for _, p := range protectedPaths { + if strings.HasPrefix(path, p) { + err := fmt.Sprintf("cannot list '%s'", path) + return logical.ErrorResponse(err), logical.ErrInvalidRequest + } + } + + // Run additional checks if needed + if err := b.checkRaw(path); err != nil { + b.logger.Warn(err.Error(), "path", path) + return logical.ErrorResponse("cannot list '%s'", path), logical.ErrInvalidRequest + } + + keys, err := b.barrier.List(ctx, path) + if err != nil { + return handleErrorNoReadOnlyForward(err) + } + return logical.ListResponse(keys), nil +} + +func rawPaths(prefix string, r *RawBackend) []*framework.Path { + return []*framework.Path{ + &framework.Path{ + Pattern: prefix + "(raw/?$|raw/(?P<path>.+))", + + Fields: map[string]*framework.FieldSchema{ + "path": &framework.FieldSchema{ + Type: framework.TypeString, + }, + "value": &framework.FieldSchema{ + Type: framework.TypeString, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: r.handleRawRead, + Summary: "Read the value of the key at the given path.", + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: r.handleRawWrite, + Summary: "Update the value of the key at the given path.", + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: r.handleRawDelete, + Summary: "Delete the key with the given path.", + }, + logical.ListOperation: &framework.PathOperation{ + Callback: r.handleRawList, + Summary: 
"Return a list keys for a given path prefix.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["raw"][0]), + HelpDescription: strings.TrimSpace(sysHelp["raw"][1]), + }, + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system.go b/vendor/github.com/hashicorp/vault/vault/logical_system.go new file mode 100644 index 00000000..876c9abd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/logical_system.go @@ -0,0 +1,4059 @@ +package vault + +import ( + "context" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "hash" + "net/http" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/physical/raft" + + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + memdb "github.com/hashicorp/go-memdb" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/hostutil" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +const maxBytes = 128 * 1024 + +func systemBackendMemDBSchema() *memdb.DBSchema { + systemSchema := &memdb.DBSchema{ + Tables: make(map[string]*memdb.TableSchema), + } + + schemas := getSystemSchemas() + + for _, schemaFunc := range schemas { + schema := schemaFunc() + if _, ok := systemSchema.Tables[schema.Name]; ok { + panic(fmt.Sprintf("duplicate table name: %s", schema.Name)) + } + systemSchema.Tables[schema.Name] = schema + } + + return systemSchema +} + +func NewSystemBackend(core *Core, logger log.Logger) *SystemBackend { + db, _ := memdb.NewMemDB(systemBackendMemDBSchema()) + + b := &SystemBackend{ + Core: core, + db: db, + logger: logger, + mfaLogger: core.baseLogger.Named("mfa"), + mfaLock: &sync.RWMutex{}, + } + + core.AddLogger(b.mfaLogger) + + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(sysHelpRoot), + + PathsSpecial: &logical.Paths{ + Root: []string{ + "auth/*", + "remount", + "audit", + "audit/*", + "raw", + "raw/*", + "replication/primary/secondary-token", + "replication/performance/primary/secondary-token", + "replication/dr/primary/secondary-token", + "replication/reindex", + "replication/dr/reindex", + "replication/performance/reindex", + "rotate", + "config/cors", + "config/auditing/*", + "config/ui/headers/*", + "plugins/catalog/*", + "revoke-prefix/*", + "revoke-force/*", + "leases/revoke-prefix/*", + "leases/revoke-force/*", + "leases/lookup/*", + }, + + Unauthenticated: []string{ + "wrapping/lookup", + "wrapping/pubkey", + "replication/status", + "internal/specs/openapi", + "internal/ui/mounts", + "internal/ui/mounts/*", + "internal/ui/namespaces", + "replication/performance/status", + "replication/dr/status", + "replication/dr/secondary/promote", + "replication/dr/secondary/update-primary", + "replication/dr/secondary/operation-token/delete", + "replication/dr/secondary/license", + "replication/dr/secondary/reindex", + "storage/raft/bootstrap/challenge", + "storage/raft/bootstrap/answer", + "init", + "seal-status", + "unseal", + "leader", + "health", + 
"generate-root/attempt", + "generate-root/update", + "rekey/init", + "rekey/update", + "rekey/verify", + "rekey-recovery-key/init", + "rekey-recovery-key/update", + "rekey-recovery-key/verify", + }, + + LocalStorage: []string{ + expirationSubPath, + }, + }, + } + + b.Backend.Paths = append(b.Backend.Paths, entPaths(b)...) + b.Backend.Paths = append(b.Backend.Paths, b.configPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.rekeyPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.sealPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.pluginsCatalogListPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.pluginsCatalogCRUDPath()) + b.Backend.Paths = append(b.Backend.Paths, b.pluginsReloadPath()) + b.Backend.Paths = append(b.Backend.Paths, b.auditPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.mountPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.authPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.leasePaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.policyPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.wrappingPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.toolsPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.capabilitiesPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.internalPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.pprofPaths()...) + b.Backend.Paths = append(b.Backend.Paths, b.remountPath()) + b.Backend.Paths = append(b.Backend.Paths, b.metricsPath()) + b.Backend.Paths = append(b.Backend.Paths, b.hostInfoPath()) + + if core.rawEnabled { + b.Backend.Paths = append(b.Backend.Paths, b.rawPaths()...) + } + + if _, ok := core.underlyingPhysical.(*raft.RaftBackend); ok { + b.Backend.Paths = append(b.Backend.Paths, b.raftStoragePaths()...) + } + + b.Backend.Invalidate = sysInvalidate(b) + return b +} + +func (b *SystemBackend) rawPaths() []*framework.Path { + r := &RawBackend{ + barrier: b.Core.barrier, + logger: b.logger, + checkRaw: func(path string) error { + return checkRaw(b, path) + }, + } + return rawPaths("", r) +} + +// SystemBackend implements logical.Backend and is used to interact with +// the core of the system. This backend is hardcoded to exist at the "sys" +// prefix. Conceptually it is similar to procfs on Linux. +type SystemBackend struct { + *framework.Backend + Core *Core + db *memdb.MemDB + mfaLock *sync.RWMutex + mfaLogger log.Logger + logger log.Logger +} + +// handleConfigStateSanitized returns the current configuration state. The configuration +// data that it returns is a sanitized version of the combined configuration +// file(s) provided. 
+func (b *SystemBackend) handleConfigStateSanitized(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + config := b.Core.SanitizedConfig() + resp := &logical.Response{ + Data: config, + } + return resp, nil +} + +// handleCORSRead returns the current CORS configuration +func (b *SystemBackend) handleCORSRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + corsConf := b.Core.corsConfig + + enabled := corsConf.IsEnabled() + + resp := &logical.Response{ + Data: map[string]interface{}{ + "enabled": enabled, + }, + } + + if enabled { + corsConf.RLock() + resp.Data["allowed_origins"] = corsConf.AllowedOrigins + resp.Data["allowed_headers"] = corsConf.AllowedHeaders + corsConf.RUnlock() + } + + return resp, nil +} + +// handleCORSUpdate sets the list of origins that are allowed to make +// cross-origin requests and sets the CORS enabled flag to true +func (b *SystemBackend) handleCORSUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + origins := d.Get("allowed_origins").([]string) + headers := d.Get("allowed_headers").([]string) + + return nil, b.Core.corsConfig.Enable(ctx, origins, headers) +} + +// handleCORSDelete sets the CORS enabled flag to false and clears the list of +// allowed origins & headers. +func (b *SystemBackend) handleCORSDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return nil, b.Core.corsConfig.Disable(ctx) +} + +func (b *SystemBackend) handleTidyLeases(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + go func() { + tidyCtx := namespace.ContextWithNamespace(b.Core.activeContext, ns) + err := b.Core.expiration.Tidy(tidyCtx) + if err != nil { + b.Backend.Logger().Error("failed to tidy leases", "error", err) + return + } + }() + + resp := &logical.Response{} + resp.AddWarning("Tidy operation successfully started. 
Any information from the operation will be printed to Vault's server logs.") + return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) +} + +func (b *SystemBackend) handlePluginCatalogTypedList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + pluginType, err := consts.ParsePluginType(d.Get("type").(string)) + if err != nil { + return nil, err + } + + plugins, err := b.Core.pluginCatalog.List(ctx, pluginType) + if err != nil { + return nil, err + } + return logical.ListResponse(plugins), nil +} + +func (b *SystemBackend) handlePluginCatalogUntypedList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + pluginsByType := make(map[string]interface{}) + for _, pluginType := range consts.PluginTypes { + plugins, err := b.Core.pluginCatalog.List(ctx, pluginType) + if err != nil { + return nil, err + } + if len(plugins) > 0 { + sort.Strings(plugins) + pluginsByType[pluginType.String()] = plugins + } + } + return &logical.Response{ + Data: pluginsByType, + }, nil +} + +func (b *SystemBackend) handlePluginCatalogUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + pluginName := d.Get("name").(string) + if pluginName == "" { + return logical.ErrorResponse("missing plugin name"), nil + } + + pluginTypeStr := d.Get("type").(string) + if pluginTypeStr == "" { + // If the plugin type is not provided, list it as unknown so that we + // add it to the catalog; UpdatePlugins will sort it out later. + pluginTypeStr = "unknown" + } + pluginType, err := consts.ParsePluginType(pluginTypeStr) + if err != nil { + return nil, err + } + + sha256 := d.Get("sha256").(string) + if sha256 == "" { + sha256 = d.Get("sha_256").(string) + if sha256 == "" { + return logical.ErrorResponse("missing SHA-256 value"), nil + } + } + + command := d.Get("command").(string) + if command == "" { + return logical.ErrorResponse("missing command value"), nil + } + + // For backwards compatibility, also accept args as part of command. Don't + // accept args in both command and args. + args := d.Get("args").([]string) + parts := strings.Split(command, " ") + if len(parts) <= 0 { + return logical.ErrorResponse("missing command value"), nil + } else if len(parts) > 1 && len(args) > 0 { + return logical.ErrorResponse("must not specify args in command and args field"), nil + } else if len(parts) > 1 { + args = parts[1:] + } + + env := d.Get("env").([]string) + + sha256Bytes, err := hex.DecodeString(sha256) + if err != nil { + return logical.ErrorResponse("Could not decode SHA-256 value from Hex"), err + } + + err = b.Core.pluginCatalog.Set(ctx, pluginName, pluginType, parts[0], args, env, sha256Bytes) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *SystemBackend) handlePluginCatalogRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + pluginName := d.Get("name").(string) + if pluginName == "" { + return logical.ErrorResponse("missing plugin name"), nil + } + + pluginTypeStr := d.Get("type").(string) + if pluginTypeStr == "" { + // If the plugin type is not provided (i.e. 
the old + // sys/plugins/catalog/:name endpoint is being requested) short-circuit here + // and return a warning + resp := &logical.Response{} + resp.AddWarning(fmt.Sprintf("Deprecated API endpoint, cannot read plugin information from catalog for %q", pluginName)) + return resp, nil + } + + pluginType, err := consts.ParsePluginType(pluginTypeStr) + if err != nil { + return nil, err + } + + plugin, err := b.Core.pluginCatalog.Get(ctx, pluginName, pluginType) + if err != nil { + return nil, err + } + if plugin == nil { + return nil, nil + } + + command := "" + if !plugin.Builtin { + command, err = filepath.Rel(b.Core.pluginCatalog.directory, plugin.Command) + if err != nil { + return nil, err + } + } + + data := map[string]interface{}{ + "name": plugin.Name, + "args": plugin.Args, + "command": command, + "sha256": hex.EncodeToString(plugin.Sha256), + "builtin": plugin.Builtin, + } + + return &logical.Response{ + Data: data, + }, nil +} + +func (b *SystemBackend) handlePluginCatalogDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + pluginName := d.Get("name").(string) + if pluginName == "" { + return logical.ErrorResponse("missing plugin name"), nil + } + + var resp *logical.Response + pluginTypeStr := d.Get("type").(string) + if pluginTypeStr == "" { + // If the plugin type is not provided (i.e. the old + // sys/plugins/catalog/:name endpoint is being requested), set type to + // unknown and let pluginCatalog.Delete proceed. It should handle + // deregistering out of the old storage path (root of core/plugin-catalog) + resp = new(logical.Response) + resp.AddWarning(fmt.Sprintf("Deprecated API endpoint, cannot deregister plugin from catalog for %q", pluginName)) + pluginTypeStr = "unknown" + } + + pluginType, err := consts.ParsePluginType(pluginTypeStr) + if err != nil { + return nil, err + } + if err := b.Core.pluginCatalog.Delete(ctx, pluginName, pluginType); err != nil { + return nil, err + } + + return resp, nil +} + +func (b *SystemBackend) handlePluginReloadUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + pluginName := d.Get("plugin").(string) + pluginMounts := d.Get("mounts").([]string) + + if pluginName != "" && len(pluginMounts) > 0 { + return logical.ErrorResponse("plugin and mounts cannot be set at the same time"), nil + } + if pluginName == "" && len(pluginMounts) == 0 { + return logical.ErrorResponse("plugin or mounts must be provided"), nil + } + + if pluginName != "" { + err := b.Core.reloadMatchingPlugin(ctx, pluginName) + if err != nil { + return nil, err + } + } else if len(pluginMounts) > 0 { + err := b.Core.reloadMatchingPluginMounts(ctx, pluginMounts) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +// handleAuditedHeaderUpdate creates or overwrites a header entry +func (b *SystemBackend) handleAuditedHeaderUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + header := d.Get("header").(string) + hmac := d.Get("hmac").(bool) + if header == "" { + return logical.ErrorResponse("missing header name"), nil + } + + headerConfig := b.Core.AuditedHeadersConfig() + err := headerConfig.add(ctx, header, hmac) + if err != nil { + return nil, err + } + + return nil, nil +} + +// handleAuditedHeaderDelete deletes the header with the given name +func (b *SystemBackend) handleAuditedHeaderDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + header := 
d.Get("header").(string) + if header == "" { + return logical.ErrorResponse("missing header name"), nil + } + + headerConfig := b.Core.AuditedHeadersConfig() + err := headerConfig.remove(ctx, header) + if err != nil { + return nil, err + } + + return nil, nil +} + +// handleAuditedHeaderRead returns the header configuration for the given header name +func (b *SystemBackend) handleAuditedHeaderRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + header := d.Get("header").(string) + if header == "" { + return logical.ErrorResponse("missing header name"), nil + } + + headerConfig := b.Core.AuditedHeadersConfig() + settings, ok := headerConfig.Headers[strings.ToLower(header)] + if !ok { + return logical.ErrorResponse("Could not find header in config"), nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + header: settings, + }, + }, nil +} + +// handleAuditedHeadersRead returns the whole audited headers config +func (b *SystemBackend) handleAuditedHeadersRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + headerConfig := b.Core.AuditedHeadersConfig() + + return &logical.Response{ + Data: map[string]interface{}{ + "headers": headerConfig.Headers, + }, + }, nil +} + +// handleCapabilitiesAccessor returns the ACL capabilities of the +// token associated with the given accessor for a given path. +func (b *SystemBackend) handleCapabilitiesAccessor(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + accessor := d.Get("accessor").(string) + if accessor == "" { + return logical.ErrorResponse("missing accessor"), nil + } + + aEntry, err := b.Core.tokenStore.lookupByAccessor(ctx, accessor, false, false) + if err != nil { + return nil, err + } + + d.Raw["token"] = aEntry.TokenID + return b.handleCapabilities(ctx, req, d) +} + +// handleCapabilities returns the ACL capabilities of the token for a given path +func (b *SystemBackend) handleCapabilities(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + var token string + if strings.HasSuffix(req.Path, "capabilities-self") { + token = req.ClientToken + } else { + tokenRaw, ok := d.Raw["token"] + if ok { + token, _ = tokenRaw.(string) + } + } + if token == "" { + return nil, fmt.Errorf("no token found") + } + + ret := &logical.Response{ + Data: map[string]interface{}{}, + } + + paths := d.Get("paths").([]string) + if len(paths) == 0 { + // Read from the deprecated field + paths = d.Get("path").([]string) + } + + if len(paths) == 0 { + return logical.ErrorResponse("paths must be supplied"), nil + } + + for _, path := range paths { + pathCap, err := b.Core.Capabilities(ctx, token, path) + if err != nil { + if !strings.HasSuffix(req.Path, "capabilities-self") && errwrap.Contains(err, logical.ErrPermissionDenied.Error()) { + return nil, &logical.StatusBadRequest{Err: "invalid token"} + } + return nil, err + } + ret.Data[path] = pathCap + } + + // This is only here for backwards compatibility + if len(paths) == 1 { + ret.Data["capabilities"] = ret.Data[paths[0]] + } + + return ret, nil +} + +// handleRekeyRetrieve returns backed-up, PGP-encrypted unseal keys from a +// rekey operation +func (b *SystemBackend) handleRekeyRetrieve( + ctx context.Context, + req *logical.Request, + data *framework.FieldData, + recovery bool) (*logical.Response, error) { + backup, err := b.Core.RekeyRetrieveBackup(ctx, recovery) + if err != nil { + return nil, 
errwrap.Wrapf("unable to look up backed-up keys: {{err}}", err) + } + if backup == nil { + return logical.ErrorResponse("no backed-up keys found"), nil + } + + keysB64 := map[string][]string{} + for k, v := range backup.Keys { + for _, j := range v { + currB64Keys := keysB64[k] + if currB64Keys == nil { + currB64Keys = []string{} + } + key, err := hex.DecodeString(j) + if err != nil { + return nil, errwrap.Wrapf("error decoding hex-encoded backup key: {{err}}", err) + } + currB64Keys = append(currB64Keys, base64.StdEncoding.EncodeToString(key)) + keysB64[k] = currB64Keys + } + } + + // Format the status + resp := &logical.Response{ + Data: map[string]interface{}{ + "nonce": backup.Nonce, + "keys": backup.Keys, + "keys_base64": keysB64, + }, + } + + return resp, nil +} + +func (b *SystemBackend) handleRekeyRetrieveBarrier(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.handleRekeyRetrieve(ctx, req, data, false) +} + +func (b *SystemBackend) handleRekeyRetrieveRecovery(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.handleRekeyRetrieve(ctx, req, data, true) +} + +// handleRekeyDelete deletes backed-up, PGP-encrypted unseal keys from a rekey +// operation +func (b *SystemBackend) handleRekeyDelete( + ctx context.Context, + req *logical.Request, + data *framework.FieldData, + recovery bool) (*logical.Response, error) { + err := b.Core.RekeyDeleteBackup(ctx, recovery) + if err != nil { + return nil, errwrap.Wrapf("error during deletion of backed-up keys: {{err}}", err) + } + + return nil, nil +} + +func (b *SystemBackend) handleRekeyDeleteBarrier(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.handleRekeyDelete(ctx, req, data, false) +} + +func (b *SystemBackend) handleRekeyDeleteRecovery(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.handleRekeyDelete(ctx, req, data, true) +} + +func mountInfo(entry *MountEntry) map[string]interface{} { + info := map[string]interface{}{ + "type": entry.Type, + "description": entry.Description, + "accessor": entry.Accessor, + "local": entry.Local, + "seal_wrap": entry.SealWrap, + "external_entropy_access": entry.ExternalEntropyAccess, + "options": entry.Options, + "uuid": entry.UUID, + } + entryConfig := map[string]interface{}{ + "default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()), + "max_lease_ttl": int64(entry.Config.MaxLeaseTTL.Seconds()), + "force_no_cache": entry.Config.ForceNoCache, + } + if rawVal, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok { + entryConfig["audit_non_hmac_request_keys"] = rawVal.([]string) + } + if rawVal, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_response_keys"); ok { + entryConfig["audit_non_hmac_response_keys"] = rawVal.([]string) + } + // Even though empty value is valid for ListingVisibility, we can ignore + // this case during mount since there's nothing to unset/hide. 
+ if len(entry.Config.ListingVisibility) > 0 { + entryConfig["listing_visibility"] = entry.Config.ListingVisibility + } + if rawVal, ok := entry.synthesizedConfigCache.Load("passthrough_request_headers"); ok { + entryConfig["passthrough_request_headers"] = rawVal.([]string) + } + if rawVal, ok := entry.synthesizedConfigCache.Load("allowed_response_headers"); ok { + entryConfig["allowed_response_headers"] = rawVal.([]string) + } + if entry.Table == credentialTableType { + entryConfig["token_type"] = entry.Config.TokenType.String() + } + + info["config"] = entryConfig + + return info +} + +// handleMountTable handles the "mounts" endpoint to provide the mount table +func (b *SystemBackend) handleMountTable(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + b.Core.mountsLock.RLock() + defer b.Core.mountsLock.RUnlock() + + resp := &logical.Response{ + Data: make(map[string]interface{}), + } + + for _, entry := range b.Core.mounts.Entries { + // Only show entries for current namespace + if entry.Namespace().Path != ns.Path { + continue + } + + cont, err := b.Core.checkReplicatedFiltering(ctx, entry, "") + if err != nil { + return nil, err + } + if cont { + continue + } + + // Populate mount info + info := mountInfo(entry) + resp.Data[entry.Path] = info + } + + return resp, nil +} + +// handleMount is used to mount a new path +func (b *SystemBackend) handleMount(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + repState := b.Core.ReplicationState() + + local := data.Get("local").(bool) + // If we are a performance secondary cluster we should forward the request + // to the primary. 
We fail early here since the view in use isn't marked as + // readonly + if !local && repState.HasState(consts.ReplicationPerformanceSecondary) { + return nil, logical.ErrReadOnly + } + + // Get all the options + path := data.Get("path").(string) + path = sanitizeMountPath(path) + + logicalType := data.Get("type").(string) + description := data.Get("description").(string) + pluginName := data.Get("plugin_name").(string) + sealWrap := data.Get("seal_wrap").(bool) + externalEntropyAccess := data.Get("external_entropy_access").(bool) + options := data.Get("options").(map[string]string) + + var config MountConfig + var apiConfig APIMountConfig + + configMap := data.Get("config").(map[string]interface{}) + if configMap != nil && len(configMap) != 0 { + err := mapstructure.Decode(configMap, &apiConfig) + if err != nil { + return logical.ErrorResponse( + "unable to convert given mount config information"), + logical.ErrInvalidRequest + } + } + + switch apiConfig.DefaultLeaseTTL { + case "": + case "system": + default: + tmpDef, err := parseutil.ParseDurationSecond(apiConfig.DefaultLeaseTTL) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "unable to parse default TTL of %s: %s", apiConfig.DefaultLeaseTTL, err)), + logical.ErrInvalidRequest + } + config.DefaultLeaseTTL = tmpDef + } + + switch apiConfig.MaxLeaseTTL { + case "": + case "system": + default: + tmpMax, err := parseutil.ParseDurationSecond(apiConfig.MaxLeaseTTL) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "unable to parse max TTL of %s: %s", apiConfig.MaxLeaseTTL, err)), + logical.ErrInvalidRequest + } + config.MaxLeaseTTL = tmpMax + } + + if config.MaxLeaseTTL != 0 && config.DefaultLeaseTTL > config.MaxLeaseTTL { + return logical.ErrorResponse( + "given default lease TTL greater than given max lease TTL"), + logical.ErrInvalidRequest + } + + if config.DefaultLeaseTTL > b.Core.maxLeaseTTL && config.MaxLeaseTTL == 0 { + return logical.ErrorResponse(fmt.Sprintf( + "given default lease TTL greater than system max lease TTL of %d", int(b.Core.maxLeaseTTL.Seconds()))), + logical.ErrInvalidRequest + } + + switch logicalType { + case "": + return logical.ErrorResponse( + "backend type must be specified as a string"), + logical.ErrInvalidRequest + case "plugin": + // Only set plugin-name if mount is of type plugin, with apiConfig.PluginName + // option taking precedence. 
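[Editorial aside: both TTL fields above accept anything parseutil.ParseDurationSecond understands; a bare integer is treated as seconds, while Go-style duration strings also work. A quick sketch:]

package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/parseutil"
)

func main() {
	d1, _ := parseutil.ParseDurationSecond("90") // bare numbers are seconds: 1m30s
	d2, _ := parseutil.ParseDurationSecond("1h") // Go duration strings: 1h0m0s
	d3, _ := parseutil.ParseDurationSecond(3600) // plain ints work too: 1h0m0s
	fmt.Println(d1, d2, d3)
}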
+ switch { + case apiConfig.PluginName != "": + logicalType = apiConfig.PluginName + case pluginName != "": + logicalType = pluginName + default: + return logical.ErrorResponse( + "plugin_name must be provided for plugin backend"), + logical.ErrInvalidRequest + } + } + + switch logicalType { + case "kv": + case "kv-v1": + // Alias KV v1 + logicalType = "kv" + if options == nil { + options = map[string]string{} + } + options["version"] = "1" + + case "kv-v2": + // Alias KV v2 + logicalType = "kv" + if options == nil { + options = map[string]string{} + } + options["version"] = "2" + + default: + if options != nil && options["version"] != "" { + return logical.ErrorResponse(fmt.Sprintf( + "secrets engine %q does not allow setting a version", logicalType)), + logical.ErrInvalidRequest + } + } + + // Copy over the force no cache if set + if apiConfig.ForceNoCache { + config.ForceNoCache = true + } + + if err := checkListingVisibility(apiConfig.ListingVisibility); err != nil { + return logical.ErrorResponse(fmt.Sprintf("invalid listing_visibility %s", apiConfig.ListingVisibility)), nil + } + config.ListingVisibility = apiConfig.ListingVisibility + + if len(apiConfig.AuditNonHMACRequestKeys) > 0 { + config.AuditNonHMACRequestKeys = apiConfig.AuditNonHMACRequestKeys + } + if len(apiConfig.AuditNonHMACResponseKeys) > 0 { + config.AuditNonHMACResponseKeys = apiConfig.AuditNonHMACResponseKeys + } + if len(apiConfig.PassthroughRequestHeaders) > 0 { + config.PassthroughRequestHeaders = apiConfig.PassthroughRequestHeaders + } + if len(apiConfig.AllowedResponseHeaders) > 0 { + config.AllowedResponseHeaders = apiConfig.AllowedResponseHeaders + } + + // Create the mount entry + me := &MountEntry{ + Table: mountTableType, + Path: path, + Type: logicalType, + Description: description, + Config: config, + Local: local, + SealWrap: sealWrap, + ExternalEntropyAccess: externalEntropyAccess, + Options: options, + } + + // Attempt mount + if err := b.Core.mount(ctx, me); err != nil { + b.Backend.Logger().Error("error occurred during enable mount", "path", me.Path, "error", err) + return handleError(err) + } + + return nil, nil +} + +// used to intercept an HTTPCodedError so it goes back to callee +func handleError( + err error) (*logical.Response, error) { + if strings.Contains(err.Error(), logical.ErrReadOnly.Error()) { + return logical.ErrorResponse(err.Error()), err + } + switch err.(type) { + case logical.HTTPCodedError: + return logical.ErrorResponse(err.Error()), err + default: + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } +} + +// Performs a similar function to handleError, but upon seeing a ReadOnlyError +// will actually strip it out to prevent forwarding +func handleErrorNoReadOnlyForward( + err error) (*logical.Response, error) { + if strings.Contains(err.Error(), logical.ErrReadOnly.Error()) { + return nil, fmt.Errorf("operation could not be completed as storage is read-only") + } + switch err.(type) { + case logical.HTTPCodedError: + return logical.ErrorResponse(err.Error()), err + default: + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } +} + +// handleUnmount is used to unmount a path +func (b *SystemBackend) handleUnmount(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + path = sanitizeMountPath(path) + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + repState := b.Core.ReplicationState() + entry := 
b.Core.router.MatchingMountEntry(ctx, path) + + // If we are a performance secondary cluster we should forward the request + // to the primary. We fail early here since the view in use isn't marked as + // readonly + if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { + return nil, logical.ErrReadOnly + } + + // We return success when the mount does not exist so as not to expose + // whether the mount existed or not + match := b.Core.router.MatchingMount(ctx, path) + if match == "" || ns.Path+path != match { + return nil, nil + } + + _, found := b.Core.router.MatchingStoragePrefixByAPIPath(ctx, path) + if !found { + b.Backend.Logger().Error("unable to find storage for path", "path", path) + return handleError(fmt.Errorf("unable to find storage for path: %q", path)) + } + + // Attempt unmount + if err := b.Core.unmount(ctx, path); err != nil { + b.Backend.Logger().Error("unmount failed", "path", path, "error", err) + return handleError(err) + } + + // Get the view path if available + var viewPath string + if entry != nil { + viewPath = entry.ViewPath() + } + + // Remove from filtered mounts + if err := b.Core.removePathFromFilteredPaths(ctx, ns.Path+path, viewPath); err != nil { + b.Backend.Logger().Error("filtered path removal failed", "path", path, "error", err) + return handleError(err) + } + + return nil, nil +} + +// handleRemount is used to remount a path +func (b *SystemBackend) handleRemount(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + repState := b.Core.ReplicationState() + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + // Get the paths + fromPath := data.Get("from").(string) + toPath := data.Get("to").(string) + if fromPath == "" || toPath == "" { + return logical.ErrorResponse( + "both 'from' and 'to' path must be specified as a string"), + logical.ErrInvalidRequest + } + + entry := b.Core.router.MatchingMountEntry(ctx, fromPath) + // If we are a performance secondary cluster we should forward the request + // to the primary. 
We fail early here since the view in use isn't marked as + // readonly + if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { + return nil, logical.ErrReadOnly + } + + // Attempt remount + if err := b.Core.remount(ctx, fromPath, toPath, !b.Core.PerfStandby()); err != nil { + b.Backend.Logger().Error("remount failed", "from_path", fromPath, "to_path", toPath, "error", err) + return handleError(err) + } + + // Get the view path if available + var viewPath string + if entry != nil { + viewPath = entry.ViewPath() + } + + // Remove from filtered mounts and restart evaluation process + if err := b.Core.removePathFromFilteredPaths(ctx, ns.Path+fromPath, viewPath); err != nil { + b.Backend.Logger().Error("filtered path removal failed", "path", fromPath, "error", err) + return handleError(err) + } + + return nil, nil +} + +// handleAuthTuneRead is used to get config settings on an auth path +func (b *SystemBackend) handleAuthTuneRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + if path == "" { + return logical.ErrorResponse( + "path must be specified as a string"), + logical.ErrInvalidRequest + } + return b.handleTuneReadCommon(ctx, "auth/"+path) +} + +// handleMountTuneRead is used to get config settings on a backend +func (b *SystemBackend) handleMountTuneRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + if path == "" { + return logical.ErrorResponse( + "path must be specified as a string"), + logical.ErrInvalidRequest + } + + // This call will read both logical backend's configuration as well as auth methods'. + // Retaining this behavior for backward compatibility. If this behavior is not desired, + // an error can be returned if path has a prefix of "auth/". 
+ return b.handleTuneReadCommon(ctx, path) +} + +// handleTuneReadCommon returns the config settings of a path +func (b *SystemBackend) handleTuneReadCommon(ctx context.Context, path string) (*logical.Response, error) { + path = sanitizeMountPath(path) + + sysView := b.Core.router.MatchingSystemView(ctx, path) + if sysView == nil { + b.Backend.Logger().Error("cannot fetch sysview", "path", path) + return handleError(fmt.Errorf("cannot fetch sysview for path %q", path)) + } + + mountEntry := b.Core.router.MatchingMountEntry(ctx, path) + if mountEntry == nil { + b.Backend.Logger().Error("cannot fetch mount entry", "path", path) + return handleError(fmt.Errorf("cannot fetch mount entry for path %q", path)) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "default_lease_ttl": int(sysView.DefaultLeaseTTL().Seconds()), + "max_lease_ttl": int(sysView.MaxLeaseTTL().Seconds()), + "force_no_cache": mountEntry.Config.ForceNoCache, + }, + } + + // not tunable so doesn't need to be stored/loaded through synthesizedConfigCache + if mountEntry.ExternalEntropyAccess { + resp.Data["external_entropy_access"] = true + } + + if mountEntry.Table == credentialTableType { + resp.Data["token_type"] = mountEntry.Config.TokenType.String() + } + + if rawVal, ok := mountEntry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok { + resp.Data["audit_non_hmac_request_keys"] = rawVal.([]string) + } + + if rawVal, ok := mountEntry.synthesizedConfigCache.Load("audit_non_hmac_response_keys"); ok { + resp.Data["audit_non_hmac_response_keys"] = rawVal.([]string) + } + + if len(mountEntry.Config.ListingVisibility) > 0 { + resp.Data["listing_visibility"] = mountEntry.Config.ListingVisibility + } + + if rawVal, ok := mountEntry.synthesizedConfigCache.Load("passthrough_request_headers"); ok { + resp.Data["passthrough_request_headers"] = rawVal.([]string) + } + + if rawVal, ok := mountEntry.synthesizedConfigCache.Load("allowed_response_headers"); ok { + resp.Data["allowed_response_headers"] = rawVal.([]string) + } + + if len(mountEntry.Options) > 0 { + resp.Data["options"] = mountEntry.Options + } + + return resp, nil +} + +// handleAuthTuneWrite is used to set config settings on an auth path +func (b *SystemBackend) handleAuthTuneWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + if path == "" { + return logical.ErrorResponse("missing path"), nil + } + + return b.handleTuneWriteCommon(ctx, "auth/"+path, data) +} + +// handleMountTuneWrite is used to set config settings on a backend +func (b *SystemBackend) handleMountTuneWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + if path == "" { + return logical.ErrorResponse("missing path"), nil + } + + // This call will write both logical backend's configuration as well as auth methods'. + // Retaining this behavior for backward compatibility. If this behavior is not desired, + // an error can be returned if path has a prefix of "auth/". 
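[Editorial aside: both tune handlers sit behind sys/mounts/:path/tune, which the api package wraps. A minimal client-side sketch, assuming a mount at "secret/":]

package main

import (
	"fmt"
	"log"

	vaultapi "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vaultapi.NewClient(vaultapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// POST sys/mounts/secret/tune, served by handleMountTuneWrite above.
	if err := client.Sys().TuneMount("secret/", vaultapi.MountConfigInput{
		DefaultLeaseTTL: "1h",
		MaxLeaseTTL:     "24h",
	}); err != nil {
		log.Fatal(err)
	}

	// GET sys/mounts/secret/tune, served by handleMountTuneRead above.
	out, err := client.Sys().MountConfig("secret/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.DefaultLeaseTTL, out.MaxLeaseTTL) // reported in seconds
}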
+ return b.handleTuneWriteCommon(ctx, path, data) +} + +// handleTuneWriteCommon is used to set config settings on a path +func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string, data *framework.FieldData) (*logical.Response, error) { + repState := b.Core.ReplicationState() + + path = sanitizeMountPath(path) + + // Prevent protected paths from being changed + for _, p := range untunableMounts { + if strings.HasPrefix(path, p) { + b.Backend.Logger().Error("cannot tune this mount", "path", path) + return handleError(fmt.Errorf("cannot tune %q", path)) + } + } + + mountEntry := b.Core.router.MatchingMountEntry(ctx, path) + if mountEntry == nil { + b.Backend.Logger().Error("tune failed", "error", "no mount entry found", "path", path) + return handleError(fmt.Errorf("tune of path %q failed: no mount entry found", path)) + } + if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { + return nil, logical.ErrReadOnly + } + + var lock *sync.RWMutex + switch { + case strings.HasPrefix(path, credentialRoutePrefix): + lock = &b.Core.authLock + default: + lock = &b.Core.mountsLock + } + + lock.Lock() + defer lock.Unlock() + + // Check again after grabbing the lock + mountEntry = b.Core.router.MatchingMountEntry(ctx, path) + if mountEntry == nil { + b.Backend.Logger().Error("tune failed", "error", "no mount entry found", "path", path) + return handleError(fmt.Errorf("tune of path %q failed: no mount entry found", path)) + } + if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { + return nil, logical.ErrReadOnly + } + + // Timing configuration parameters + { + var newDefault, newMax time.Duration + defTTL := data.Get("default_lease_ttl").(string) + switch defTTL { + case "": + newDefault = mountEntry.Config.DefaultLeaseTTL + case "system": + newDefault = time.Duration(0) + default: + tmpDef, err := parseutil.ParseDurationSecond(defTTL) + if err != nil { + return handleError(err) + } + newDefault = tmpDef + } + + maxTTL := data.Get("max_lease_ttl").(string) + switch maxTTL { + case "": + newMax = mountEntry.Config.MaxLeaseTTL + case "system": + newMax = time.Duration(0) + default: + tmpMax, err := parseutil.ParseDurationSecond(maxTTL) + if err != nil { + return handleError(err) + } + newMax = tmpMax + } + + if newDefault != mountEntry.Config.DefaultLeaseTTL || + newMax != mountEntry.Config.MaxLeaseTTL { + + if err := b.tuneMountTTLs(ctx, path, mountEntry, newDefault, newMax); err != nil { + b.Backend.Logger().Error("tuning failed", "path", path, "error", err) + return handleError(err) + } + } + } + + if rawVal, ok := data.GetOk("description"); ok { + description := rawVal.(string) + + oldDesc := mountEntry.Description + mountEntry.Description = description + + // Update the mount table + var err error + switch { + case strings.HasPrefix(path, "auth/"): + err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local) + default: + err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local) + } + if err != nil { + mountEntry.Description = oldDesc + return handleError(err) + } + if b.Core.logger.IsInfo() { + b.Core.logger.Info("mount tuning of description successful", "path", path, "description", description) + } + } + + if rawVal, ok := data.GetOk("audit_non_hmac_request_keys"); ok { + auditNonHMACRequestKeys := rawVal.([]string) + + oldVal := mountEntry.Config.AuditNonHMACRequestKeys + mountEntry.Config.AuditNonHMACRequestKeys = auditNonHMACRequestKeys + + // Update the mount 
table + var err error + switch { + case strings.HasPrefix(path, "auth/"): + err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local) + default: + err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local) + } + if err != nil { + mountEntry.Config.AuditNonHMACRequestKeys = oldVal + return handleError(err) + } + + mountEntry.SyncCache() + + if b.Core.logger.IsInfo() { + b.Core.logger.Info("mount tuning of audit_non_hmac_request_keys successful", "path", path) + } + } + + if rawVal, ok := data.GetOk("audit_non_hmac_response_keys"); ok { + auditNonHMACResponseKeys := rawVal.([]string) + + oldVal := mountEntry.Config.AuditNonHMACResponseKeys + mountEntry.Config.AuditNonHMACResponseKeys = auditNonHMACResponseKeys + + // Update the mount table + var err error + switch { + case strings.HasPrefix(path, "auth/"): + err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local) + default: + err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local) + } + if err != nil { + mountEntry.Config.AuditNonHMACResponseKeys = oldVal + return handleError(err) + } + + mountEntry.SyncCache() + + if b.Core.logger.IsInfo() { + b.Core.logger.Info("mount tuning of audit_non_hmac_response_keys successful", "path", path) + } + } + + if rawVal, ok := data.GetOk("listing_visibility"); ok { + lvString := rawVal.(string) + listingVisibility := ListingVisibilityType(lvString) + + if err := checkListingVisibility(listingVisibility); err != nil { + return logical.ErrorResponse(fmt.Sprintf("invalid listing_visibility %s", listingVisibility)), nil + } + + oldVal := mountEntry.Config.ListingVisibility + mountEntry.Config.ListingVisibility = listingVisibility + + // Update the mount table + var err error + switch { + case strings.HasPrefix(path, "auth/"): + err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local) + default: + err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local) + } + if err != nil { + mountEntry.Config.ListingVisibility = oldVal + return handleError(err) + } + + if b.Core.logger.IsInfo() { + b.Core.logger.Info("mount tuning of listing_visibility successful", "path", path) + } + } + + if rawVal, ok := data.GetOk("token_type"); ok { + if !strings.HasPrefix(path, "auth/") { + return logical.ErrorResponse(fmt.Sprintf("'token_type' can only be modified on auth mounts")), logical.ErrInvalidRequest + } + if mountEntry.Type == "token" || mountEntry.Type == "ns_token" { + return logical.ErrorResponse(fmt.Sprintf("'token_type' cannot be set for 'token' or 'ns_token' auth mounts")), logical.ErrInvalidRequest + } + + tokenType := logical.TokenTypeDefaultService + ttString := rawVal.(string) + + switch ttString { + case "", "default-service": + case "default-batch": + tokenType = logical.TokenTypeDefaultBatch + case "service": + tokenType = logical.TokenTypeService + case "batch": + tokenType = logical.TokenTypeBatch + default: + return logical.ErrorResponse(fmt.Sprintf( + "invalid value for 'token_type'")), logical.ErrInvalidRequest + } + + oldVal := mountEntry.Config.TokenType + mountEntry.Config.TokenType = tokenType + + // Update the mount table + if err := b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local); err != nil { + mountEntry.Config.TokenType = oldVal + return handleError(err) + } + + if b.Core.logger.IsInfo() { + b.Core.logger.Info("mount tuning of token_type successful", "path", path, "token_type", ttString) + } + } + + if rawVal, ok := data.GetOk("passthrough_request_headers"); ok { + headers := rawVal.([]string) + + oldVal := mountEntry.Config.PassthroughRequestHeaders 
+ mountEntry.Config.PassthroughRequestHeaders = headers + + // Update the mount table + var err error + switch { + case strings.HasPrefix(path, "auth/"): + err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local) + default: + err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local) + } + if err != nil { + mountEntry.Config.PassthroughRequestHeaders = oldVal + return handleError(err) + } + + mountEntry.SyncCache() + + if b.Core.logger.IsInfo() { + b.Core.logger.Info("mount tuning of passthrough_request_headers successful", "path", path) + } + } + + if rawVal, ok := data.GetOk("allowed_response_headers"); ok { + headers := rawVal.([]string) + + oldVal := mountEntry.Config.AllowedResponseHeaders + mountEntry.Config.AllowedResponseHeaders = headers + + // Update the mount table + var err error + switch { + case strings.HasPrefix(path, "auth/"): + err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local) + default: + err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local) + } + if err != nil { + mountEntry.Config.AllowedResponseHeaders = oldVal + return handleError(err) + } + + mountEntry.SyncCache() + + if b.Core.logger.IsInfo() { + b.Core.logger.Info("mount tuning of allowed_response_headers successful", "path", path) + } + } + + var err error + var resp *logical.Response + var options map[string]string + if optionsRaw, ok := data.GetOk("options"); ok { + options = optionsRaw.(map[string]string) + } + + if len(options) > 0 { + b.Core.logger.Info("mount tuning of options", "path", path, "options", options) + newOptions := make(map[string]string) + var kvUpgraded bool + + // The version options should only apply to the KV mount, check that first + if v, ok := options["version"]; ok { + // Special case to make sure we cannot disable versioning once it's + // enabled. If the vkv backend supports downgrading this can be removed. + meVersion, err := parseutil.ParseInt(mountEntry.Options["version"]) + if err != nil { + return nil, errwrap.Wrapf("unable to parse mount entry: {{err}}", err) + } + optVersion, err := parseutil.ParseInt(v) + if err != nil { + return handleError(errwrap.Wrapf("unable to parse options: {{err}}", err)) + } + + // Only accept valid versions + switch optVersion { + case 1: + case 2: + default: + return logical.ErrorResponse(fmt.Sprintf("invalid version provided: %d", optVersion)), logical.ErrInvalidRequest + } + + if meVersion > optVersion { + // Return early if version option asks for a downgrade + return logical.ErrorResponse(fmt.Sprintf("cannot downgrade mount from version %d", meVersion)), logical.ErrInvalidRequest + } + if meVersion < optVersion { + kvUpgraded = true + resp = &logical.Response{} + resp.AddWarning(fmt.Sprintf("Upgrading mount from version %d to version %d. This mount will be unavailable for a brief period and will resume service shortly.", meVersion, optVersion)) + } + } + + // Upsert options value to a copy of the existing mountEntry's options + for k, v := range mountEntry.Options { + newOptions[k] = v + } + for k, v := range options { + // If the value of the provided option is empty, delete the key. We + // special-case the version value here to guard against KV downgrades, but + // this piece could potentially be refactored in the future to be non-KV + // specific. 
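[Editorial aside: the version guard above is what a KV v1-to-v2 upgrade goes through when tuned from a client, and the downgrade branch is the one that rejects going back. A sketch using the api package, assuming a KV v1 mount at "secret/":]

package main

import (
	"log"

	vaultapi "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vaultapi.NewClient(vaultapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// options["version"]="2" on a v1 mount takes the kvUpgraded branch above;
	// tuning back to "1" afterwards is rejected as a downgrade.
	if err := client.Sys().TuneMount("secret/", vaultapi.MountConfigInput{
		Options: map[string]string{"version": "2"},
	}); err != nil {
		log.Fatal(err)
	}
}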
+ if len(v) == 0 && k != "version" { + delete(newOptions, k) + } else { + newOptions[k] = v + } + } + + // Update the mount table + oldVal := mountEntry.Options + mountEntry.Options = newOptions + switch { + case strings.HasPrefix(path, "auth/"): + err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local) + default: + err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local) + } + if err != nil { + mountEntry.Options = oldVal + return handleError(err) + } + + // Reload the backend to kick off the upgrade process. It should only apply to the KV backend, so we + // trigger based on the version logic above. + if kvUpgraded { + err = b.Core.reloadBackendCommon(ctx, mountEntry, strings.HasPrefix(path, credentialRoutePrefix)) + if err != nil { + b.Core.logger.Error("mount tuning of options: could not reload backend", "error", err, "path", path, "options", options) + } + + } + } + + return resp, nil +} + +// handleLeaseLookup is used to view the metadata for a given LeaseID +func (b *SystemBackend) handleLeaseLookup(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + leaseID := data.Get("lease_id").(string) + if leaseID == "" { + return logical.ErrorResponse("lease_id must be specified"), + logical.ErrInvalidRequest + } + + leaseTimes, err := b.Core.expiration.FetchLeaseTimes(ctx, leaseID) + if err != nil { + b.Backend.Logger().Error("error retrieving lease", "lease_id", leaseID, "error", err) + return handleError(err) + } + if leaseTimes == nil { + return logical.ErrorResponse("invalid lease"), logical.ErrInvalidRequest + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "id": leaseID, + "issue_time": leaseTimes.IssueTime, + "expire_time": nil, + "last_renewal": nil, + "ttl": int64(0), + }, + } + renewable, _ := leaseTimes.renewable() + resp.Data["renewable"] = renewable + + if !leaseTimes.LastRenewalTime.IsZero() { + resp.Data["last_renewal"] = leaseTimes.LastRenewalTime + } + if !leaseTimes.ExpireTime.IsZero() { + resp.Data["expire_time"] = leaseTimes.ExpireTime + resp.Data["ttl"] = leaseTimes.ttl() + } + return resp, nil +} + +func (b *SystemBackend) handleLeaseLookupList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + prefix := data.Get("prefix").(string) + if prefix != "" && !strings.HasSuffix(prefix, "/") { + prefix = prefix + "/" + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + view := b.Core.expiration.leaseView(ns) + keys, err := view.List(ctx, prefix) + if err != nil { + b.Backend.Logger().Error("error listing leases", "prefix", prefix, "error", err) + return handleErrorNoReadOnlyForward(err) + } + return logical.ListResponse(keys), nil +} + +// handleRenew is used to renew a lease with a given LeaseID +func (b *SystemBackend) handleRenew(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Get all the options + leaseID := data.Get("lease_id").(string) + if leaseID == "" { + leaseID = data.Get("url_lease_id").(string) + } + if leaseID == "" { + return logical.ErrorResponse("lease_id must be specified"), + logical.ErrInvalidRequest + } + incrementRaw := data.Get("increment").(int) + + // Convert the increment + increment := time.Duration(incrementRaw) * time.Second + + // Invoke the expiration manager directly + resp, err := b.Core.expiration.Renew(ctx, leaseID, increment) + if err != nil { + b.Backend.Logger().Error("lease renewal failed", "lease_id", leaseID, "error", err) + 
return handleErrorNoReadOnlyForward(err) + } + return resp, err +} + +// handleRevoke is used to revoke a given LeaseID +func (b *SystemBackend) handleRevoke(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Get all the options + leaseID := data.Get("lease_id").(string) + if leaseID == "" { + leaseID = data.Get("url_lease_id").(string) + } + if leaseID == "" { + return logical.ErrorResponse("lease_id must be specified"), + logical.ErrInvalidRequest + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + revokeCtx := namespace.ContextWithNamespace(b.Core.activeContext, ns) + if data.Get("sync").(bool) { + // Invoke the expiration manager directly + if err := b.Core.expiration.Revoke(revokeCtx, leaseID); err != nil { + b.Backend.Logger().Error("lease revocation failed", "lease_id", leaseID, "error", err) + return handleErrorNoReadOnlyForward(err) + } + + return nil, nil + } + + if err := b.Core.expiration.LazyRevoke(revokeCtx, leaseID); err != nil { + b.Backend.Logger().Error("lease revocation failed", "lease_id", leaseID, "error", err) + return handleErrorNoReadOnlyForward(err) + } + + return logical.RespondWithStatusCode(nil, nil, http.StatusAccepted) +} + +// handleRevokePrefix is used to revoke a prefix with many LeaseIDs +func (b *SystemBackend) handleRevokePrefix(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.handleRevokePrefixCommon(ctx, req, data, false, data.Get("sync").(bool)) +} + +// handleRevokeForce is used to revoke a prefix with many LeaseIDs, ignoring errors +func (b *SystemBackend) handleRevokeForce(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.handleRevokePrefixCommon(ctx, req, data, true, true) +} + +// handleRevokePrefixCommon is used to revoke a prefix with many LeaseIDs +func (b *SystemBackend) handleRevokePrefixCommon(ctx context.Context, + req *logical.Request, data *framework.FieldData, force, sync bool) (*logical.Response, error) { + // Get all the options + prefix := data.Get("prefix").(string) + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + // Invoke the expiration manager directly + revokeCtx := namespace.ContextWithNamespace(b.Core.activeContext, ns) + if force { + err = b.Core.expiration.RevokeForce(revokeCtx, prefix) + } else { + err = b.Core.expiration.RevokePrefix(revokeCtx, prefix, sync) + } + if err != nil { + b.Backend.Logger().Error("revoke prefix failed", "prefix", prefix, "error", err) + return handleErrorNoReadOnlyForward(err) + } + + if sync { + return nil, nil + } + + return logical.RespondWithStatusCode(nil, nil, http.StatusAccepted) +} + +// handleAuthTable handles the "auth" endpoint to provide the auth table +func (b *SystemBackend) handleAuthTable(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + b.Core.authLock.RLock() + defer b.Core.authLock.RUnlock() + + resp := &logical.Response{ + Data: make(map[string]interface{}), + } + + for _, entry := range b.Core.auth.Entries { + // Only show entries for current namespace + if entry.Namespace().Path != ns.Path { + continue + } + + cont, err := b.Core.checkReplicatedFiltering(ctx, entry, credentialRoutePrefix) + if err != nil { + return nil, err + } + if cont { + continue + } + + info := mountInfo(entry) + 
resp.Data[entry.Path] = info + } + + return resp, nil +} + +// handleEnableAuth is used to enable a new credential backend +func (b *SystemBackend) handleEnableAuth(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + repState := b.Core.ReplicationState() + local := data.Get("local").(bool) + + // If we are a performance secondary cluster we should forward the request + // to the primary. We fail early here since the view in use isn't marked as + // readonly + if !local && repState.HasState(consts.ReplicationPerformanceSecondary) { + return nil, logical.ErrReadOnly + } + + // Get all the options + path := data.Get("path").(string) + path = sanitizeMountPath(path) + logicalType := data.Get("type").(string) + description := data.Get("description").(string) + pluginName := data.Get("plugin_name").(string) + sealWrap := data.Get("seal_wrap").(bool) + externalEntropyAccess := data.Get("external_entropy_access").(bool) + options := data.Get("options").(map[string]string) + + var config MountConfig + var apiConfig APIMountConfig + + configMap := data.Get("config").(map[string]interface{}) + if configMap != nil && len(configMap) != 0 { + err := mapstructure.Decode(configMap, &apiConfig) + if err != nil { + return logical.ErrorResponse( + "unable to convert given auth config information"), + logical.ErrInvalidRequest + } + } + + switch apiConfig.DefaultLeaseTTL { + case "": + case "system": + default: + tmpDef, err := parseutil.ParseDurationSecond(apiConfig.DefaultLeaseTTL) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "unable to parse default TTL of %s: %s", apiConfig.DefaultLeaseTTL, err)), + logical.ErrInvalidRequest + } + config.DefaultLeaseTTL = tmpDef + } + + switch apiConfig.MaxLeaseTTL { + case "": + case "system": + default: + tmpMax, err := parseutil.ParseDurationSecond(apiConfig.MaxLeaseTTL) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "unable to parse max TTL of %s: %s", apiConfig.MaxLeaseTTL, err)), + logical.ErrInvalidRequest + } + config.MaxLeaseTTL = tmpMax + } + + if config.MaxLeaseTTL != 0 && config.DefaultLeaseTTL > config.MaxLeaseTTL { + return logical.ErrorResponse( + "given default lease TTL greater than given max lease TTL"), + logical.ErrInvalidRequest + } + + if config.DefaultLeaseTTL > b.Core.maxLeaseTTL && config.MaxLeaseTTL == 0 { + return logical.ErrorResponse(fmt.Sprintf( + "given default lease TTL greater than system max lease TTL of %d", int(b.Core.maxLeaseTTL.Seconds()))), + logical.ErrInvalidRequest + } + + switch apiConfig.TokenType { + case "", "default-service": + config.TokenType = logical.TokenTypeDefaultService + case "default-batch": + config.TokenType = logical.TokenTypeDefaultBatch + case "service": + config.TokenType = logical.TokenTypeService + case "batch": + config.TokenType = logical.TokenTypeBatch + default: + return logical.ErrorResponse(fmt.Sprintf( + "invalid value for 'token_type'")), logical.ErrInvalidRequest + } + + switch logicalType { + case "": + return logical.ErrorResponse( + "backend type must be specified as a string"), + logical.ErrInvalidRequest + case "plugin": + // Only set plugin name if mount is of type plugin, with apiConfig.PluginName + // option taking precedence. 
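The TTL, token type, and listing visibility fields parsed in handleEnableAuth below arrive as mount configuration from the client. As a minimal sketch of driving this handler from the other side, here is what enabling an auth method looks like through the github.com/hashicorp/vault/api package this module pins; it is illustrative only (the approle type, TTL values, and environment-based client config are assumptions, not taken from this diff):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR; the token comes from VAULT_TOKEN.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// These fields are decoded into the apiConfig/config structs by
	// handleEnableAuth; the values here are examples.
	err = client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{
		Type:        "approle",
		Description: "example approle mount",
		Config: api.AuthConfigInput{
			DefaultLeaseTTL: "1h",
			MaxLeaseTTL:     "24h",
			TokenType:       "default-service",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("enabled auth method at auth/approle/")
}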
+ switch { + case apiConfig.PluginName != "": + logicalType = apiConfig.PluginName + case pluginName != "": + logicalType = pluginName + default: + return logical.ErrorResponse( + "plugin_name must be provided for plugin backend"), + logical.ErrInvalidRequest + } + } + + if options != nil && options["version"] != "" { + return logical.ErrorResponse(fmt.Sprintf( + "auth method %q does not allow setting a version", logicalType)), + logical.ErrInvalidRequest + } + + if err := checkListingVisibility(apiConfig.ListingVisibility); err != nil { + return logical.ErrorResponse(fmt.Sprintf("invalid listing_visibility %s", apiConfig.ListingVisibility)), nil + } + config.ListingVisibility = apiConfig.ListingVisibility + + if len(apiConfig.AuditNonHMACRequestKeys) > 0 { + config.AuditNonHMACRequestKeys = apiConfig.AuditNonHMACRequestKeys + } + if len(apiConfig.AuditNonHMACResponseKeys) > 0 { + config.AuditNonHMACResponseKeys = apiConfig.AuditNonHMACResponseKeys + } + if len(apiConfig.PassthroughRequestHeaders) > 0 { + config.PassthroughRequestHeaders = apiConfig.PassthroughRequestHeaders + } + if len(apiConfig.AllowedResponseHeaders) > 0 { + config.AllowedResponseHeaders = apiConfig.AllowedResponseHeaders + } + + // Create the mount entry + me := &MountEntry{ + Table: credentialTableType, + Path: path, + Type: logicalType, + Description: description, + Config: config, + Local: local, + SealWrap: sealWrap, + ExternalEntropyAccess: externalEntropyAccess, + Options: options, + } + + // Attempt enabling + if err := b.Core.enableCredential(ctx, me); err != nil { + b.Backend.Logger().Error("error occurred during enable credential", "path", me.Path, "error", err) + return handleError(err) + } + return nil, nil +} + +// handleDisableAuth is used to disable a credential backend +func (b *SystemBackend) handleDisableAuth(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + path = sanitizeMountPath(path) + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + fullPath := credentialRoutePrefix + path + + repState := b.Core.ReplicationState() + entry := b.Core.router.MatchingMountEntry(ctx, fullPath) + + // If we are a performance secondary cluster we should forward the request + // to the primary. 
We fail early here since the view in use isn't marked as
+	// readonly
+	if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
+		return nil, logical.ErrReadOnly
+	}
+
+	// We return success when the mount does not exist so as not to reveal
+	// whether the mount existed
+	match := b.Core.router.MatchingMount(ctx, fullPath)
+	if match == "" || ns.Path+fullPath != match {
+		return nil, nil
+	}
+
+	_, found := b.Core.router.MatchingStoragePrefixByAPIPath(ctx, fullPath)
+	if !found {
+		b.Backend.Logger().Error("unable to find storage for path", "path", fullPath)
+		return handleError(fmt.Errorf("unable to find storage for path: %q", fullPath))
+	}
+
+	// Attempt disable
+	if err := b.Core.disableCredential(ctx, path); err != nil {
+		b.Backend.Logger().Error("disable auth mount failed", "path", path, "error", err)
+		return handleError(err)
+	}
+
+	// Get the view path if available
+	var viewPath string
+	if entry != nil {
+		viewPath = entry.ViewPath()
+	}
+
+	// Remove from filtered mounts
+	if err := b.Core.removePathFromFilteredPaths(ctx, fullPath, viewPath); err != nil {
+		b.Backend.Logger().Error("filtered path removal failed", "path", path, "error", err)
+		return handleError(err)
+	}
+
+	return nil, nil
+}
+
+// handlePoliciesList handles the /sys/policy/ and /sys/policies/<type> endpoints to provide the enabled policies
+func (b *SystemBackend) handlePoliciesList(policyType PolicyType) framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		ns, err := namespace.FromContext(ctx)
+		if err != nil {
+			return nil, err
+		}
+		policies, err := b.Core.policyStore.ListPolicies(ctx, policyType)
+		if err != nil {
+			return nil, err
+		}
+
+		switch policyType {
+		case PolicyTypeACL:
+			// Add the special "root" policy if not egp and we are at the root namespace
+			if ns.ID == namespace.RootNamespaceID {
+				policies = append(policies, "root")
+			}
+			resp := logical.ListResponse(policies)
+
+			// If the request is from sys/policy/ we handle backwards compatibility
+			if strings.HasPrefix(req.Path, "policy") {
+				resp.Data["policies"] = resp.Data["keys"]
+			}
+			return resp, nil
+
+		case PolicyTypeRGP:
+			return logical.ListResponse(policies), nil
+
+		case PolicyTypeEGP:
+			nsScopedKeyInfo := getEGPListResponseKeyInfo(b, ns)
+			return &logical.Response{
+				Data: map[string]interface{}{
+					"keys":     policies,
+					"key_info": nsScopedKeyInfo,
+				},
+			}, nil
+		}
+
+		return logical.ErrorResponse("unknown policy type"), nil
+	}
+}
+
+// handlePoliciesRead handles the "/sys/policy/<name>" and "/sys/policies/<type>/<name>" endpoints to read a policy
+func (b *SystemBackend) handlePoliciesRead(policyType PolicyType) framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		name := data.Get("name").(string)
+
+		policy, err := b.Core.policyStore.GetPolicy(ctx, name, policyType)
+		if err != nil {
+			return handleError(err)
+		}
+
+		if policy == nil {
+			return nil, nil
+		}
+
+		// If the request is from sys/policy/ we handle backwards compatibility
+		var respDataPolicyName string
+		if policyType == PolicyTypeACL && strings.HasPrefix(req.Path, "policy") {
+			respDataPolicyName = "rules"
+		} else {
+			respDataPolicyName = "policy"
+		}
+
+		resp := &logical.Response{
+			Data: map[string]interface{}{
+				"name":             policy.Name,
+				respDataPolicyName: policy.Raw,
+			},
+		}
+
+		switch policy.Type {
+		case PolicyTypeRGP, PolicyTypeEGP:
+			addSentinelPolicyData(resp.Data, policy)
+		}
+
+		return resp, nil
+	}
+}
+
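handlePoliciesList and handlePoliciesRead above serve sys/policies/acl and the legacy sys/policy compatibility path. A hedged sketch of exercising both through the api client; it assumes VAULT_ADDR and VAULT_TOKEN are set and the token may read policies:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hits the PolicyTypeACL branch above; "root" is appended
	// server-side in the root namespace.
	names, err := client.Sys().ListPolicies()
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		// Returns the raw document served under "policy" (or "rules"
		// on the legacy path) by handlePoliciesRead.
		rules, err := client.Sys().GetPolicy(name)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s:\n%s\n", name, rules)
	}
}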
+// handlePoliciesSet handles the "/sys/policy/<name>" and "/sys/policies/<type>/<name>" endpoints to set a policy
+func (b *SystemBackend) handlePoliciesSet(policyType PolicyType) framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		var resp *logical.Response
+
+		ns, err := namespace.FromContext(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		policy := &Policy{
+			Name:      strings.ToLower(data.Get("name").(string)),
+			Type:      policyType,
+			namespace: ns,
+		}
+		if policy.Name == "" {
+			return logical.ErrorResponse("policy name must be provided in the URL"), nil
+		}
+
+		policy.Raw = data.Get("policy").(string)
+		if policy.Raw == "" && policyType == PolicyTypeACL && strings.HasPrefix(req.Path, "policy") {
+			policy.Raw = data.Get("rules").(string)
+			if resp == nil {
+				resp = &logical.Response{}
+			}
+			resp.AddWarning("'rules' is deprecated, please use 'policy' instead")
+		}
+		if policy.Raw == "" {
+			return logical.ErrorResponse("'policy' parameter not supplied or empty"), nil
+		}
+
+		if polBytes, err := base64.StdEncoding.DecodeString(policy.Raw); err == nil {
+			policy.Raw = string(polBytes)
+		}
+
+		switch policyType {
+		case PolicyTypeACL:
+			p, err := ParseACLPolicy(ns, policy.Raw)
+			if err != nil {
+				return handleError(err)
+			}
+			policy.Paths = p.Paths
+			policy.Templated = p.Templated
+
+		case PolicyTypeRGP, PolicyTypeEGP:
+
+		default:
+			return logical.ErrorResponse("unknown policy type"), nil
+		}
+
+		if policy.Type == PolicyTypeRGP || policy.Type == PolicyTypeEGP {
+			if errResp := inputSentinelPolicyData(data, policy); errResp != nil {
+				return errResp, nil
+			}
+		}
+
+		// Update the policy
+		if err := b.Core.policyStore.SetPolicy(ctx, policy); err != nil {
+			return handleError(err)
+		}
+		return resp, nil
+	}
+}
+
+func (b *SystemBackend) handlePoliciesDelete(policyType PolicyType) framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		name := data.Get("name").(string)
+
+		if err := b.Core.policyStore.DeletePolicy(ctx, name, policyType); err != nil {
+			return handleError(err)
+		}
+		return nil, nil
+	}
+}
+
+// handleAuditTable handles the "audit" endpoint to provide the audit table
+func (b *SystemBackend) handleAuditTable(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	b.Core.auditLock.RLock()
+	defer b.Core.auditLock.RUnlock()
+
+	resp := &logical.Response{
+		Data: make(map[string]interface{}),
+	}
+	for _, entry := range b.Core.audit.Entries {
+		info := map[string]interface{}{
+			"path":        entry.Path,
+			"type":        entry.Type,
+			"description": entry.Description,
+			"options":     entry.Options,
+			"local":       entry.Local,
+		}
+		resp.Data[entry.Path] = info
+	}
+	return resp, nil
+}
+
+// handleAuditHash is used to fetch the hash of the given input data with the
+// specified audit backend's salt
+func (b *SystemBackend) handleAuditHash(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	path := data.Get("path").(string)
+	input := data.Get("input").(string)
+	if input == "" {
+		return logical.ErrorResponse("the \"input\" parameter is empty"), nil
+	}
+
+	path = sanitizeMountPath(path)
+
+	hash, err := b.Core.auditBroker.GetHash(ctx, path, input)
+	if err != nil {
+		return logical.ErrorResponse(err.Error()), nil
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"hash": hash,
+		},
+	}, nil
+}
+
+//
handleEnableAudit is used to enable a new audit backend +func (b *SystemBackend) handleEnableAudit(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + repState := b.Core.ReplicationState() + + local := data.Get("local").(bool) + // If we are a performance secondary cluster we should forward the request + // to the primary. We fail early here since the view in use isn't marked as + // readonly + if !local && repState.HasState(consts.ReplicationPerformanceSecondary) { + return nil, logical.ErrReadOnly + } + + // Get all the options + path := data.Get("path").(string) + backendType := data.Get("type").(string) + description := data.Get("description").(string) + options := data.Get("options").(map[string]string) + + // Create the mount entry + me := &MountEntry{ + Table: auditTableType, + Path: path, + Type: backendType, + Description: description, + Options: options, + Local: local, + } + + // Attempt enabling + if err := b.Core.enableAudit(ctx, me, true); err != nil { + b.Backend.Logger().Error("enable audit mount failed", "path", me.Path, "error", err) + return handleError(err) + } + return nil, nil +} + +// handleDisableAudit is used to disable an audit backend +func (b *SystemBackend) handleDisableAudit(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + + if !strings.HasSuffix(path, "/") { + path += "/" + } + + if path == "/" { + return handleError(errors.New("audit device path must be specified")) + } + + b.Core.auditLock.RLock() + table := b.Core.audit.shallowClone() + entry, err := table.find(ctx, path) + b.Core.auditLock.RUnlock() + + if err != nil { + return handleError(err) + } + if entry == nil { + return nil, nil + } + + repState := b.Core.ReplicationState() + + // If we are a performance secondary cluster we should forward the request + // to the primary. 
We fail early here since the view in use isn't marked as + // readonly + if !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { + return nil, logical.ErrReadOnly + } + + // Attempt disable + if existed, err := b.Core.disableAudit(ctx, path, true); existed && err != nil { + b.Backend.Logger().Error("disable audit mount failed", "path", path, "error", err) + return handleError(err) + } + return nil, nil +} + +func (b *SystemBackend) handleConfigUIHeadersRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + header := data.Get("header").(string) + + value, err := b.Core.uiConfig.GetHeader(ctx, header) + if err != nil { + return nil, err + } + if value == "" { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "value": value, + }, + }, nil +} + +func (b *SystemBackend) handleConfigUIHeadersList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + headers, err := b.Core.uiConfig.HeaderKeys(ctx) + if err != nil { + return nil, err + } + if len(headers) == 0 { + return nil, nil + } + + return logical.ListResponse(headers), nil +} + +func (b *SystemBackend) handleConfigUIHeadersUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + header := data.Get("header").(string) + values := data.Get("values").([]string) + if header == "" || len(values) == 0 { + return logical.ErrorResponse("header and values must be specified"), logical.ErrInvalidRequest + } + + lowerHeader := strings.ToLower(header) + if strings.HasPrefix(lowerHeader, "x-vault-") { + return logical.ErrorResponse("X-Vault headers cannot be set"), logical.ErrInvalidRequest + } + + // Translate the list of values to the valid header string + value := http.Header{} + for _, v := range values { + value.Add(header, v) + } + err := b.Core.uiConfig.SetHeader(ctx, header, value.Get(header)) + if err != nil { + return nil, err + } + + // Warn when overriding the CSP + resp := &logical.Response{} + if lowerHeader == "content-security-policy" { + resp.AddWarning("overriding default Content-Security-Policy which is secure by default, proceed with caution") + } + + return resp, nil +} + +func (b *SystemBackend) handleConfigUIHeadersDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + header := data.Get("header").(string) + err := b.Core.uiConfig.DeleteHeader(ctx, header) + if err != nil { + return nil, err + } + return nil, nil +} + +// handleKeyStatus returns status information about the backend key +func (b *SystemBackend) handleKeyStatus(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Get the key info + info, err := b.Core.barrier.ActiveKeyInfo() + if err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "term": info.Term, + "install_time": info.InstallTime.Format(time.RFC3339Nano), + }, + } + return resp, nil +} + +// handleRotate is used to trigger a key rotation +func (b *SystemBackend) handleRotate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + repState := b.Core.ReplicationState() + if repState.HasState(consts.ReplicationPerformanceSecondary) { + return logical.ErrorResponse("cannot rotate on a replication secondary"), nil + } + + // Rotate to the new term + newTerm, err := b.Core.barrier.Rotate(ctx, b.Core.secureRandomReader) + if 
err != nil {
+		b.Backend.Logger().Error("failed to create new encryption key", "error", err)
+		return handleError(err)
+	}
+	b.Backend.Logger().Info("installed new encryption key")
+
+	// In HA mode, we need to create an upgrade path for the standby instances
+	if b.Core.ha != nil {
+		// Create the upgrade path to the new term
+		if err := b.Core.barrier.CreateUpgrade(ctx, newTerm); err != nil {
+			b.Backend.Logger().Error("failed to create new upgrade", "term", newTerm, "error", err)
+		}
+
+		// Schedule the destroy of the upgrade path
+		time.AfterFunc(KeyRotateGracePeriod, func() {
+			b.Backend.Logger().Debug("cleaning up upgrade keys", "waited", KeyRotateGracePeriod)
+			if err := b.Core.barrier.DestroyUpgrade(b.Core.activeContext, newTerm); err != nil {
+				b.Backend.Logger().Error("failed to destroy upgrade", "term", newTerm, "error", err)
+			}
+		})
+	}
+
+	// Write to the canary path, which will force a synchronous truing during
+	// replication
+	if err := b.Core.barrier.Put(ctx, &logical.StorageEntry{
+		Key:   coreKeyringCanaryPath,
+		Value: []byte(fmt.Sprintf("new-rotation-term-%d", newTerm)),
+	}); err != nil {
+		b.Core.logger.Error("error saving keyring canary", "error", err)
+		return nil, errwrap.Wrapf("failed to save keyring canary: {{err}}", err)
+	}
+
+	return nil, nil
+}
+
+func (b *SystemBackend) handleWrappingPubkey(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	x, _ := b.Core.wrappingJWTKey.X.MarshalText()
+	y, _ := b.Core.wrappingJWTKey.Y.MarshalText()
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"jwt_x":     string(x),
+			"jwt_y":     string(y),
+			"jwt_curve": corePrivateKeyTypeP521,
+		},
+	}, nil
+}
+
+func (b *SystemBackend) handleWrappingWrap(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	if req.WrapInfo == nil || req.WrapInfo.TTL == 0 {
+		return logical.ErrorResponse("endpoint requires response wrapping to be used"), logical.ErrInvalidRequest
+	}
+
+	// N.B.: Do *NOT* allow JWT wrapping tokens to be created through this
+	// endpoint. JWTs are signed so if we don't allow users to create wrapping
+	// tokens using them we can ensure that an operator can't spoof a legit JWT
+	// wrapped token, which makes certain init/rekey/generate-root cases have
+	// better properties.
+	req.WrapInfo.Format = "uuid"
+
+	return &logical.Response{
+		Data: data.Raw,
+	}, nil
+}
+
+// handleWrappingUnwrap will unwrap a response wrapping token or complete a
+// request that required a control group.
+func (b *SystemBackend) handleWrappingUnwrap(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	// If a third party is unwrapping (rather than the calling token being the
+	// wrapping token) we detect this so that we can revoke the original
+	// wrapping token after reading it
+	var thirdParty bool
+
+	token := data.Get("token").(string)
+	if token != "" {
+		thirdParty = true
+	} else {
+		token = req.ClientToken
+	}
+
+	// Get the policies so we can determine if this is a normal response
+	// wrapping request or a control group token.
+	//
+	// We use lookupTainted here because the token might have already been used
+	// by handleRequest(); this happens when it's a normal response wrapping
+	// request and the token was provided "first party". We want to inspect the
+	// token policies but will not use this token entry for anything else.
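handleWrappingWrap and handleWrappingUnwrap above are the two halves of a response-wrapping round trip. A minimal client-side sketch; the 5m TTL is chosen arbitrarily, and SetWrappingLookupFunc is the api client's mechanism for requesting wrapping on a path:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Request a 5-minute wrapping token around an arbitrary payload;
	// this drives handleWrappingWrap.
	client.SetWrappingLookupFunc(func(operation, path string) string {
		if path == "sys/wrapping/wrap" {
			return "5m"
		}
		return ""
	})
	wrapped, err := client.Logical().Write("sys/wrapping/wrap",
		map[string]interface{}{"hello": "world"})
	if err != nil {
		log.Fatal(err)
	}

	// Unwrap drives handleWrappingUnwrap; the single-use wrapping token
	// is consumed, and revoked when a third party unwraps it.
	secret, err := client.Logical().Unwrap(wrapped.WrapInfo.Token)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["hello"]) // world
}

The uuid format forced by the handler is what keeps JWT-format wrapping tokens out of this endpoint, per the N.B. comment above.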
+ te, err := b.Core.tokenStore.lookupTainted(ctx, token) + if err != nil { + return nil, err + } + if te == nil { + return nil, nil + } + if len(te.Policies) != 1 { + return nil, errors.New("token is not a valid unwrap token") + } + + unwrapNS, err := NamespaceByID(ctx, te.NamespaceID, b.Core) + if err != nil { + return nil, err + } + unwrapCtx := namespace.ContextWithNamespace(ctx, unwrapNS) + + var response string + switch te.Policies[0] { + case controlGroupPolicyName: + response, err = controlGroupUnwrap(unwrapCtx, b, token, thirdParty) + case responseWrappingPolicyName: + response, err = b.responseWrappingUnwrap(unwrapCtx, te, thirdParty) + } + if err != nil { + var respErr *logical.Response + if len(response) > 0 { + respErr = logical.ErrorResponse(response) + } + + return respErr, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{}, + } + + // Most of the time we want to just send over the marshalled HTTP bytes. + // However there is a sad separate case: if the original response was using + // bare values we need to use those or else what comes back is garbled. + httpResp := &logical.HTTPResponse{} + err = jsonutil.DecodeJSON([]byte(response), httpResp) + if err != nil { + return nil, errwrap.Wrapf("error decoding wrapped response: {{err}}", err) + } + if httpResp.Data != nil && + (httpResp.Data[logical.HTTPStatusCode] != nil || + httpResp.Data[logical.HTTPRawBody] != nil || + httpResp.Data[logical.HTTPContentType] != nil) { + if httpResp.Data[logical.HTTPStatusCode] != nil { + resp.Data[logical.HTTPStatusCode] = httpResp.Data[logical.HTTPStatusCode] + } + if httpResp.Data[logical.HTTPContentType] != nil { + resp.Data[logical.HTTPContentType] = httpResp.Data[logical.HTTPContentType] + } + + rawBody := httpResp.Data[logical.HTTPRawBody] + if rawBody != nil { + // Decode here so that we can audit properly + switch rawBody.(type) { + case string: + // Best effort decoding; if this works, the original value was + // probably a []byte instead of a string, but was marshaled + // when the value was saved, so this restores it as it was + decBytes, err := base64.StdEncoding.DecodeString(rawBody.(string)) + if err == nil { + // We end up with []byte, will not be HMAC'd + resp.Data[logical.HTTPRawBody] = decBytes + } else { + // We end up with string, will be HMAC'd + resp.Data[logical.HTTPRawBody] = rawBody + } + default: + b.Core.Logger().Error("unexpected type of raw body when decoding wrapped token", "type", fmt.Sprintf("%T", rawBody)) + } + + resp.Data[logical.HTTPRawBodyAlreadyJSONDecoded] = true + } + + return resp, nil + } + + if len(response) == 0 { + resp.Data[logical.HTTPStatusCode] = 204 + } else { + resp.Data[logical.HTTPStatusCode] = 200 + resp.Data[logical.HTTPRawBody] = []byte(response) + resp.Data[logical.HTTPContentType] = "application/json" + } + + return resp, nil +} + +// responseWrappingUnwrap will read the stored response in the cubbyhole and +// return the raw HTTP response. +func (b *SystemBackend) responseWrappingUnwrap(ctx context.Context, te *logical.TokenEntry, thirdParty bool) (string, error) { + tokenID := te.ID + if thirdParty { + // Use the token to decrement the use count to avoid a second operation on the token. 
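The rawBody branch above does a best-effort base64 decode: a value that was originally []byte gets base64-encoded when marshaled, so a clean decode most likely restores the original bytes. A standalone illustration of that pattern (bestEffortDecode is a hypothetical name, not the vendored code), with the inherent caveat that some genuine strings also happen to be valid base64:

package main

import (
	"encoding/base64"
	"fmt"
)

// bestEffortDecode returns the decoded bytes when the input round-trips
// through base64, and the untouched string otherwise.
func bestEffortDecode(raw string) interface{} {
	if decoded, err := base64.StdEncoding.DecodeString(raw); err == nil {
		return decoded // was probably []byte before marshaling
	}
	return raw // a genuine string; keep as-is
}

func main() {
	fmt.Printf("%T\n", bestEffortDecode("aGVsbG8="))    // []uint8
	fmt.Printf("%T\n", bestEffortDecode("not base64!")) // string
}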
+ _, err := b.Core.tokenStore.UseTokenByID(ctx, tokenID) + if err != nil { + return "", errwrap.Wrapf("error decrementing wrapping token's use-count: {{err}}", err) + } + + defer b.Core.tokenStore.revokeOrphan(ctx, tokenID) + } + + cubbyReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "cubbyhole/response", + ClientToken: tokenID, + } + cubbyReq.SetTokenEntry(te) + cubbyResp, err := b.Core.router.Route(ctx, cubbyReq) + if err != nil { + return "", errwrap.Wrapf("error looking up wrapping information: {{err}}", err) + } + if cubbyResp == nil { + return "no information found; wrapping token may be from a previous Vault version", ErrInternalError + } + if cubbyResp != nil && cubbyResp.IsError() { + return cubbyResp.Error().Error(), nil + } + if cubbyResp.Data == nil { + return "wrapping information was nil; wrapping token may be from a previous Vault version", ErrInternalError + } + + responseRaw := cubbyResp.Data["response"] + if responseRaw == nil { + return "", fmt.Errorf("no response found inside the cubbyhole") + } + response, ok := responseRaw.(string) + if !ok { + return "", fmt.Errorf("could not decode response inside the cubbyhole") + } + + return response, nil +} + +func (b *SystemBackend) handleMetrics(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + format := data.Get("format").(string) + if format == "" { + format = metricsutil.FormatFromRequest(req) + } + return b.Core.metricsHelper.ResponseForFormat(format), nil +} + +// handleHostInfo collects and returns host-related information, which includes +// system information, cpu, disk, and memory usage. Any capture-related errors +// returned by the collection method will be returned as response warnings. +func (b *SystemBackend) handleHostInfo(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + resp := &logical.Response{} + info, err := hostutil.CollectHostInfo(ctx) + if err != nil { + // If the error is a HostInfoError, we return them as response warnings + if errs, ok := err.(*multierror.Error); ok { + var warnings []string + for _, mErr := range errs.Errors { + if errwrap.ContainsType(mErr, new(hostutil.HostInfoError)) { + warnings = append(warnings, mErr.Error()) + } else { + // If the error is a multierror, it should only be for + // HostInfoError, but if it's not for any reason, we return + // it as an error to avoid it being swallowed. 
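handleHostInfo above reports collector failures as response warnings rather than hard errors, so a partial result still returns data. A sketch of reading it with the api client (assumes the token is permitted to read sys/host-info):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Read("sys/host-info")
	if err != nil {
		log.Fatal(err)
	}

	// Capture-related errors surface here instead of failing the call.
	for _, w := range secret.Warnings {
		fmt.Println("warning:", w)
	}
	fmt.Println("timestamp:", secret.Data["timestamp"])
}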
+ return nil, err + } + } + resp.Warnings = warnings + } else { + return nil, err + } + } + + if info == nil { + return nil, errors.New("unable to collect host information: nil HostInfo") + } + + respData := map[string]interface{}{ + "timestamp": info.Timestamp, + } + if info.CPU != nil { + respData["cpu"] = info.CPU + } + if info.CPUTimes != nil { + respData["cpu_times"] = info.CPUTimes + } + if info.Disk != nil { + respData["disk"] = info.Disk + } + if info.Host != nil { + respData["host"] = info.Host + } + if info.Memory != nil { + respData["memory"] = info.Memory + } + resp.Data = respData + + return resp, nil +} + +func (b *SystemBackend) handleWrappingLookup(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // This ordering of lookups has been validated already in the wrapping + // validation func, we're just doing this for a safety check + token := data.Get("token").(string) + if token == "" { + token = req.ClientToken + if token == "" { + return logical.ErrorResponse("missing \"token\" value in input"), logical.ErrInvalidRequest + } + } + + te, err := b.Core.tokenStore.lookupTainted(ctx, token) + if err != nil { + return nil, err + } + if te == nil { + return nil, nil + } + if len(te.Policies) != 1 { + return nil, errors.New("token is not a valid unwrap token") + } + + cubbyReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "cubbyhole/wrapinfo", + ClientToken: token, + } + cubbyReq.SetTokenEntry(te) + cubbyResp, err := b.Core.router.Route(ctx, cubbyReq) + if err != nil { + return nil, errwrap.Wrapf("error looking up wrapping information: {{err}}", err) + } + if cubbyResp == nil { + return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil + } + if cubbyResp != nil && cubbyResp.IsError() { + return cubbyResp, nil + } + if cubbyResp.Data == nil { + return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil + } + + creationTTLRaw := cubbyResp.Data["creation_ttl"] + creationTime := cubbyResp.Data["creation_time"] + creationPath := cubbyResp.Data["creation_path"] + + resp := &logical.Response{ + Data: map[string]interface{}{}, + } + if creationTTLRaw != nil { + creationTTL, err := creationTTLRaw.(json.Number).Int64() + if err != nil { + return nil, errwrap.Wrapf("error reading creation_ttl value from wrapping information: {{err}}", err) + } + resp.Data["creation_ttl"] = time.Duration(creationTTL).Seconds() + } + if creationTime != nil { + // This was JSON marshaled so it's already a string in RFC3339 format + resp.Data["creation_time"] = cubbyResp.Data["creation_time"] + } + if creationPath != nil { + resp.Data["creation_path"] = cubbyResp.Data["creation_path"] + } + + return resp, nil +} + +func (b *SystemBackend) handleWrappingRewrap(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // If a third party is rewrapping (rather than the calling token being the + // wrapping token) we detect this so that we can revoke the original + // wrapping token after reading it. Right now wrapped tokens can't unwrap + // themselves, but in case we change it, this will be ready to do the right + // thing. 
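handleWrappingLookup above returns a wrapping token's creation metadata without consuming it, which is the difference from unwrap. A client-side sketch; the wrappingToken value is a placeholder, not a real token:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder: use a token minted by sys/wrapping/wrap or by a
	// wrapped secret request.
	const wrappingToken = "s.example-wrapping-token"

	secret, err := client.Logical().Write("sys/wrapping/lookup",
		map[string]interface{}{"token": wrappingToken})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("creation_ttl: ", secret.Data["creation_ttl"])
	fmt.Println("creation_time:", secret.Data["creation_time"])
	fmt.Println("creation_path:", secret.Data["creation_path"])
}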
+ var thirdParty bool + + token := data.Get("token").(string) + if token != "" { + thirdParty = true + } else { + token = req.ClientToken + } + + te, err := b.Core.tokenStore.lookupTainted(ctx, token) + if err != nil { + return nil, err + } + if te == nil { + return nil, nil + } + if len(te.Policies) != 1 { + return nil, errors.New("token is not a valid unwrap token") + } + + if thirdParty { + // Use the token to decrement the use count to avoid a second operation on the token. + _, err := b.Core.tokenStore.UseTokenByID(ctx, token) + if err != nil { + return nil, errwrap.Wrapf("error decrementing wrapping token's use-count: {{err}}", err) + } + defer b.Core.tokenStore.revokeOrphan(ctx, token) + } + + // Fetch the original TTL + cubbyReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "cubbyhole/wrapinfo", + ClientToken: token, + } + cubbyReq.SetTokenEntry(te) + cubbyResp, err := b.Core.router.Route(ctx, cubbyReq) + if err != nil { + return nil, errwrap.Wrapf("error looking up wrapping information: {{err}}", err) + } + if cubbyResp == nil { + return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil + } + if cubbyResp != nil && cubbyResp.IsError() { + return cubbyResp, nil + } + if cubbyResp.Data == nil { + return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil + } + + // Set the creation TTL on the request + creationTTLRaw := cubbyResp.Data["creation_ttl"] + if creationTTLRaw == nil { + return nil, fmt.Errorf("creation_ttl value in wrapping information was nil") + } + creationTTL, err := cubbyResp.Data["creation_ttl"].(json.Number).Int64() + if err != nil { + return nil, errwrap.Wrapf("error reading creation_ttl value from wrapping information: {{err}}", err) + } + + // Get creation_path to return as the response later + creationPathRaw := cubbyResp.Data["creation_path"] + if creationPathRaw == nil { + return nil, fmt.Errorf("creation_path value in wrapping information was nil") + } + creationPath := creationPathRaw.(string) + + // Fetch the original response and return it as the data for the new response + cubbyReq = &logical.Request{ + Operation: logical.ReadOperation, + Path: "cubbyhole/response", + ClientToken: token, + } + cubbyReq.SetTokenEntry(te) + cubbyResp, err = b.Core.router.Route(ctx, cubbyReq) + if err != nil { + return nil, errwrap.Wrapf("error looking up response: {{err}}", err) + } + if cubbyResp == nil { + return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil + } + if cubbyResp != nil && cubbyResp.IsError() { + return cubbyResp, nil + } + if cubbyResp.Data == nil { + return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil + } + + response := cubbyResp.Data["response"] + if response == nil { + return nil, fmt.Errorf("no response found inside the cubbyhole") + } + + // Return response in "response"; wrapping code will detect the rewrap and + // slot in instead of nesting + return &logical.Response{ + Data: map[string]interface{}{ + "response": response, + }, + WrapInfo: &wrapping.ResponseWrapInfo{ + TTL: time.Duration(creationTTL), + CreationPath: creationPath, + }, + }, nil +} + +func (b *SystemBackend) pathHashWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + inputB64 := d.Get("input").(string) + format := d.Get("format").(string) + algorithm := 
d.Get("urlalgorithm").(string)
+	if algorithm == "" {
+		algorithm = d.Get("algorithm").(string)
+	}
+
+	input, err := base64.StdEncoding.DecodeString(inputB64)
+	if err != nil {
+		return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
+	}
+
+	switch format {
+	case "hex":
+	case "base64":
+	default:
+		return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil
+	}
+
+	var hf hash.Hash
+	switch algorithm {
+	case "sha2-224":
+		hf = sha256.New224()
+	case "sha2-256":
+		hf = sha256.New()
+	case "sha2-384":
+		hf = sha512.New384()
+	case "sha2-512":
+		hf = sha512.New()
+	default:
+		return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
+	}
+	hf.Write(input)
+	retBytes := hf.Sum(nil)
+
+	var retStr string
+	switch format {
+	case "hex":
+		retStr = hex.EncodeToString(retBytes)
+	case "base64":
+		retStr = base64.StdEncoding.EncodeToString(retBytes)
+	}
+
+	// Generate the response
+	resp := &logical.Response{
+		Data: map[string]interface{}{
+			"sum": retStr,
+		},
+	}
+	return resp, nil
+}
+
+func (b *SystemBackend) pathRandomWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	bytes := 0
+	var err error
+	strBytes := d.Get("urlbytes").(string)
+	if strBytes != "" {
+		bytes, err = strconv.Atoi(strBytes)
+		if err != nil {
+			return logical.ErrorResponse(fmt.Sprintf("error parsing url-set byte count: %s", err)), nil
+		}
+	} else {
+		bytes = d.Get("bytes").(int)
+	}
+	format := d.Get("format").(string)
+
+	if bytes < 1 {
+		return logical.ErrorResponse(`"bytes" cannot be less than 1`), nil
+	}
+
+	if bytes > maxBytes {
+		return logical.ErrorResponse(`"bytes" should be less than %d`, maxBytes), nil
+	}
+
+	switch format {
+	case "hex":
+	case "base64":
+	default:
+		return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil
+	}
+
+	randBytes, err := uuid.GenerateRandomBytes(bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	var retStr string
+	switch format {
+	case "hex":
+		retStr = hex.EncodeToString(randBytes)
+	case "base64":
+		retStr = base64.StdEncoding.EncodeToString(randBytes)
+	}
+
+	// Generate the response
+	resp := &logical.Response{
+		Data: map[string]interface{}{
+			"random_bytes": retStr,
+		},
+	}
+	return resp, nil
+}
+
+func hasMountAccess(ctx context.Context, acl *ACL, path string) bool {
+	ns, err := namespace.FromContext(ctx)
+	if err != nil {
+		return false
+	}
+
+	// If a policy is giving us direct access to the mount path then we can do
+	// a fast return.
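pathHashWrite and pathRandomWrite above back sys/tools/hash and sys/tools/random. A sketch of calling both through the api client; note the hash input must be base64, and the algorithm or byte count may ride in the URL (the urlalgorithm/urlbytes fields read above):

package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// "sha2-256" arrives via the urlalgorithm field; input is base64.
	sum, err := client.Logical().Write("sys/tools/hash/sha2-256",
		map[string]interface{}{
			"input":  base64.StdEncoding.EncodeToString([]byte("daytona")),
			"format": "hex",
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("sum:", sum.Data["sum"])

	// "32" arrives via the urlbytes field; it must be at least 1 and
	// no more than maxBytes.
	rnd, err := client.Logical().Write("sys/tools/random/32",
		map[string]interface{}{"format": "base64"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("random_bytes:", rnd.Data["random_bytes"])
}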
+ capabilities := acl.Capabilities(ctx, ns.TrimmedPath(path)) + if !strutil.StrListContains(capabilities, DenyCapability) { + return true + } + + var aclCapabilitiesGiven bool + walkFn := func(s string, v interface{}) bool { + if v == nil { + return false + } + + perms := v.(*ACLPermissions) + + switch { + case perms.CapabilitiesBitmap&DenyCapabilityInt > 0: + return false + + case perms.CapabilitiesBitmap&CreateCapabilityInt > 0, + perms.CapabilitiesBitmap&DeleteCapabilityInt > 0, + perms.CapabilitiesBitmap&ListCapabilityInt > 0, + perms.CapabilitiesBitmap&ReadCapabilityInt > 0, + perms.CapabilitiesBitmap&SudoCapabilityInt > 0, + perms.CapabilitiesBitmap&UpdateCapabilityInt > 0: + + aclCapabilitiesGiven = true + + return true + } + + return false + } + + acl.exactRules.WalkPrefix(path, walkFn) + if !aclCapabilitiesGiven { + acl.prefixRules.WalkPrefix(path, walkFn) + } + + if !aclCapabilitiesGiven { + if perms := acl.CheckAllowedFromNonExactPaths(path, true); perms != nil { + return true + } + } + + return aclCapabilitiesGiven +} + +func (b *SystemBackend) pathInternalUIMountsRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: make(map[string]interface{}), + } + + secretMounts := make(map[string]interface{}) + authMounts := make(map[string]interface{}) + resp.Data["secret"] = secretMounts + resp.Data["auth"] = authMounts + + var acl *ACL + var isAuthed bool + if req.ClientToken != "" { + isAuthed = true + + var entity *identity.Entity + var te *logical.TokenEntry + // Load the ACL policies so we can walk the prefix for this mount + acl, te, entity, _, err = b.Core.fetchACLTokenEntryAndEntity(ctx, req) + if err != nil { + return nil, err + } + if entity != nil && entity.Disabled { + b.logger.Warn("permission denied as the entity on the token is disabled") + return nil, logical.ErrPermissionDenied + } + if te != nil && te.EntityID != "" && entity == nil { + b.logger.Warn("permission denied as the entity on the token is invalid") + return nil, logical.ErrPermissionDenied + } + } + + hasAccess := func(ctx context.Context, me *MountEntry) bool { + if me.Config.ListingVisibility == ListingVisibilityUnauth { + return true + } + + if isAuthed { + return hasMountAccess(ctx, acl, me.Namespace().Path+me.Path) + } + + return false + } + + b.Core.mountsLock.RLock() + for _, entry := range b.Core.mounts.Entries { + filtered, err := b.Core.checkReplicatedFiltering(ctx, entry, "") + if err != nil { + b.Core.mountsLock.RUnlock() + return nil, err + } + if filtered { + continue + } + + if ns.ID == entry.NamespaceID && hasAccess(ctx, entry) { + if isAuthed { + // If this is an authed request return all the mount info + secretMounts[entry.Path] = mountInfo(entry) + } else { + secretMounts[entry.Path] = map[string]interface{}{ + "type": entry.Type, + "description": entry.Description, + "options": entry.Options, + } + } + } + } + b.Core.mountsLock.RUnlock() + + b.Core.authLock.RLock() + for _, entry := range b.Core.auth.Entries { + filtered, err := b.Core.checkReplicatedFiltering(ctx, entry, credentialRoutePrefix) + if err != nil { + b.Core.authLock.RUnlock() + return nil, err + } + if filtered { + continue + } + + if ns.ID == entry.NamespaceID && hasAccess(ctx, entry) { + if isAuthed { + // If this is an authed request return all the mount info + authMounts[entry.Path] = mountInfo(entry) + } else { + authMounts[entry.Path] = 
map[string]interface{}{ + "type": entry.Type, + "description": entry.Description, + "options": entry.Options, + } + } + } + } + b.Core.authLock.RUnlock() + + return resp, nil +} + +func (b *SystemBackend) pathInternalUIMountRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + path := d.Get("path").(string) + if path == "" { + return logical.ErrorResponse("path not set"), logical.ErrInvalidRequest + } + path = sanitizeMountPath(path) + + errResp := logical.ErrorResponse(fmt.Sprintf("preflight capability check returned 403, please ensure client's policies grant access to path %q", path)) + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + me := b.Core.router.MatchingMountEntry(ctx, path) + if me == nil { + // Return a permission denied error here so this path cannot be used to + // brute force a list of mounts. + return errResp, logical.ErrPermissionDenied + } + + filtered, err := b.Core.checkReplicatedFiltering(ctx, me, "") + if err != nil { + return nil, err + } + if filtered { + return errResp, logical.ErrPermissionDenied + } + + resp := &logical.Response{ + Data: mountInfo(me), + } + resp.Data["path"] = me.Path + if ns.ID != me.Namespace().ID { + resp.Data["path"] = me.Namespace().Path + me.Path + } + + // Load the ACL policies so we can walk the prefix for this mount + acl, te, entity, _, err := b.Core.fetchACLTokenEntryAndEntity(ctx, req) + if err != nil { + return nil, err + } + if entity != nil && entity.Disabled { + b.logger.Warn("permission denied as the entity on the token is disabled") + return errResp, logical.ErrPermissionDenied + } + if te != nil && te.EntityID != "" && entity == nil { + b.logger.Warn("permission denied as the entity on the token is invalid") + return nil, logical.ErrPermissionDenied + } + + if !hasMountAccess(ctx, acl, ns.Path+me.Path) { + return errResp, logical.ErrPermissionDenied + } + + return resp, nil +} + +func (b *SystemBackend) pathInternalCountersRequests(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + counters, err := b.Core.loadAllRequestCounters(ctx, time.Now()) + if err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "counters": counters, + }, + } + + return resp, nil +} + +func (b *SystemBackend) pathInternalCountersTokens(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + activeTokens, err := b.Core.countActiveTokens(ctx) + if err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "counters": activeTokens, + }, + } + + return resp, nil +} + +func (b *SystemBackend) pathInternalCountersEntities(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + activeEntities, err := b.Core.countActiveEntities(ctx) + if err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "counters": activeEntities, + }, + } + + return resp, nil +} + +func (b *SystemBackend) pathInternalUIResultantACL(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if req.ClientToken == "" { + // 204 -- no ACL + return nil, nil + } + + acl, te, entity, _, err := b.Core.fetchACLTokenEntryAndEntity(ctx, req) + if err != nil { + return nil, err + } + + if entity != nil && entity.Disabled { + b.logger.Warn("permission denied as the entity on the token is disabled") + return 
logical.ErrorResponse(logical.ErrPermissionDenied.Error()), nil + } + if te != nil && te.EntityID != "" && entity == nil { + b.logger.Warn("permission denied as the entity on the token is invalid") + return logical.ErrorResponse(logical.ErrPermissionDenied.Error()), nil + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "root": false, + }, + } + + if acl.root { + resp.Data["root"] = true + return resp, nil + } + + exact := map[string]interface{}{} + glob := map[string]interface{}{} + + walkFn := func(pt map[string]interface{}, s string, v interface{}) { + if v == nil { + return + } + + perms := v.(*ACLPermissions) + capabilities := []string{} + + if perms.CapabilitiesBitmap&CreateCapabilityInt > 0 { + capabilities = append(capabilities, CreateCapability) + } + if perms.CapabilitiesBitmap&DeleteCapabilityInt > 0 { + capabilities = append(capabilities, DeleteCapability) + } + if perms.CapabilitiesBitmap&ListCapabilityInt > 0 { + capabilities = append(capabilities, ListCapability) + } + if perms.CapabilitiesBitmap&ReadCapabilityInt > 0 { + capabilities = append(capabilities, ReadCapability) + } + if perms.CapabilitiesBitmap&SudoCapabilityInt > 0 { + capabilities = append(capabilities, SudoCapability) + } + if perms.CapabilitiesBitmap&UpdateCapabilityInt > 0 { + capabilities = append(capabilities, UpdateCapability) + } + + // If "deny" is explicitly set or if the path has no capabilities at all, + // set the path capabilities to "deny" + if perms.CapabilitiesBitmap&DenyCapabilityInt > 0 || len(capabilities) == 0 { + capabilities = []string{DenyCapability} + } + + res := map[string]interface{}{} + if len(capabilities) > 0 { + res["capabilities"] = capabilities + } + if perms.MinWrappingTTL != 0 { + res["min_wrapping_ttl"] = int64(perms.MinWrappingTTL.Seconds()) + } + if perms.MaxWrappingTTL != 0 { + res["max_wrapping_ttl"] = int64(perms.MaxWrappingTTL.Seconds()) + } + if len(perms.AllowedParameters) > 0 { + res["allowed_parameters"] = perms.AllowedParameters + } + if len(perms.DeniedParameters) > 0 { + res["denied_parameters"] = perms.DeniedParameters + } + if len(perms.RequiredParameters) > 0 { + res["required_parameters"] = perms.RequiredParameters + } + + pt[s] = res + } + + exactWalkFn := func(s string, v interface{}) bool { + walkFn(exact, s, v) + return false + } + + globWalkFn := func(s string, v interface{}) bool { + walkFn(glob, s, v) + return false + } + + acl.exactRules.Walk(exactWalkFn) + acl.prefixRules.Walk(globWalkFn) + + resp.Data["exact_paths"] = exact + resp.Data["glob_paths"] = glob + + return resp, nil +} + +func (b *SystemBackend) pathInternalOpenAPI(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + + // Limit output to authorized paths + resp, err := b.pathInternalUIMountsRead(ctx, req, d) + if err != nil { + return nil, err + } + + context := d.Get("context").(string) + + // Set up target document and convert to map[string]interface{} which is what will + // be received from plugin backends. 
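pathInternalUIResultantACL above answers with 204 and no body when the request carries no client token, which the api client surfaces as a nil secret. A sketch of reading the resulting ACL view:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Read("sys/internal/ui/resultant-acl")
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("no ACL returned (request was unauthenticated)")
	}

	fmt.Println("root:", secret.Data["root"])
	if paths, ok := secret.Data["exact_paths"].(map[string]interface{}); ok {
		for path, caps := range paths {
			fmt.Println(path, "=>", caps)
		}
	}
}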
+	doc := framework.NewOASDocument()
+
+	procMountGroup := func(group, mountPrefix string) error {
+		for mount := range resp.Data[group].(map[string]interface{}) {
+			backend := b.Core.router.MatchingBackend(ctx, mountPrefix+mount)
+
+			if backend == nil {
+				continue
+			}
+
+			req := &logical.Request{
+				Operation: logical.HelpOperation,
+				Storage:   req.Storage,
+			}
+
+			resp, err := backend.HandleRequest(ctx, req)
+			if err != nil {
+				return err
+			}
+
+			var backendDoc *framework.OASDocument
+
+			// Normalize response type, which will be different if received
+			// from an external plugin.
+			switch v := resp.Data["openapi"].(type) {
+			case *framework.OASDocument:
+				backendDoc = v
+			case map[string]interface{}:
+				backendDoc, err = framework.NewOASDocumentFromMap(v)
+				if err != nil {
+					return err
+				}
+			default:
+				continue
+			}
+
+			// Prepare to add tags to default builtins that are
+			// type "unknown" and won't already be tagged.
+			var tag string
+			switch mountPrefix + mount {
+			case "cubbyhole/", "secret/":
+				tag = "secrets"
+			case "sys/":
+				tag = "system"
+			case "auth/token/":
+				tag = "auth"
+			case "identity/":
+				tag = "identity"
+			}
+
+			// Merge backend paths with existing document
+			for path, obj := range backendDoc.Paths {
+				path := strings.TrimPrefix(path, "/")
+
+				// Add tags to all of the operations if necessary
+				if tag != "" {
+					for _, op := range []*framework.OASOperation{obj.Get, obj.Post, obj.Delete} {
+						// TODO: a special override for identity is used here because the backend
+						// is currently categorized as "secret", which will likely change. Also of interest
+						// is removing all tag handling here and providing the mount information to OpenAPI.
+						if op != nil && (len(op.Tags) == 0 || tag == "identity") {
+							op.Tags = []string{tag}
+						}
+					}
+				}
+
+				doc.Paths["/"+mountPrefix+mount+path] = obj
+			}
+		}
+		return nil
+	}
+
+	if err := procMountGroup("secret", ""); err != nil {
+		return nil, err
+	}
+	if err := procMountGroup("auth", "auth/"); err != nil {
+		return nil, err
+	}
+
+	doc.CreateOperationIDs(context)
+
+	buf, err := json.Marshal(doc)
+	if err != nil {
+		return nil, err
+	}
+
+	resp = &logical.Response{
+		Data: map[string]interface{}{
+			logical.HTTPStatusCode:  200,
+			logical.HTTPRawBody:     buf,
+			logical.HTTPContentType: "application/json",
+		},
+	}
+
+	return resp, nil
+}
+
+func sanitizeMountPath(path string) string {
+	if !strings.HasSuffix(path, "/") {
+		path += "/"
+	}
+
+	if strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+
+	return path
+}
+
+func checkListingVisibility(visibility ListingVisibilityType) error {
+	switch visibility {
+	case ListingVisibilityDefault:
+	case ListingVisibilityHidden:
+	case ListingVisibilityUnauth:
+	default:
+		return fmt.Errorf("invalid listing visibility type")
+	}
+
+	return nil
+}
+
+const sysHelpRoot = `
+The system backend is built-in to Vault and cannot be remounted or
+unmounted. It contains the paths that are used to configure Vault itself
+as well as perform core operations.
+`
+
+// sysHelp is all the help text for the sys backend.
+var sysHelp = map[string][2]string{
+	"license": {
+		"Sets the license of the server.",
+		`
+The path responds to the following HTTP methods.
+
+    GET /
+        Returns information on the installed license
+
+    POST
+        Sets the license for the server
+    `,
+	},
+	"config/cors": {
+		"Configures or returns the current configuration of CORS settings.",
+		`
+This path responds to the following HTTP methods.
+
+    GET /
+        Returns the configuration of the CORS setting.
+
+    POST /
+        Sets the comma-separated list of origins that can make cross-origin requests.
+
+    DELETE /
+        Clears the CORS configuration and disables acceptance of CORS requests.
+    `,
+	},
+	"config/ui/headers": {
+		"Configures response headers that should be returned from the UI.",
+		`
+This path responds to the following HTTP methods.
+    GET /<header>
+        Returns the header value.
+    POST /<header>
+        Sets the header value for the UI.
+    DELETE /<header>
+        Clears the header value for UI.
+
+    LIST /
+        List the headers configured for the UI.
+    `,
+	},
+	"init": {
+		"Initializes or returns the initialization status of the Vault.",
+		`
+This path responds to the following HTTP methods.
+
+    GET /
+        Returns the initialization status of the Vault.
+
+    POST /
+        Initializes a new vault.
+    `,
+	},
+	"health": {
+		"Checks the health status of the Vault.",
+		`
+This path responds to the following HTTP methods.
+
+    GET /
+        Returns health information about the Vault.
+    `,
+	},
+	"generate-root": {
+		"Reads, generates, or deletes a root token regeneration process.",
+		`
+This path responds to multiple HTTP methods which change the behavior. Those
+HTTP methods are listed below.
+
+    GET /attempt
+        Reads the configuration and progress of the current root generation
+        attempt.
+
+    POST /attempt
+        Initializes a new root generation attempt. Only a single root generation
+        attempt can take place at a time. One (and only one) of otp or pgp_key
+        are required.
+
+    DELETE /attempt
+        Cancels any in-progress root generation attempt. This clears any
+        progress made. This must be called to change the OTP or PGP key being
+        used.
+    `,
+	},
+	"seal-status": {
+		"Returns the seal status of the Vault.",
+		`
+This path responds to the following HTTP methods.
+
+    GET /
+        Returns the seal status of the Vault. This is an unauthenticated
+        endpoint.
+    `,
+	},
+	"seal": {
+		"Seals the Vault.",
+		`
+This path responds to the following HTTP methods.
+
+    PUT /
+        Seals the Vault.
+    `,
+	},
+	"unseal": {
+		"Unseals the Vault.",
+		`
+This path responds to the following HTTP methods.
+
+    PUT /
+        Unseals the Vault.
+    `,
+	},
+	"mounts": {
+		"List the currently mounted backends.",
+		`
+This path responds to the following HTTP methods.
+
+    GET /
+        Lists all the mounted secret backends.
+
+    GET /<mount point>
+        Get information about the mount at the specified path.
+
+    POST /<mount point>
+        Mount a new secret backend to the mount point in the URL.
+
+    POST /<mount point>/tune
+        Tune configuration parameters for the given mount point.
+
+    DELETE /<mount point>
+        Unmount the specified mount point.
+    `,
+	},
+
+	"mount": {
+		`Mount a new backend at a new path.`,
+		`
+Mount a backend at a new path. A backend can be mounted multiple times at
+multiple paths in order to configure multiple separately configured backends.
+Example: you might have an AWS backend for the east coast, and one for the
+west coast.
+    `,
+	},
+
+	"mount_path": {
+		`The path to mount to. Example: "aws/east"`,
+		"",
+	},
+
+	"mount_type": {
+		`The type of the backend. Example: "passthrough"`,
+		"",
+	},
+
+	"mount_desc": {
+		`User-friendly description for this mount.`,
+		"",
+	},
+
+	"mount_config": {
+		`Configuration for this mount, such as default_lease_ttl
+and max_lease_ttl.`,
+	},
+
+	"mount_local": {
+		`Mark the mount as a local mount, which is not replicated
+and is unaffected by replication.`,
+	},
+
+	"mount_plugin_name": {
+		`Name of the plugin to mount based from the name registered
+in the plugin catalog.`,
+	},
+
+	"mount_options": {
+		`The options to pass into the backend.
Should be a json object with string keys and values.`, + }, + + "seal_wrap": { + `Whether to turn on seal wrapping for the mount.`, + }, + + "external_entropy_access": { + `Whether to give the mount access to Vault's external entropy.`, + }, + + "tune_default_lease_ttl": { + `The default lease TTL for this mount.`, + }, + + "tune_max_lease_ttl": { + `The max lease TTL for this mount.`, + }, + + "tune_audit_non_hmac_request_keys": { + `The list of keys in the request data object that will not be HMAC'ed by audit devices.`, + }, + + "tune_audit_non_hmac_response_keys": { + `The list of keys in the response data object that will not be HMAC'ed by audit devices.`, + }, + + "tune_mount_options": { + `The options to pass into the backend. Should be a json object with string keys and values.`, + }, + + "remount": { + "Move the mount point of an already-mounted backend.", + ` +This path responds to the following HTTP methods. + + POST /sys/remount + Changes the mount point of an already-mounted backend. + `, + }, + + "auth_tune": { + "Tune the configuration parameters for an auth path.", + `Read and write the 'default-lease-ttl' and 'max-lease-ttl' values of +the auth path.`, + }, + + "mount_tune": { + "Tune backend configuration parameters for this mount.", + `Read and write the 'default-lease-ttl' and 'max-lease-ttl' values of +the mount.`, + }, + + "renew": { + "Renew a lease on a secret", + ` +When a secret is read, it may optionally include a lease interval +and a boolean indicating if renew is possible. For secrets that support +lease renewal, this endpoint is used to extend the validity of the +lease and to prevent an automatic revocation. + `, + }, + + "lease_id": { + "The lease identifier to renew. This is included with a lease.", + "", + }, + + "increment": { + "The desired increment in seconds to the lease", + "", + }, + + "revoke": { + "Revoke a leased secret immediately", + ` +When a secret is generated with a lease, it is automatically revoked +at the end of the lease period if not renewed. However, in some cases +you may want to force an immediate revocation. This endpoint can be +used to revoke the secret with the given Lease ID. + `, + }, + + "revoke-sync": { + "Whether or not to perform the revocation synchronously", + ` +If false, the call will return immediately and revocation will be queued; if it +fails, Vault will keep trying. If true, if the revocation fails, Vault will not +automatically try again and will return an error. For revoke-prefix, this +setting will apply to all leases being revoked. For revoke-force, since errors +are ignored, this setting is not supported. +`, + }, + + "revoke-prefix": { + "Revoke all secrets generated in a given prefix", + ` +Revokes all the secrets generated under a given mount prefix. As +an example, "prod/aws/" might be the AWS logical backend, and due to +a change in the "ops" policy, we may want to invalidate all the secrets +generated. We can do a revoke prefix at "prod/aws/ops" to revoke all +the ops secrets. This does a prefix match on the Lease IDs and revokes +all matching leases. + `, + }, + + "revoke-prefix-path": { + `The path to revoke keys under. Example: "prod/aws/ops"`, + "", + }, + + "revoke-force": { + "Revoke all secrets generated in a given prefix, ignoring errors.", + ` +See the path help for 'revoke-prefix'; this behaves the same, except that it +ignores errors encountered during revocation. 
+
+	"revoke-force": {
+		"Revoke all secrets generated in a given prefix, ignoring errors.",
+		`
+See the path help for 'revoke-prefix'; this behaves the same, except that it
+ignores errors encountered during revocation. This can be used in certain
+recovery situations; for instance, when you want to unmount a backend, but it
+is impossible to fix revocation errors and these errors prevent the unmount
+from proceeding. This is a DANGEROUS operation as it removes Vault's oversight
+of external secrets. Access to this prefix should be tightly controlled.
+    `,
+	},
+
+	"revoke-force-path": {
+		`The path to revoke keys under. Example: "prod/aws/ops"`,
+		"",
+	},
+
+	"auth-table": {
+		"List the currently enabled credential backends.",
+		`
+This path responds to the following HTTP methods.
+
+    GET /
+        List the currently enabled credential backends: the name, the type of
+        the backend, and a user-friendly description of the purpose for the
+        credential backend.
+
+    POST /<mount point>
+        Enable a new auth method.
+
+    DELETE /<mount point>
+        Disable the auth method at the given mount point.
+    `,
+	},
+
+	"auth": {
+		`Enable a new credential backend with a name.`,
+		`
+Enable a credential mechanism at a new path. A backend can be mounted multiple times at
+multiple paths in order to configure multiple separately configured backends.
+Example: you might have an OAuth backend for GitHub, and one for Google Apps.
+    `,
+	},
+
+	"auth_path": {
+		`The path to mount to. Cannot be delimited. Example: "user"`,
+		"",
+	},
+
+	"auth_type": {
+		`The type of the backend. Example: "userpass"`,
+		"",
+	},
+
+	"auth_desc": {
+		`User-friendly description for this credential backend.`,
+		"",
+	},
+
+	"auth_config": {
+		`Configuration for this mount, such as plugin_name.`,
+	},
+
+	"auth_plugin": {
+		`Name of the auth plugin to use, based on the name in the plugin catalog.`,
+		"",
+	},
+
+	"auth_options": {
+		`The options to pass into the backend. Should be a JSON object with string keys and values.`,
+	},
+
+	"policy-list": {
+		`List the configured access control policies.`,
+		`
+This path responds to the following HTTP methods.
+
+    GET /
+        List the names of the configured access control policies.
+
+    GET /<name>
+        Retrieve the rules for the named policy.
+
+    PUT /<name>
+        Add or update a policy.
+
+    DELETE /<name>
+        Delete the policy with the given name.
+    `,
+	},
+
+	"policy": {
+		`Read, Modify, or Delete an access control policy.`,
+		`
+Read the rules of an existing policy, create or update the rules of a policy,
+or delete a policy.
+    `,
+	},
+
+	"policy-name": {
+		`The name of the policy. Example: "ops"`,
+		"",
+	},
+
+	"policy-rules": {
+		`The rules of the policy.`,
+		"",
+	},
+
+	"policy-paths": {
+		`The paths on which the policy should be applied.`,
+		"",
+	},
+
+	"policy-enforcement-level": {
+		`The enforcement level to apply to the policy.`,
+		"",
+	},
+
+	"audit-hash": {
+		"The hash of the given string via the given audit backend",
+		"",
+	},
+
+	"audit-table": {
+		"List the currently enabled audit backends.",
+		`
+This path responds to the following HTTP methods.
+
+    GET /
+        List the currently enabled audit backends.
+
+    PUT /<path>
+        Enable an audit backend at the given path.
+
+    DELETE /<path>
+        Disable the given audit backend.
+    `,
+	},
+
+	"audit_path": {
+		`The name of the backend. Cannot be delimited. Example: "mysql"`,
+		"",
+	},
+
+	"audit_type": {
+		`The type of the backend. Example: "mysql"`,
+		"",
+	},
+
+	"audit_desc": {
+		`User-friendly description for this audit backend.`,
+		"",
+	},
+
+	"audit_opts": {
+		`Configuration options for the audit backend.`,
+		"",
+	},
+
+	"audit": {
+		`Enable or disable audit backends.`,
+		`
+Enable a new audit backend or disable an existing backend.
+    `,
+	},
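+
+	// Illustrative sketch only: enabling and disabling an audit device with
+	// the vendored API client, matching the "audit" help above. The device
+	// path and log file location are assumptions; error handling elided:
+	//
+	//	_ = client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{
+	//		Type:    "file",
+	//		Options: map[string]string{"file_path": "/var/log/vault_audit.log"},
+	//	})
+	//	_ = client.Sys().DisableAudit("file")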
+
+	"key-status": {
+		"Provides information about the backend encryption key.",
+		`
+    Provides the current backend encryption key term and installation time.
+    `,
+	},
+
+	"rotate": {
+		"Rotates the backend encryption key used to persist data.",
+		`
+    Rotate generates a new encryption key which is used to encrypt all
+    data going to the storage backend. The old encryption keys are kept so
+    that data encrypted using those keys can still be decrypted.
+    `,
+	},
+
+	"rekey_backup": {
+		"Allows fetching or deleting the backup of the rotated unseal keys.",
+		"",
+	},
+
+	"capabilities": {
+		"Fetches the capabilities of the given token on the given path.",
+		`Returns the capabilities of the given token on the path.
+    The path will be searched for a path match in all the policies associated with the token.`,
+	},
+
+	"capabilities_self": {
+		"Fetches the capabilities of the given token on the given path.",
+		`Returns the capabilities of the client token on the path.
+    The path will be searched for a path match in all the policies associated with the client token.`,
+	},
+
+	"capabilities_accessor": {
+		"Fetches the capabilities of the token associated with the given token, on the given path.",
+		`When there is no access to the token, the token accessor can be used to fetch the token's capabilities
+    on a given path.`,
+	},
+
+	"tidy_leases": {
+		`This endpoint performs cleanup tasks that can be run if certain error
+conditions have occurred.`,
+		`This endpoint performs cleanup tasks that can be run to clean up the
+lease entries after certain error conditions. Usually running this is not
+necessary, and is only required if upgrade notes or support personnel suggest
+it.`,
+	},
+
+	"wrap": {
+		"Response-wraps an arbitrary JSON object.",
+		`Round trips the given input data into a response-wrapped token.`,
+	},
+
+	"wrappubkey": {
+		"Returns pubkeys used in some wrapping formats.",
+		"Returns pubkeys used in some wrapping formats.",
+	},
+
+	"unwrap": {
+		"Unwraps a response-wrapped token.",
+		`Unwraps a response-wrapped token. Unlike simply reading from cubbyhole/response,
+    this provides additional validation on the token, and rather than a JSON-escaped
+    string, the returned response is the exact same as the contained wrapped response.`,
+	},
+
+	"wraplookup": {
+		"Looks up the properties of a response-wrapped token.",
+		`Returns the creation TTL and creation time of a response-wrapped token.`,
+	},
+
+	"rewrap": {
+		"Rotates a response-wrapped token.",
+		`Rotates a response-wrapped token; the output is a new token with the same
+    response wrapped inside and the same creation TTL. The original token is revoked.`,
+	},
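+
+	// Illustrative sketch only: unwrapping a response-wrapped token with the
+	// vendored API client, matching the "unwrap" help above. The token value
+	// is a placeholder:
+	//
+	//	secret, err := client.Logical().Unwrap("s.wrapped-token-value")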
+	"audited-headers-name": {
+		"Configures the headers sent to the audit logs.",
+		`
+This path responds to the following HTTP methods.
+
+    GET /<name>
+        Returns the setting for the header with the given name.
+
+    POST /<name>
+        Enable auditing of the given header.
+
+    DELETE /<name>
+        Disable auditing of the given header.
+    `,
+	},
+	"audited-headers": {
+		"Lists the headers configured to be audited.",
+		`Returns a list of headers that have been configured to be audited.`,
+	},
+	"plugin-catalog-list-all": {
+		"Lists all the plugins known to Vault",
+		`
+This path responds to the following HTTP methods.
+    LIST /
+        Returns a list of names of configured plugins.
+    `,
+	},
+	"plugin-catalog": {
+		"Configures the plugins known to Vault",
+		`
+This path responds to the following HTTP methods.
+    LIST /
+        Returns a list of names of configured plugins.
+
+    GET /<name>
+        Retrieve the metadata for the named plugin.
+
+    PUT /<name>
+        Add or update a plugin.
+
+    DELETE /<name>
+        Delete the plugin with the given name.
+    `,
+	},
+	"plugin-catalog_name": {
+		"The name of the plugin",
+		"",
+	},
+	"plugin-catalog_type": {
+		"The type of the plugin, may be auth, secret, or database",
+		"",
+	},
+	"plugin-catalog_sha-256": {
+		`The SHA256 sum of the executable used in the
+command field. This should be hex-encoded.`,
+		"",
+	},
+	"plugin-catalog_command": {
+		`The command used to start the plugin. The
+executable defined in this command must exist in Vault's
+plugin directory.`,
+		"",
+	},
+	"plugin-catalog_args": {
+		`The args passed to the plugin command.`,
+		"",
+	},
+	"plugin-catalog_env": {
+		`The environment variables passed to the plugin command.
+Each entry is of the form "key=value".`,
+		"",
+	},
+	"leases": {
+		`View or list lease metadata.`,
+		`
+This path responds to the following HTTP methods.
+
+    PUT /
+        Retrieve the metadata for the provided lease id.
+
+    LIST /<prefix>
+        Lists the leases for the named prefix.
+    `,
+	},
+
+	"leases-list-prefix": {
+		`The path to list leases under. Example: "aws/creds/deploy"`,
+		"",
+	},
+	"plugin-reload": {
+		"Reload mounts that use a particular backend plugin.",
+		`Reload mounts that use a particular backend plugin. Either the plugin name
+    or the desired plugin backend mounts must be provided, but not both. In the
+    case that the plugin name is provided, all mounted paths that use that plugin
+    backend will be reloaded.`,
+	},
+	"plugin-backend-reload-plugin": {
+		`The name of the plugin to reload, as registered in the plugin catalog.`,
+		"",
+	},
+	"plugin-backend-reload-mounts": {
+		`The mount paths of the plugin backends to reload.`,
+		"",
+	},
+	"hash": {
+		"Generate a hash sum for input data",
+		"Generates a hash sum of the given algorithm against the given input data.",
+	},
+	"random": {
+		"Generate random bytes",
+		"This function can be used to generate high-entropy random bytes.",
+	},
+	"listing_visibility": {
+		"Determines the visibility of the mount in the UI-specific listing endpoint. Accepted values are 'unauth' and ''.",
+		"",
+	},
+	"passthrough_request_headers": {
+		"A list of headers to whitelist and pass from the request to the plugin.",
+		"",
+	},
+	"allowed_response_headers": {
+		"A list of headers to whitelist and allow a plugin to set on responses.",
+		"",
+	},
+	"token_type": {
+		"The type of token to issue (service or batch).",
+		"",
+	},
+	"raw": {
+		"Write, Read, and Delete data directly in the Storage backend.",
+		"",
+	},
+	"internal-ui-mounts": {
+		"Information about mounts returned according to their tuned visibility. Internal API; its location, inputs, and outputs may change.",
+		"",
+	},
+	"internal-ui-namespaces": {
+		"Information about visible child namespaces. Internal API; its location, inputs, and outputs may change.",
+		`Information about visible child namespaces returned starting from the request's
+    context namespace and filtered based on access from the client token. Internal API;
+    its location, inputs, and outputs may change.`,
+	},
+	"internal-ui-resultant-acl": {
+		"Information about a token's resultant ACL. Internal API; its location, inputs, and outputs may change.",
+		"",
+	},
+	"metrics": {
+		"Export the metrics aggregated for telemetry purposes.",
+		"",
+	},
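+
+	// Illustrative sketch only: reading aggregated telemetry through the
+	// metrics path above with a generic read; the client value is an
+	// assumption (format=prometheus is also accepted as a query parameter):
+	//
+	//	secret, err := client.Logical().Read("sys/metrics")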
+	"internal-counters-requests": {
+		"Count of requests seen by this Vault cluster over time.",
+		"Count of requests seen by this Vault cluster over time. Not included in count: health checks, UI asset requests, requests forwarded from another cluster.",
+	},
+	"internal-counters-tokens": {
+		"Count of active tokens in this Vault cluster.",
+		"Count of active tokens in this Vault cluster.",
+	},
+	"internal-counters-entities": {
+		"Count of active entities in this Vault cluster.",
+		"Count of active entities in this Vault cluster.",
+	},
+	"host-info": {
+		"Information about the host instance that this Vault server is running on.",
+		`Information about the host instance that this Vault server is running on.
+    The information that gets collected includes host hardware information, and CPU,
+    disk, and memory utilization.`,
+	},
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go b/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go
new file mode 100644
index 00000000..14cee1f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go
@@ -0,0 +1,135 @@
+package vault
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	memdb "github.com/hashicorp/go-memdb"
+	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+var (
+	invalidateMFAConfig = func(context.Context, *SystemBackend, string) {}
+
+	sysInvalidate = func(b *SystemBackend) func(context.Context, string) {
+		return nil
+	}
+
+	getSystemSchemas = func() []func() *memdb.TableSchema { return nil }
+
+	getEGPListResponseKeyInfo = func(*SystemBackend, *namespace.Namespace) map[string]interface{} { return nil }
+	addSentinelPolicyData     = func(map[string]interface{}, *Policy) {}
+	inputSentinelPolicyData   = func(*framework.FieldData, *Policy) *logical.Response { return nil }
+
+	controlGroupUnwrap = func(context.Context, *SystemBackend, string, bool) (string, error) {
+		return "", errors.New("control groups unavailable")
+	}
+
+	pathInternalUINamespacesRead = func(b *SystemBackend) framework.OperationFunc {
+		return func(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+			// Short-circuit here if there's no client token provided
+			if req.ClientToken == "" {
+				return nil, fmt.Errorf("client token empty")
+			}
+
+			// Load the ACL policies so we can check for access and filter namespaces
+			_, te, entity, _, err := b.Core.fetchACLTokenEntryAndEntity(ctx, req)
+			if err != nil {
+				return nil, err
+			}
+			if entity != nil && entity.Disabled {
+				b.logger.Warn("permission denied as the entity on the token is disabled")
+				return nil, logical.ErrPermissionDenied
+			}
+			if te != nil && te.EntityID != "" && entity == nil {
+				b.logger.Warn("permission denied as the entity on the token is invalid")
+				return nil, logical.ErrPermissionDenied
+			}
+
+			return logical.ListResponse([]string{""}), nil
+		}
+	}
+
+	pathLicenseRead = func(b *SystemBackend) framework.OperationFunc {
+		return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+			return nil, nil
+		}
+	}
+
+	pathLicenseUpdate = func(b *SystemBackend) framework.OperationFunc {
+		return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+			return nil, nil
+		}
+	}
+
+	entPaths = func(b *SystemBackend) []*framework.Path {
+		return []*framework.Path{
+			{
+				Pattern: "replication/status",
+				Callbacks: map[logical.Operation]framework.OperationFunc{
+					logical.ReadOperation: func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+						resp := &logical.Response{
+							Data: map[string]interface{}{
+								"mode": "disabled",
+							},
+						}
+						return resp, nil
+					},
+				},
+			},
+		}
+	}
+
+	checkRaw = func(b *SystemBackend, path string) error { return nil }
+)
+
+// tuneMountTTLs is used to set the default and max lease TTLs on a mount point
+func (b *SystemBackend) tuneMountTTLs(ctx context.Context, path string, me *MountEntry, newDefault, newMax time.Duration) error {
+	zero := time.Duration(0)
+
+	switch {
+	case newDefault == zero && newMax == zero:
+		// No checks needed
+
+	case newDefault == zero && newMax != zero:
+		// No default/max conflict, no checks needed
+
+	case newDefault != zero && newMax == zero:
+		// No default/max conflict, no checks needed
+
+	case newDefault != zero && newMax != zero:
+		if newMax < newDefault {
+			return fmt.Errorf("backend max lease TTL of %d would be less than backend default lease TTL of %d", int(newMax.Seconds()), int(newDefault.Seconds()))
+		}
+	}
+
+	origMax := me.Config.MaxLeaseTTL
+	origDefault := me.Config.DefaultLeaseTTL
+
+	me.Config.MaxLeaseTTL = newMax
+	me.Config.DefaultLeaseTTL = newDefault
+
+	// Update the mount table
+	var err error
+	switch {
+	case strings.HasPrefix(path, credentialRoutePrefix):
+		err = b.Core.persistAuth(ctx, b.Core.auth, &me.Local)
+	default:
+		err = b.Core.persistMounts(ctx, b.Core.mounts, &me.Local)
+	}
+	if err != nil {
+		me.Config.MaxLeaseTTL = origMax
+		me.Config.DefaultLeaseTTL = origDefault
+		return fmt.Errorf("failed to update mount table, rolling back TTL changes")
+	}
+	if b.Core.logger.IsInfo() {
+		b.Core.logger.Info("mount tuning of leases successful", "path", path)
+	}
+
+	return nil
+}
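+
+// Illustrative sketch only: the TTL rule enforced by tuneMountTTLs above can
+// be driven from the vendored API client. A max lease TTL below the default
+// is rejected; the client value is an assumption, error handling elided:
+//
+//	err := client.Sys().TuneMount("secret/", api.MountConfigInput{
+//		DefaultLeaseTTL: "1h",
+//		MaxLeaseTTL:     "24h", // must be >= DefaultLeaseTTL when both are set
+//	})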
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_paths.go b/vendor/github.com/hashicorp/vault/vault/logical_system_paths.go
new file mode 100644
index 00000000..0613c562
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_system_paths.go
@@ -0,0 +1,1654 @@
+package vault
+
+import (
+	"strings"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func (b *SystemBackend) configPaths() []*framework.Path {
+	return []*framework.Path{
+		{
+			Pattern: "config/cors$",
+
+			Fields: map[string]*framework.FieldSchema{
+				"enable": &framework.FieldSchema{
+					Type:        framework.TypeBool,
+					Description: "Enables or disables CORS headers on requests.",
+				},
+				"allowed_origins": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "A comma-separated string or array of strings indicating origins that may make cross-origin requests.",
+				},
+				"allowed_headers": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "A comma-separated string or array of strings indicating headers that are allowed on cross-origin requests.",
+				},
+			},
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback:    b.handleCORSRead,
+					Summary:     "Return the current CORS settings.",
+					Description: "",
+				},
+				logical.UpdateOperation: &framework.PathOperation{
+					Callback:    b.handleCORSUpdate,
+					Summary:     "Configure the CORS settings.",
+					Description: "",
+				},
+				logical.DeleteOperation: &framework.PathOperation{
+					Callback: b.handleCORSDelete,
+					Summary:  "Remove any CORS settings.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["config/cors"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["config/cors"][1]),
+		},
+
+		{
+			Pattern: "config/state/sanitized$",
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback:    b.handleConfigStateSanitized,
+					Summary:     "Return a sanitized version of the Vault server configuration.",
+					Description: "The sanitized output strips configuration values in the storage, HA storage, and seals stanzas, which may contain sensitive values such as API tokens. It also removes any token or secret fields in other stanzas, such as the circonus_api_token from telemetry.",
+				},
+			},
+		},
+
+		{
+			Pattern: "config/ui/headers/" + framework.GenericNameRegex("header"),
+
+			Fields: map[string]*framework.FieldSchema{
+				"header": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "The name of the header.",
+				},
+				"values": &framework.FieldSchema{
+					Type:        framework.TypeStringSlice,
+					Description: "The values to set the header.",
+				},
+			},
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback: b.handleConfigUIHeadersRead,
+					Summary:  "Return the given UI header's configuration",
+				},
+				logical.UpdateOperation: &framework.PathOperation{
+					Callback: b.handleConfigUIHeadersUpdate,
+					Summary:  "Configure the values to be returned for the UI header.",
+				},
+				logical.DeleteOperation: &framework.PathOperation{
+					Callback: b.handleConfigUIHeadersDelete,
+					Summary:  "Remove a UI header.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["config/ui/headers"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["config/ui/headers"][1]),
+		},
+
+		{
+			Pattern: "config/ui/headers/$",
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ListOperation: &framework.PathOperation{
+					Callback: b.handleConfigUIHeadersList,
+					Summary:  "Return a list of configured UI headers.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["config/ui/headers"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["config/ui/headers"][1]),
+		},
+
+		{
+			Pattern: "generate-root(/attempt)?$",
+			Fields: map[string]*framework.FieldSchema{
+				"pgp_key": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Specifies a base64-encoded PGP public key.",
+				},
+			},
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Summary: "Read the configuration and progress of the current root generation attempt.",
+				},
+				logical.UpdateOperation: &framework.PathOperation{
+					Summary:     "Initializes a new root generation attempt.",
+					Description: "Only a single root generation attempt can take place at a time. One (and only one) of otp or pgp_key is required.",
+				},
+				logical.DeleteOperation: &framework.PathOperation{
+					Summary: "Cancels any in-progress root generation attempt.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["generate-root"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["generate-root"][1]),
+		},
+		{
+			Pattern: "generate-root/update$",
+			Fields: map[string]*framework.FieldSchema{
+				"key": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Specifies a single master key share.",
+				},
+				"nonce": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Specifies the nonce of the attempt.",
+				},
+			},
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.UpdateOperation: &framework.PathOperation{
+					Summary:     "Enter a single master key share to progress the root generation attempt.",
+					Description: "If the threshold number of master key shares is reached, Vault will complete the root generation and issue the new token. Otherwise, this API must be called multiple times until that threshold is met. The attempt nonce must be provided with each call.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["generate-root"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["generate-root"][1]),
+		},
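+
+		// Illustrative sketch only: driving the generate-root flow above with
+		// the vendored API client. The PGP key and key share are placeholders;
+		// each unseal key holder posts a share with the returned nonce:
+		//
+		//	status, _ := client.Sys().GenerateRootInit("", pgpKey)
+		//	status, _ = client.Sys().GenerateRootUpdate(keyShare, status.Nonce)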
+		{
+			Pattern: "health$",
+			Fields: map[string]*framework.FieldSchema{
+				"standbyok": &framework.FieldSchema{
+					Type:        framework.TypeBool,
+					Description: "Specifies if being a standby should still return the active status code.",
+				},
+				"perfstandbyok": &framework.FieldSchema{
+					Type:        framework.TypeBool,
+					Description: "Specifies if being a performance standby should still return the active status code.",
+				},
+				"activecode": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the status code for an active node.",
+				},
+				"standbycode": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the status code for a standby node.",
+				},
+				"drsecondarycode": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the status code for a DR secondary node.",
+				},
+				"performancestandbycode": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the status code for a performance standby node.",
+				},
+				"sealedcode": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the status code for a sealed node.",
+				},
+				"uninitcode": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the status code for an uninitialized node.",
+				},
+			},
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Summary: "Returns the health status of Vault.",
+					Responses: map[int][]framework.Response{
+						200: {{Description: "initialized, unsealed, and active"}},
+						429: {{Description: "unsealed and standby"}},
+						472: {{Description: "data recovery mode replication secondary and active"}},
+						501: {{Description: "not initialized"}},
+						503: {{Description: "sealed"}},
+					},
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["health"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["health"][1]),
+		},
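+
+		// Illustrative sketch only: the health endpoint above via the vendored
+		// API client; the status-code fields map to the query parameters listed:
+		//
+		//	health, err := client.Sys().Health()
+		//	// health.Initialized, health.Sealed, health.Standby, ...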
+
+		{
+			Pattern: "init$",
+			Fields: map[string]*framework.FieldSchema{
+				"pgp_keys": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "Specifies an array of PGP public keys used to encrypt the output unseal keys. Ordering is preserved. The keys must be base64-encoded from their original binary representation. The size of this array must be the same as `secret_shares`.",
+				},
+				"root_token_pgp_key": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Specifies a PGP public key used to encrypt the initial root token. The key must be base64-encoded from its original binary representation.",
+				},
+				"secret_shares": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the number of shares to split the master key into.",
+				},
+				"secret_threshold": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the number of shares required to reconstruct the master key. This must be less than or equal to `secret_shares`. If using Vault HSM with auto-unsealing, this value must be the same as `secret_shares`.",
+				},
+				"stored_shares": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the number of shares that should be encrypted by the HSM and stored for auto-unsealing. Currently must be the same as `secret_shares`.",
+				},
+				"recovery_shares": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the number of shares to split the recovery key into.",
+				},
+				"recovery_threshold": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the number of shares required to reconstruct the recovery key. This must be less than or equal to `recovery_shares`.",
+				},
+				"recovery_pgp_keys": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "Specifies an array of PGP public keys used to encrypt the output recovery keys. Ordering is preserved. The keys must be base64-encoded from their original binary representation. The size of this array must be the same as `recovery_shares`.",
+				},
+			},
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Summary: "Returns the initialization status of Vault.",
+				},
+				logical.UpdateOperation: &framework.PathOperation{
+					Summary:     "Initialize a new Vault.",
+					Description: "The Vault must not have been previously initialized. The recovery options, as well as the stored shares option, are only available when using Vault HSM.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["init"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["init"][1]),
+		},
+		{
+			Pattern: "leader$",
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Summary: "Returns the high availability status and current leader instance of Vault.",
+				},
+			},
+
+			HelpSynopsis: "Check the high availability status and current leader of Vault",
+		},
+		{
+			Pattern: "step-down$",
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.UpdateOperation: &framework.PathOperation{
+					Summary:     "Cause the node to give up active status.",
+					Description: "This endpoint forces the node to give up active status. If the node does not have active status, this endpoint does nothing. Note that the node will sleep for ten seconds before attempting to grab the active lock again, but if no standby nodes grab the active lock in the interim, the same node may become the active node again.",
+					Responses: map[int][]framework.Response{
+						204: {{Description: "empty body"}},
+					},
+				},
+			},
+		},
+	}
+}
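+
+// Illustrative sketch only: checking and performing initialization through
+// the API client, mirroring the init path above; the share counts are
+// examples and error handling is elided:
+//
+//	inited, _ := client.Sys().InitStatus()
+//	if !inited {
+//		resp, _ := client.Sys().Init(&api.InitRequest{SecretShares: 5, SecretThreshold: 3})
+//		// resp.Keys, resp.RootToken
+//	}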
+
+func (b *SystemBackend) rekeyPaths() []*framework.Path {
+	return []*framework.Path{
+		{
+			Pattern: "rekey/init",
+
+			Fields: map[string]*framework.FieldSchema{
+				"secret_shares": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the number of shares to split the master key into.",
+				},
+				"secret_threshold": &framework.FieldSchema{
+					Type:        framework.TypeInt,
+					Description: "Specifies the number of shares required to reconstruct the master key. This must be less than or equal to secret_shares. If using Vault HSM with auto-unsealing, this value must be the same as secret_shares.",
+				},
+				"pgp_keys": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "Specifies an array of PGP public keys used to encrypt the output unseal keys. Ordering is preserved. The keys must be base64-encoded from their original binary representation. The size of this array must be the same as secret_shares.",
+				},
+				"backup": &framework.FieldSchema{
+					Type:        framework.TypeBool,
+					Description: "Specifies if using PGP-encrypted keys, whether Vault should also store a plaintext backup of the PGP-encrypted keys.",
+				},
+				"require_verification": &framework.FieldSchema{
+					Type:        framework.TypeBool,
+					Description: "Turns on verification functionality.",
+				},
+			},
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Summary: "Reads the configuration and progress of the current rekey attempt.",
+				},
+				logical.UpdateOperation: &framework.PathOperation{
+					Summary:     "Initializes a new rekey attempt.",
+					Description: "Only a single rekey attempt can take place at a time, and changing the parameters of a rekey requires canceling and starting a new rekey, which will also provide a new nonce.",
+				},
+				logical.DeleteOperation: &framework.PathOperation{
+					Summary:     "Cancels any in-progress rekey.",
+					Description: "This clears the rekey settings as well as any progress made. This must be called to change the parameters of the rekey. Note: verification is still a part of a rekey. If rekeying is canceled during the verification flow, the current unseal keys remain valid.",
+				},
+			},
+		},
+		{
+			Pattern: "rekey/backup$",
+
+			Fields: map[string]*framework.FieldSchema{},
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback: b.handleRekeyRetrieveBarrier,
+					Summary:  "Return the backup copy of PGP-encrypted unseal keys.",
+				},
+				logical.DeleteOperation: &framework.PathOperation{
+					Callback: b.handleRekeyDeleteBarrier,
+					Summary:  "Delete the backup copy of PGP-encrypted unseal keys.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["rekey_backup"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["rekey_backup"][0]),
+		},
+
+		{
+			Pattern: "rekey/recovery-key-backup$",
+
+			Fields: map[string]*framework.FieldSchema{},
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ReadOperation:   b.handleRekeyRetrieveRecovery,
+				logical.DeleteOperation: b.handleRekeyDeleteRecovery,
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["rekey_backup"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["rekey_backup"][0]),
+		},
+		{
+			Pattern: "rekey/update",
+
+			Fields: map[string]*framework.FieldSchema{
+				"key": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Specifies a single master key share.",
+				},
+				"nonce": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Specifies the nonce of the rekey attempt.",
+				},
+			},
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.UpdateOperation: &framework.PathOperation{
+					Summary: "Enter a single master key share to progress the rekey of the Vault.",
+				},
+			},
+		},
+		{
+			Pattern: "rekey/verify",
+
+			Fields: map[string]*framework.FieldSchema{
+				"key": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Specifies a single master key share from the new set of shares.",
+				},
+				"nonce": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Specifies the nonce of the rekey verification operation.",
+				},
+			},
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Summary: "Read the configuration and progress of the current rekey verification attempt.",
+				},
+				logical.DeleteOperation: &framework.PathOperation{
+					Summary:     "Cancel any in-progress rekey verification operation.",
+					Description: "This clears any progress made and resets the nonce. Unlike a `DELETE` against `sys/rekey/init`, this only resets the current verification operation, not the entire rekey attempt.",
+				},
+				logical.UpdateOperation: &framework.PathOperation{
+					Summary: "Enter a single new key share to progress the rekey verification operation.",
+				},
+			},
+		},
+
+		{
+			Pattern: "seal-status$",
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Summary: "Check the seal status of a Vault.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["seal-status"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["seal-status"][1]),
+		},
+
+		{
+			Pattern: "seal$",
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.UpdateOperation: &framework.PathOperation{
+					Summary: "Seal the Vault.",
+				},
+			},
+			HelpSynopsis:    strings.TrimSpace(sysHelp["seal"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["seal"][1]),
+		},
+
+		{
+			Pattern: "unseal$",
+			Fields: map[string]*framework.FieldSchema{
+				"key": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Specifies a single master key share. This is required unless reset is true.",
+				},
+				"reset": &framework.FieldSchema{
+					Type:        framework.TypeBool,
+					Description: "Specifies if previously-provided unseal keys are discarded and the unseal process is reset.",
+				},
+			},
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.UpdateOperation: &framework.PathOperation{
+					Summary: "Unseal the Vault.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["unseal"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["unseal"][1]),
+		},
+	}
+}
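+
+// Illustrative sketch only: submitting unseal key shares against the unseal
+// path above until the threshold is met; keyShares is a placeholder:
+//
+//	for _, share := range keyShares {
+//		status, _ := client.Sys().Unseal(share)
+//		if status != nil && !status.Sealed {
+//			break
+//		}
+//	}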
+
+func (b *SystemBackend) auditPaths() []*framework.Path {
+	return []*framework.Path{
+		{
+			Pattern: "audit-hash/(?P<path>.+)",
+
+			Fields: map[string]*framework.FieldSchema{
+				"path": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: strings.TrimSpace(sysHelp["audit_path"][0]),
+				},
+
+				"input": &framework.FieldSchema{
+					Type: framework.TypeString,
+				},
+			},
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.UpdateOperation: b.handleAuditHash,
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["audit-hash"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["audit-hash"][1]),
+		},
+
+		{
+			Pattern: "audit$",
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback: b.handleAuditTable,
+					Summary:  "List the enabled audit devices.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["audit-table"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["audit-table"][1]),
+		},
+
+		{
+			Pattern: "audit/(?P<path>.+)",
+
+			Fields: map[string]*framework.FieldSchema{
+				"path": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: strings.TrimSpace(sysHelp["audit_path"][0]),
+				},
+				"type": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: strings.TrimSpace(sysHelp["audit_type"][0]),
+				},
+				"description": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: strings.TrimSpace(sysHelp["audit_desc"][0]),
+				},
+				"options": &framework.FieldSchema{
+					Type:        framework.TypeKVPairs,
+					Description: strings.TrimSpace(sysHelp["audit_opts"][0]),
+				},
+				"local": &framework.FieldSchema{
+					Type:        framework.TypeBool,
+					Default:     false,
+					Description: strings.TrimSpace(sysHelp["mount_local"][0]),
+				},
+			},
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.UpdateOperation: &framework.PathOperation{
+					Callback: b.handleEnableAudit,
+					Summary:  "Enable a new audit device at the supplied path.",
+				},
+				logical.DeleteOperation: &framework.PathOperation{
+					Callback: b.handleDisableAudit,
+					Summary:  "Disable the audit device at the given path.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["audit"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["audit"][1]),
+		},
+
+		{
+			Pattern: "config/auditing/request-headers/(?P<header>.+)",
+
+			Fields: map[string]*framework.FieldSchema{
+				"header": &framework.FieldSchema{
+					Type: framework.TypeString,
+				},
+				"hmac": &framework.FieldSchema{
+					Type: framework.TypeBool,
+				},
+			},
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.UpdateOperation: &framework.PathOperation{
+					Callback: b.handleAuditedHeaderUpdate,
+					Summary:  "Enable auditing of a header.",
+				},
+				logical.DeleteOperation: &framework.PathOperation{
+					Callback: b.handleAuditedHeaderDelete,
+					Summary:  "Disable auditing of the given request header.",
+				},
+				logical.ReadOperation: &framework.PathOperation{
+					Callback: b.handleAuditedHeaderRead,
+					Summary:  "List the information for the given request header.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["audited-headers-name"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["audited-headers-name"][1]),
+		},
+
+		{
+			Pattern: "config/auditing/request-headers$",
+
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback: b.handleAuditedHeadersRead,
+					Summary:  "List the request headers that are configured to be audited.",
+				},
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["audited-headers"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["audited-headers"][1]),
+		},
+	}
+}
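+
+// Illustrative sketch only: configuring an audited request header via the
+// paths above, using a generic write; the header name is an example:
+//
+//	_, err := client.Logical().Write("sys/config/auditing/request-headers/X-Request-Id",
+//		map[string]interface{}{"hmac": true})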
+
+func (b *SystemBackend) sealPaths() []*framework.Path {
+	return []*framework.Path{
+		{
+			Pattern: "key-status$",
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ReadOperation: b.handleKeyStatus,
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["key-status"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["key-status"][1]),
+		},
+
+		{
+			Pattern: "rotate$",
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.UpdateOperation: b.handleRotate,
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["rotate"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["rotate"][1]),
+		},
+	}
+}
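+
+// Illustrative sketch only: reading the active key term and rotating the
+// barrier encryption key via the paths above; error handling elided:
+//
+//	status, _ := client.Sys().KeyStatus()
+//	_ = client.Sys().Rotate()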
name.", + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handlePluginCatalogRead, + Summary: "Return the configuration data for the plugin with the given name.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog"][0]), + HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]), + } +} + +func (b *SystemBackend) pluginsCatalogListPaths() []*framework.Path { + return []*framework.Path{ + { + Pattern: "plugins/catalog/(?Pauth|database|secret)/?$", + + Fields: map[string]*framework.FieldSchema{ + "type": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["plugin-catalog_type"][0]), + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.handlePluginCatalogTypedList, + Summary: "List the plugins in the catalog.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog"][0]), + HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]), + }, + { + Pattern: "plugins/catalog/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.handlePluginCatalogUntypedList, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog-list-all"][0]), + HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog-list-all"][1]), + }, + } +} + +func (b *SystemBackend) pluginsReloadPath() *framework.Path { + return &framework.Path{ + Pattern: "plugins/reload/backend$", + + Fields: map[string]*framework.FieldSchema{ + "plugin": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["plugin-backend-reload-plugin"][0]), + }, + "mounts": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: strings.TrimSpace(sysHelp["plugin-backend-reload-mounts"][0]), + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handlePluginReloadUpdate, + Summary: "Reload mounted plugin backends.", + Description: "Either the plugin name (`plugin`) or the desired plugin backend mounts (`mounts`) must be provided, but not both. In the case that the plugin name is provided, all mounted paths that use that plugin backend will be reloaded.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["plugin-reload"][0]), + HelpDescription: strings.TrimSpace(sysHelp["plugin-reload"][1]), + } +} + +func (b *SystemBackend) toolsPaths() []*framework.Path { + return []*framework.Path{ + { + Pattern: "tools/hash" + framework.OptionalParamRegex("urlalgorithm"), + Fields: map[string]*framework.FieldSchema{ + "input": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The base64-encoded input data", + }, + + "algorithm": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "sha2-256", + Description: `Algorithm to use (POST body parameter). Valid values are: + + * sha2-224 + * sha2-256 + * sha2-384 + * sha2-512 + + Defaults to "sha2-256".`, + }, + + "urlalgorithm": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Algorithm to use (POST URL parameter)`, + }, + + "format": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "hex", + Description: `Encoding format to use. Can be "hex" or "base64". 
Defaults to "hex".`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathHashWrite, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["hash"][0]), + HelpDescription: strings.TrimSpace(sysHelp["hash"][1]), + }, + + { + Pattern: "tools/random" + framework.OptionalParamRegex("urlbytes"), + Fields: map[string]*framework.FieldSchema{ + "urlbytes": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The number of bytes to generate (POST URL parameter)", + }, + + "bytes": &framework.FieldSchema{ + Type: framework.TypeInt, + Default: 32, + Description: "The number of bytes to generate (POST body parameter). Defaults to 32 (256 bits).", + }, + + "format": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "base64", + Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "base64".`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRandomWrite, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["random"][0]), + HelpDescription: strings.TrimSpace(sysHelp["random"][1]), + }, + } +} + +func (b *SystemBackend) internalPaths() []*framework.Path { + return []*framework.Path{ + { + Pattern: "internal/specs/openapi", + Fields: map[string]*framework.FieldSchema{ + "context": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Context string appended to every operationId", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathInternalOpenAPI, + logical.UpdateOperation: b.pathInternalOpenAPI, + }, + }, + { + Pattern: "internal/specs/openapi", + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathInternalOpenAPI, + Summary: "Generate an OpenAPI 3 document of all mounted paths.", + }, + }, + }, + { + Pattern: "internal/ui/mounts", + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathInternalUIMountsRead, + Summary: "Lists all enabled and visible auth and secrets mounts.", + }, + }, + HelpSynopsis: strings.TrimSpace(sysHelp["internal-ui-mounts"][0]), + HelpDescription: strings.TrimSpace(sysHelp["internal-ui-mounts"][1]), + }, + { + Pattern: "internal/ui/mounts/(?P.+)", + Fields: map[string]*framework.FieldSchema{ + "path": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The path of the mount.", + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathInternalUIMountRead, + Summary: "Return information about the given mount.", + }, + }, + HelpSynopsis: strings.TrimSpace(sysHelp["internal-ui-mounts"][0]), + HelpDescription: strings.TrimSpace(sysHelp["internal-ui-mounts"][1]), + }, + { + Pattern: "internal/ui/namespaces", + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: pathInternalUINamespacesRead(b), + Unpublished: true, + }, + }, + HelpSynopsis: strings.TrimSpace(sysHelp["internal-ui-namespaces"][0]), + HelpDescription: strings.TrimSpace(sysHelp["internal-ui-namespaces"][1]), + }, + { + Pattern: "internal/ui/resultant-acl", + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathInternalUIResultantACL, + Unpublished: true, + }, + }, + HelpSynopsis: 
+
+func (b *SystemBackend) internalPaths() []*framework.Path {
+	return []*framework.Path{
+		{
+			Pattern: "internal/specs/openapi",
+			Fields: map[string]*framework.FieldSchema{
+				"context": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Context string appended to every operationId",
+				},
+			},
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ReadOperation:   b.pathInternalOpenAPI,
+				logical.UpdateOperation: b.pathInternalOpenAPI,
+			},
+		},
+		{
+			Pattern: "internal/specs/openapi",
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback: b.pathInternalOpenAPI,
+					Summary:  "Generate an OpenAPI 3 document of all mounted paths.",
+				},
+			},
+		},
+		{
+			Pattern: "internal/ui/mounts",
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback: b.pathInternalUIMountsRead,
+					Summary:  "Lists all enabled and visible auth and secrets mounts.",
+				},
+			},
+			HelpSynopsis:    strings.TrimSpace(sysHelp["internal-ui-mounts"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["internal-ui-mounts"][1]),
+		},
+		{
+			Pattern: "internal/ui/mounts/(?P<path>.+)",
+			Fields: map[string]*framework.FieldSchema{
+				"path": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "The path of the mount.",
+				},
+			},
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback: b.pathInternalUIMountRead,
+					Summary:  "Return information about the given mount.",
+				},
+			},
+			HelpSynopsis:    strings.TrimSpace(sysHelp["internal-ui-mounts"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["internal-ui-mounts"][1]),
+		},
+		{
+			Pattern: "internal/ui/namespaces",
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback:    pathInternalUINamespacesRead(b),
+					Unpublished: true,
+				},
+			},
+			HelpSynopsis:    strings.TrimSpace(sysHelp["internal-ui-namespaces"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["internal-ui-namespaces"][1]),
+		},
+		{
+			Pattern: "internal/ui/resultant-acl",
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback:    b.pathInternalUIResultantACL,
+					Unpublished: true,
+				},
+			},
+			HelpSynopsis:    strings.TrimSpace(sysHelp["internal-ui-resultant-acl"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["internal-ui-resultant-acl"][1]),
+		},
+		{
+			Pattern: "internal/counters/requests",
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback:    b.pathInternalCountersRequests,
+					Unpublished: true,
+				},
+			},
+			HelpSynopsis:    strings.TrimSpace(sysHelp["internal-counters-requests"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["internal-counters-requests"][1]),
+		},
+		{
+			Pattern: "internal/counters/tokens",
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback:    b.pathInternalCountersTokens,
+					Unpublished: true,
+				},
+			},
+			HelpSynopsis:    strings.TrimSpace(sysHelp["internal-counters-tokens"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["internal-counters-tokens"][1]),
+		},
+		{
+			Pattern: "internal/counters/entities",
+			Operations: map[logical.Operation]framework.OperationHandler{
+				logical.ReadOperation: &framework.PathOperation{
+					Callback:    b.pathInternalCountersEntities,
+					Unpublished: true,
+				},
+			},
+			HelpSynopsis:    strings.TrimSpace(sysHelp["internal-counters-entities"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["internal-counters-entities"][1]),
+		},
+	}
+}
+
+func (b *SystemBackend) capabilitiesPaths() []*framework.Path {
+	return []*framework.Path{
+		{
+			Pattern: "capabilities-accessor$",
+
+			Fields: map[string]*framework.FieldSchema{
+				"accessor": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Accessor of the token for which capabilities are being queried.",
+				},
+				"path": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "Use 'paths' instead.",
+					Deprecated:  true,
+				},
+				"paths": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "Paths on which capabilities are being queried.",
+				},
+			},
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.UpdateOperation: b.handleCapabilitiesAccessor,
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["capabilities_accessor"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["capabilities_accessor"][1]),
+		},
+
+		{
+			Pattern: "capabilities$",
+
+			Fields: map[string]*framework.FieldSchema{
+				"token": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Token for which capabilities are being queried.",
+				},
+				"path": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "Use 'paths' instead.",
+					Deprecated:  true,
+				},
+				"paths": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "Paths on which capabilities are being queried.",
+				},
+			},
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.UpdateOperation: b.handleCapabilities,
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["capabilities"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["capabilities"][1]),
+		},
+
+		{
+			Pattern: "capabilities-self$",
+
+			Fields: map[string]*framework.FieldSchema{
+				"token": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "Token for which capabilities are being queried.",
+				},
+				"path": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "Use 'paths' instead.",
+					Deprecated:  true,
+				},
+				"paths": &framework.FieldSchema{
+					Type:        framework.TypeCommaStringSlice,
+					Description: "Paths on which capabilities are being queried.",
+				},
+			},
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.UpdateOperation: b.handleCapabilities,
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["capabilities_self"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["capabilities_self"][1]),
+		},
+	}
+}
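+
+// Illustrative sketch only: querying the calling token's capabilities on a
+// path via the capabilities-self endpoint above; the path is an example:
+//
+//	caps, err := client.Sys().CapabilitiesSelf("secret/data/app")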
&framework.PathOperation{ + Callback: b.handleRevokeForce, + Summary: "Revokes all secrets or tokens generated under a given prefix immediately", + Description: "Unlike `/sys/leases/revoke-prefix`, this path ignores backend errors encountered during revocation. This is potentially very dangerous and should only be used in specific emergency situations where errors in the backend or the connected backend service prevent normal revocation.\n\nBy ignoring these errors, Vault abdicates responsibility for ensuring that the issued credentials or secrets are properly revoked and/or cleaned up. Access to this endpoint should be tightly controlled.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["revoke-force"][0]), + HelpDescription: strings.TrimSpace(sysHelp["revoke-force"][1]), + }, + + { + Pattern: "(leases/)?revoke-prefix/(?P.+)", + + Fields: map[string]*framework.FieldSchema{ + "prefix": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["revoke-prefix-path"][0]), + }, + "sync": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: true, + Description: strings.TrimSpace(sysHelp["revoke-sync"][0]), + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handleRevokePrefix, + Summary: "Revokes all secrets (via a lease ID prefix) or tokens (via the tokens' path property) generated under a given prefix immediately.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["revoke-prefix"][0]), + HelpDescription: strings.TrimSpace(sysHelp["revoke-prefix"][1]), + }, + + { + Pattern: "leases/tidy$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.handleTidyLeases, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["tidy_leases"][0]), + HelpDescription: strings.TrimSpace(sysHelp["tidy_leases"][1]), + }, + } +} + +func (b *SystemBackend) remountPath() *framework.Path { + return &framework.Path{ + Pattern: "remount", + + Fields: map[string]*framework.FieldSchema{ + "from": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The previous mount point.", + }, + "to": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The new mount point.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.handleRemount, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["remount"][0]), + HelpDescription: strings.TrimSpace(sysHelp["remount"][1]), + } +} + +func (b *SystemBackend) metricsPath() *framework.Path { + return &framework.Path{ + Pattern: "metrics", + Fields: map[string]*framework.FieldSchema{ + "format": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Format to export metrics into. 
+
+func (b *SystemBackend) metricsPath() *framework.Path {
+	return &framework.Path{
+		Pattern: "metrics",
+		Fields: map[string]*framework.FieldSchema{
+			"format": &framework.FieldSchema{
+				Type:        framework.TypeString,
+				Description: "Format to export metrics into. Currently accepts only \"prometheus\".",
+				Query:       true,
+			},
+		},
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.ReadOperation: b.handleMetrics,
+		},
+		HelpSynopsis:    strings.TrimSpace(sysHelp["metrics"][0]),
+		HelpDescription: strings.TrimSpace(sysHelp["metrics"][1]),
+	}
+
+}
+
+func (b *SystemBackend) hostInfoPath() *framework.Path {
+	return &framework.Path{
+		Pattern: "host-info/?",
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.ReadOperation: &framework.PathOperation{
+				Callback:    b.handleHostInfo,
+				Summary:     strings.TrimSpace(sysHelp["host-info"][0]),
+				Description: strings.TrimSpace(sysHelp["host-info"][1]),
+			},
+		},
+		HelpSynopsis:    strings.TrimSpace(sysHelp["host-info"][0]),
+		HelpDescription: strings.TrimSpace(sysHelp["host-info"][1]),
+	}
+}
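+
+// Illustrative sketch only: reading host information through the path above
+// with a generic read:
+//
+//	secret, err := client.Logical().Read("sys/host-info")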
on the final path, but the same functionality can be achieved without sudo via `sys/mounts/auth/[auth-path]/tune`.", + }, + }, + HelpSynopsis: strings.TrimSpace(sysHelp["auth_tune"][0]), + HelpDescription: strings.TrimSpace(sysHelp["auth_tune"][1]), + }, + { + Pattern: "auth/(?P<path>.+)", + Fields: map[string]*framework.FieldSchema{ + "path": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["auth_path"][0]), + }, + "type": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["auth_type"][0]), + }, + "description": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["auth_desc"][0]), + }, + "config": &framework.FieldSchema{ + Type: framework.TypeMap, + Description: strings.TrimSpace(sysHelp["auth_config"][0]), + }, + "local": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: strings.TrimSpace(sysHelp["mount_local"][0]), + }, + "seal_wrap": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: strings.TrimSpace(sysHelp["seal_wrap"][0]), + }, + "external_entropy_access": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: strings.TrimSpace(sysHelp["external_entropy_access"][0]), + }, + "plugin_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["auth_plugin"][0]), + }, + "options": &framework.FieldSchema{ + Type: framework.TypeKVPairs, + Description: strings.TrimSpace(sysHelp["auth_options"][0]), + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handleEnableAuth, + Summary: "Enables a new auth method.", + Description: `After enabling, the auth method can be accessed and configured via the auth path specified as part of the URL. This auth path will be nested under the auth prefix. 
+ +For example, enabling the "foo" auth method will make it accessible at /auth/foo.`, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.handleDisableAuth, + Summary: "Disable the auth method at the given auth path", + }, + }, + HelpSynopsis: strings.TrimSpace(sysHelp["auth"][0]), + HelpDescription: strings.TrimSpace(sysHelp["auth"][1]), + }, + } +} + +func (b *SystemBackend) policyPaths() []*framework.Path { + return []*framework.Path{ + { + Pattern: "policy/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.handlePoliciesList(PolicyTypeACL), + logical.ListOperation: b.handlePoliciesList(PolicyTypeACL), + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["policy-list"][0]), + HelpDescription: strings.TrimSpace(sysHelp["policy-list"][1]), + }, + + { + Pattern: "policy/(?P<name>.+)", + + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["policy-name"][0]), + }, + "rules": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["policy-rules"][0]), + Deprecated: true, + }, + "policy": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["policy-rules"][0]), + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handlePoliciesRead(PolicyTypeACL), + Summary: "Retrieve the policy body for the named policy.", + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handlePoliciesSet(PolicyTypeACL), + Summary: "Add a new or update an existing policy.", + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.handlePoliciesDelete(PolicyTypeACL), + Summary: "Delete the policy with the given name.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["policy"][0]), + HelpDescription: strings.TrimSpace(sysHelp["policy"][1]), + }, + + { + Pattern: "policies/acl/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.handlePoliciesList(PolicyTypeACL), + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["policy-list"][0]), + HelpDescription: strings.TrimSpace(sysHelp["policy-list"][1]), + }, + + { + Pattern: "policies/acl/(?P<name>.+)", + + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["policy-name"][0]), + }, + "policy": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["policy-rules"][0]), + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handlePoliciesRead(PolicyTypeACL), + Summary: "Retrieve information about the named ACL policy.", + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handlePoliciesSet(PolicyTypeACL), + Summary: "Add a new or update an existing ACL policy.", + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.handlePoliciesDelete(PolicyTypeACL), + Summary: "Delete the ACL policy with the given name.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["policy"][0]), + HelpDescription: strings.TrimSpace(sysHelp["policy"][1]), + }, + } +} + +func (b *SystemBackend) wrappingPaths() []*framework.Path { + return []*framework.Path{ + { + Pattern: "wrapping/wrap$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + 
logical.UpdateOperation: b.handleWrappingWrap, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["wrap"][0]), + HelpDescription: strings.TrimSpace(sysHelp["wrap"][1]), + }, + + { + Pattern: "wrapping/unwrap$", + + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.handleWrappingUnwrap, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["unwrap"][0]), + HelpDescription: strings.TrimSpace(sysHelp["unwrap"][1]), + }, + + { + Pattern: "wrapping/lookup$", + + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handleWrappingLookup, + Summary: "Look up wrapping properties for the given token.", + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handleWrappingLookup, + Summary: "Look up wrapping properties for the requester's token.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["wraplookup"][0]), + HelpDescription: strings.TrimSpace(sysHelp["wraplookup"][1]), + }, + + { + Pattern: "wrapping/rewrap$", + + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.handleWrappingRewrap, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["rewrap"][0]), + HelpDescription: strings.TrimSpace(sysHelp["rewrap"][1]), + }, + } +} + +func (b *SystemBackend) mountPaths() []*framework.Path { + return []*framework.Path{ + { + Pattern: "mounts/(?P<path>.+?)/tune$", + + Fields: map[string]*framework.FieldSchema{ + "path": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["mount_path"][0]), + }, + "default_lease_ttl": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]), + }, + "max_lease_ttl": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]), + }, + "description": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["auth_desc"][0]), + }, + "audit_non_hmac_request_keys": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: strings.TrimSpace(sysHelp["tune_audit_non_hmac_request_keys"][0]), + }, + "audit_non_hmac_response_keys": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: strings.TrimSpace(sysHelp["tune_audit_non_hmac_response_keys"][0]), + }, + "options": &framework.FieldSchema{ + Type: framework.TypeKVPairs, + Description: strings.TrimSpace(sysHelp["tune_mount_options"][0]), + }, + "listing_visibility": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["listing_visibility"][0]), + }, + "passthrough_request_headers": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: strings.TrimSpace(sysHelp["passthrough_request_headers"][0]), + }, + "allowed_response_headers": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: strings.TrimSpace(sysHelp["allowed_response_headers"][0]), + }, + "token_type": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["token_type"][0]), + }, + }, + 
+ Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.handleMountTuneRead, + logical.UpdateOperation: b.handleMountTuneWrite, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["mount_tune"][0]), + HelpDescription: strings.TrimSpace(sysHelp["mount_tune"][1]), + }, + + { + Pattern: "mounts/(?P<path>.+?)", + + Fields: map[string]*framework.FieldSchema{ + "path": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["mount_path"][0]), + }, + "type": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["mount_type"][0]), + }, + "description": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["mount_desc"][0]), + }, + "config": &framework.FieldSchema{ + Type: framework.TypeMap, + Description: strings.TrimSpace(sysHelp["mount_config"][0]), + }, + "local": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: strings.TrimSpace(sysHelp["mount_local"][0]), + }, + "seal_wrap": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: strings.TrimSpace(sysHelp["seal_wrap"][0]), + }, + "external_entropy_access": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: strings.TrimSpace(sysHelp["external_entropy_access"][0]), + }, + "plugin_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["mount_plugin_name"][0]), + }, + "options": &framework.FieldSchema{ + Type: framework.TypeKVPairs, + Description: strings.TrimSpace(sysHelp["mount_options"][0]), + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handleMount, + Summary: "Enable a new secrets engine at the given path.", + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.handleUnmount, + Summary: "Disable the mount point specified at the given path.", + }, + }, + HelpSynopsis: strings.TrimSpace(sysHelp["mount"][0]), + HelpDescription: strings.TrimSpace(sysHelp["mount"][1]), + }, + + { + Pattern: "mounts$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.handleMountTable, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["mounts"][0]), + HelpDescription: strings.TrimSpace(sysHelp["mounts"][1]), + }, + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_pprof.go b/vendor/github.com/hashicorp/vault/vault/logical_system_pprof.go new file mode 100644 index 00000000..03db5495 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/logical_system_pprof.go @@ -0,0 +1,212 @@ +package vault + +import ( + "context" + "errors" + "fmt" + "net/http/pprof" + "strconv" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *SystemBackend) pprofPaths() []*framework.Path { + return []*framework.Path{ + { + Pattern: "pprof/$", + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handlePprofIndex, + Summary: "Returns an HTML page listing the available profiles.", + Description: `Returns an HTML page listing the available +profiles. 
This should be mainly accessed via browsers or applications that can +render pages.`, + }, + }, + }, + { + Pattern: "pprof/cmdline", + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handlePprofCmdline, + Summary: "Returns the running program's command line.", + Description: "Returns the running program's command line, with arguments separated by NUL bytes.", + }, + }, + }, + { + Pattern: "pprof/goroutine", + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handlePprofGoroutine, + Summary: "Returns stack traces of all current goroutines.", + Description: "Returns stack traces of all current goroutines.", + }, + }, + }, + { + Pattern: "pprof/heap", + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handlePprofHeap, + Summary: "Returns a sampling of memory allocations of live objects.", + Description: "Returns a sampling of memory allocations of live objects.", + }, + }, + }, + { + Pattern: "pprof/profile", + + Fields: map[string]*framework.FieldSchema{ + "seconds": { + Type: framework.TypeInt, + Description: "If provided, specifies the duration to run the profiling command.", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handlePprofProfile, + Summary: "Returns a pprof-formatted cpu profile payload.", + Description: "Returns a pprof-formatted cpu profile payload. Profiling lasts for the duration specified in the seconds GET parameter, or for 30 seconds if not specified.", + }, + }, + }, + { + Pattern: "pprof/symbol", + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handlePprofSymbol, + Summary: "Returns the program counters listed in the request.", + Description: "Returns the program counters listed in the request.", + }, + }, + }, + + { + Pattern: "pprof/trace", + + Fields: map[string]*framework.FieldSchema{ + "seconds": { + Type: framework.TypeInt, + Description: "If provided, specifies the duration to run the tracing command.", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handlePprofTrace, + Summary: "Returns the execution trace in binary form.", + Description: "Returns the execution trace in binary form. 
Tracing lasts for the duration specified in the seconds GET parameter, or for 1 second if not specified.", + }, + }, + }, + } +} + +func (b *SystemBackend) handlePprofIndex(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if err := checkRequestHandlerParams(req); err != nil { + return nil, err + } + + pprof.Index(req.ResponseWriter, req.HTTPRequest) + return nil, nil +} + +func (b *SystemBackend) handlePprofCmdline(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if err := checkRequestHandlerParams(req); err != nil { + return nil, err + } + + pprof.Cmdline(req.ResponseWriter, req.HTTPRequest) + return nil, nil +} + +func (b *SystemBackend) handlePprofGoroutine(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if err := checkRequestHandlerParams(req); err != nil { + return nil, err + } + + pprof.Handler("goroutine").ServeHTTP(req.ResponseWriter, req.HTTPRequest) + return nil, nil +} + +func (b *SystemBackend) handlePprofHeap(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if err := checkRequestHandlerParams(req); err != nil { + return nil, err + } + + pprof.Handler("heap").ServeHTTP(req.ResponseWriter, req.HTTPRequest) + return nil, nil +} + +func (b *SystemBackend) handlePprofProfile(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if err := checkRequestHandlerParams(req); err != nil { + return nil, err + } + + // Return an error if seconds exceeds max request duration. This follows a + // similar behavior to how pprof treats seconds > WriteTimeout (i.e. it + // errors with a 400), and avoids drift between what gets audited vs what + // ends up happening. + if secQueryVal := req.HTTPRequest.FormValue("seconds"); secQueryVal != "" { + maxDur := int64(DefaultMaxRequestDuration.Seconds()) + sec, _ := strconv.ParseInt(secQueryVal, 10, 64) + if sec > maxDur { + return logical.ErrorResponse(fmt.Sprintf("seconds %d exceeds max request duration of %d", sec, maxDur)), nil + } + } + + pprof.Profile(req.ResponseWriter, req.HTTPRequest) + return nil, nil +} + +func (b *SystemBackend) handlePprofSymbol(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if err := checkRequestHandlerParams(req); err != nil { + return nil, err + } + + pprof.Symbol(req.ResponseWriter, req.HTTPRequest) + return nil, nil +} + +func (b *SystemBackend) handlePprofTrace(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if err := checkRequestHandlerParams(req); err != nil { + return nil, err + } + + // Return an error if seconds exceeds max request duration. This follows a + // similar behavior to how pprof treats seconds > WriteTimeout (i.e. it + // errors with a 400), and avoids drift between what gets audited vs what + // ends up happening. + if secQueryVal := req.HTTPRequest.FormValue("seconds"); secQueryVal != "" { + maxDur := int64(DefaultMaxRequestDuration.Seconds()) + sec, _ := strconv.ParseInt(secQueryVal, 10, 64) + if sec > maxDur { + return logical.ErrorResponse(fmt.Sprintf("seconds %d exceeds max request duration of %d", sec, maxDur)), nil + } + } + + pprof.Trace(req.ResponseWriter, req.HTTPRequest) + return nil, nil +} + +// checkRequestHandlerParams is a helper that checks for the existence of the +// HTTP request and response writer in a logical.Request. 
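+// Both must be present because the pprof handlers above stream their output
+// directly over the raw HTTP request and response rather than returning a
+// logical.Response.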
+func checkRequestHandlerParams(req *logical.Request) error { + if req.ResponseWriter == nil { + return errors.New("no writer for request") + } + + if req.HTTPRequest == nil || req.HTTPRequest.Body == nil { + return errors.New("no reader for request") + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_raft.go b/vendor/github.com/hashicorp/vault/vault/logical_system_raft.go new file mode 100644 index 00000000..a00caaa9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/logical_system_raft.go @@ -0,0 +1,455 @@ +package vault + +import ( + "context" + "crypto/subtle" + "encoding/base64" + "errors" + "strings" + + proto "github.com/golang/protobuf/proto" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault/seal" +) + +// raftStoragePaths returns paths for use when raft is the storage mechanism. +func (b *SystemBackend) raftStoragePaths() []*framework.Path { + return []*framework.Path{ + { + Pattern: "storage/raft/bootstrap/answer", + + Fields: map[string]*framework.FieldSchema{ + "server_id": { + Type: framework.TypeString, + }, + "answer": { + Type: framework.TypeString, + }, + "cluster_addr": { + Type: framework.TypeString, + }, + "non_voter": { + Type: framework.TypeBool, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handleRaftBootstrapAnswerWrite(), + Summary: "Accepts an answer from the peer to be joined to the raft cluster.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysRaftHelp["raft-bootstrap-answer"][0]), + HelpDescription: strings.TrimSpace(sysRaftHelp["raft-bootstrap-answer"][1]), + }, + { + Pattern: "storage/raft/bootstrap/challenge", + + Fields: map[string]*framework.FieldSchema{ + "server_id": { + Type: framework.TypeString, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handleRaftBootstrapChallengeWrite(), + Summary: "Creates a challenge for the new peer to be joined to the raft cluster.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysRaftHelp["raft-bootstrap-challenge"][0]), + HelpDescription: strings.TrimSpace(sysRaftHelp["raft-bootstrap-challenge"][1]), + }, + { + Pattern: "storage/raft/remove-peer", + + Fields: map[string]*framework.FieldSchema{ + "server_id": { + Type: framework.TypeString, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handleRaftRemovePeerUpdate(), + Summary: "Remove a peer from the raft cluster.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysRaftHelp["raft-remove-peer"][0]), + HelpDescription: strings.TrimSpace(sysRaftHelp["raft-remove-peer"][1]), + }, + { + Pattern: "storage/raft/configuration", + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handleRaftConfigurationGet(), + Summary: "Returns the configuration of the raft cluster.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysRaftHelp["raft-configuration"][0]), + HelpDescription: strings.TrimSpace(sysRaftHelp["raft-configuration"][1]), + }, + { + Pattern: "storage/raft/snapshot", + Operations: 
map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handleStorageRaftSnapshotRead(), + Summary: "Returns a snapshot of the current state of vault.", + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handleStorageRaftSnapshotWrite(false), + Summary: "Installs the provided snapshot, returning the cluster to the state defined in it.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysRaftHelp["raft-snapshot"][0]), + HelpDescription: strings.TrimSpace(sysRaftHelp["raft-snapshot"][1]), + }, + { + Pattern: "storage/raft/snapshot-force", + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.handleStorageRaftSnapshotWrite(true), + Summary: "Installs the provided snapshot, returning the cluster to the state defined in it. This bypasses checks ensuring the current Autounseal or Shamir keys are consistent with the snapshot data.", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysRaftHelp["raft-snapshot-force"][0]), + HelpDescription: strings.TrimSpace(sysRaftHelp["raft-snapshot-force"][1]), + }, + } +} + +func (b *SystemBackend) handleRaftConfigurationGet() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + + raftStorage, ok := b.Core.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return logical.ErrorResponse("raft storage is not in use"), logical.ErrInvalidRequest + } + + config, err := raftStorage.GetConfiguration(ctx) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "config": config, + }, + }, nil + } +} + +func (b *SystemBackend) handleRaftRemovePeerUpdate() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + serverID := d.Get("server_id").(string) + if len(serverID) == 0 { + return logical.ErrorResponse("no server id provided"), logical.ErrInvalidRequest + } + + raftStorage, ok := b.Core.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return logical.ErrorResponse("raft storage is not in use"), logical.ErrInvalidRequest + } + + if err := raftStorage.RemovePeer(ctx, serverID); err != nil { + return nil, err + } + if b.Core.raftFollowerStates != nil { + b.Core.raftFollowerStates.delete(serverID) + } + + return nil, nil + } +} + +func (b *SystemBackend) handleRaftBootstrapChallengeWrite() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + _, ok := b.Core.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return logical.ErrorResponse("raft storage is not in use"), logical.ErrInvalidRequest + } + + serverID := d.Get("server_id").(string) + if len(serverID) == 0 { + return logical.ErrorResponse("no server id provided"), logical.ErrInvalidRequest + } + + uuid, err := uuid.GenerateRandomBytes(16) + if err != nil { + return nil, err + } + + sealAccess := b.Core.seal.GetAccess() + eBlob, err := sealAccess.Encrypt(ctx, uuid) + if err != nil { + return nil, err + } + protoBlob, err := proto.Marshal(eBlob) + if err != nil { + return nil, err + } + + b.Core.pendingRaftPeers[serverID] = uuid + sealConfig, err := b.Core.seal.BarrierConfig(ctx) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "challenge": base64.StdEncoding.EncodeToString(protoBlob), + "seal_config": sealConfig, + 
}, + }, nil + } +} + +func (b *SystemBackend) handleRaftBootstrapAnswerWrite() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + raftStorage, ok := b.Core.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return logical.ErrorResponse("raft storage is not in use"), logical.ErrInvalidRequest + } + + serverID := d.Get("server_id").(string) + if len(serverID) == 0 { + return logical.ErrorResponse("no server_id provided"), logical.ErrInvalidRequest + } + answerRaw := d.Get("answer").(string) + if len(answerRaw) == 0 { + return logical.ErrorResponse("no answer provided"), logical.ErrInvalidRequest + } + clusterAddr := d.Get("cluster_addr").(string) + if len(clusterAddr) == 0 { + return logical.ErrorResponse("no cluster_addr provided"), logical.ErrInvalidRequest + } + + nonVoter := d.Get("non_voter").(bool) + + answer, err := base64.StdEncoding.DecodeString(answerRaw) + if err != nil { + return logical.ErrorResponse("could not base64 decode answer"), logical.ErrInvalidRequest + } + + expectedAnswer, ok := b.Core.pendingRaftPeers[serverID] + if !ok { + return logical.ErrorResponse("no expected answer for the server id provided"), logical.ErrInvalidRequest + } + + delete(b.Core.pendingRaftPeers, serverID) + + if subtle.ConstantTimeCompare(answer, expectedAnswer) == 0 { + return logical.ErrorResponse("invalid answer given"), logical.ErrInvalidRequest + } + + tlsKeyringEntry, err := b.Core.barrier.Get(ctx, raftTLSStoragePath) + if err != nil { + return nil, err + } + if tlsKeyringEntry == nil { + return nil, errors.New("could not find raft TLS configuration") + } + var keyring raft.TLSKeyring + if err := tlsKeyringEntry.DecodeJSON(&keyring); err != nil { + return nil, errors.New("could not decode raft TLS configuration") + } + + switch nonVoter { + case true: + err = raftStorage.AddNonVotingPeer(ctx, serverID, clusterAddr) + default: + err = raftStorage.AddPeer(ctx, serverID, clusterAddr) + } + if err != nil { + return nil, err + } + + if b.Core.raftFollowerStates != nil { + b.Core.raftFollowerStates.update(serverID, 0) + } + + peers, err := raftStorage.Peers(ctx) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "peers": peers, + "tls_keyring": &keyring, + }, + }, nil + } +} + +func (b *SystemBackend) handleStorageRaftSnapshotRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + raftStorage, ok := b.Core.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return logical.ErrorResponse("raft storage is not in use"), logical.ErrInvalidRequest + } + if req.ResponseWriter == nil { + return nil, errors.New("no writer for request") + } + + err := raftStorage.Snapshot(req.ResponseWriter, b.Core.seal.GetAccess()) + if err != nil { + return nil, err + } + + return nil, nil + } +} + +func (b *SystemBackend) handleStorageRaftSnapshotWrite(force bool) framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + raftStorage, ok := b.Core.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return logical.ErrorResponse("raft storage is not in use"), logical.ErrInvalidRequest + } + if req.HTTPRequest == nil || req.HTTPRequest.Body == nil { + return nil, errors.New("no reader for request") + } + + access := b.Core.seal.GetAccess() + if force { + access = nil + } + + // We want to buffer the http request reader 
into a temp file here so we + // don't have to hold the full snapshot in memory. We also want to do + // the restore in two parts so we can restore the snapshot while the + // stateLock is write locked. + snapFile, cleanup, metadata, err := raftStorage.WriteSnapshotToTemp(req.HTTPRequest.Body, access) + switch { + case err == nil: + case strings.Contains(err.Error(), "failed to open the sealed hashes"): + switch b.Core.seal.BarrierType() { + case seal.Shamir: + return logical.ErrorResponse("could not verify hash file, possibly the snapshot is using a different set of unseal keys; use the snapshot-force API to bypass this check"), logical.ErrInvalidRequest + default: + return logical.ErrorResponse("could not verify hash file, possibly the snapshot is using a different autoseal key; use the snapshot-force API to bypass this check"), logical.ErrInvalidRequest + } + case err != nil: + b.Core.logger.Error("raft snapshot restore: failed to write snapshot", "error", err) + return nil, err + } + + // We want to do this in a go routine so we can upgrade the lock and + // allow the client to disconnect. + go func() (retErr error) { + // Cleanup the temp file + defer cleanup() + + // Grab statelock + if stopped := grabLockOrStop(b.Core.stateLock.Lock, b.Core.stateLock.Unlock, b.Core.standbyStopCh); stopped { + b.Core.logger.Error("not applying snapshot; shutting down") + return + } + defer b.Core.stateLock.Unlock() + + // If we failed to restore the snapshot we should seal this node as + // it's in an unknown state + defer func() { + if retErr != nil { + if err := b.Core.sealInternalWithOptions(false, false, true); err != nil { + b.Core.logger.Error("failed to seal node", "error", err) + } + } + }() + + ctx, ctxCancel := context.WithCancel(namespace.RootContext(nil)) + + // We are calling the callback function synchronously here while we + // have the lock. So set it to nil and restore the callback when we + // finish. + raftStorage.SetRestoreCallback(nil) + defer raftStorage.SetRestoreCallback(b.Core.raftSnapshotRestoreCallback(true, true)) + + // Do a preSeal to clear vault's in-memory caches and shut down any + // systems that might be holding the encryption access. + b.Core.logger.Info("shutting down prior to restoring snapshot") + if err := b.Core.preSeal(); err != nil { + b.Core.logger.Error("raft snapshot restore failed preSeal", "error", err) + return err + } + + b.Core.logger.Info("applying snapshot") + if err := raftStorage.RestoreSnapshot(ctx, metadata, snapFile); err != nil { + b.Core.logger.Error("error while restoring raft snapshot", "error", err) + return err + } + + // Run invalidation logic synchronously here + callback := b.Core.raftSnapshotRestoreCallback(false, false) + if err := callback(ctx); err != nil { + return err + } + + { + // If the snapshot was taken while another node was leader we + // need to reset the leader information to this node. 
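+				// Overwriting the core lock entry with this node's UUID and
+				// then re-advertising ensures standbys observe this node as
+				// the current leader once the restore completes.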
+ if err := b.Core.underlyingPhysical.Put(ctx, &physical.Entry{ + Key: CoreLockPath, + Value: []byte(b.Core.leaderUUID), + }); err != nil { + b.Core.logger.Error("cluster setup failed", "error", err) + return err + } + // re-advertise our cluster information + if err := b.Core.advertiseLeader(ctx, b.Core.leaderUUID, nil); err != nil { + b.Core.logger.Error("cluster setup failed", "error", err) + return err + } + } + if err := b.Core.postUnseal(ctx, ctxCancel, standardUnsealStrategy{}); err != nil { + b.Core.logger.Error("raft snapshot restore failed postUnseal", "error", err) + return err + } + + return nil + + }() + + return nil, nil + } +} + +var sysRaftHelp = map[string][2]string{ + "raft-bootstrap-challenge": { + "Creates a challenge for the new peer to be joined to the raft cluster.", + "", + }, + "raft-bootstrap-answer": { + "Accepts an answer from the peer to be joined to the raft cluster.", + "", + }, + "raft-configuration": { + "Returns the raft cluster configuration.", + "", + }, + "raft-remove-peer": { + "Removes a peer from the raft cluster.", + "", + }, + "raft-snapshot": { + "Restores and saves snapshots from the raft cluster.", + "", + }, + "raft-snapshot-force": { + "Force restores a raft cluster snapshot.", + "", + }, +} diff --git a/vendor/github.com/hashicorp/vault/vault/mount.go b/vendor/github.com/hashicorp/vault/vault/mount.go new file mode 100644 index 00000000..1a8c2c0e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/mount.go @@ -0,0 +1,1458 @@ +package vault + +import ( + "context" + "errors" + "fmt" + "os" + "sort" + "strings" + "sync" + "time" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/copystructure" +) + +const ( + // coreMountConfigPath is used to store the mount configuration. + // Mounts are protected within the Vault itself, which means they + // can only be viewed or modified after an unseal. + coreMountConfigPath = "core/mounts" + + // coreLocalMountConfigPath is used to store mount configuration for local + // (non-replicated) mounts + coreLocalMountConfigPath = "core/local-mounts" + + // backendBarrierPrefix is the prefix to the UUID used in the + // barrier view for the backends. + backendBarrierPrefix = "logical/" + + // systemBarrierPrefix is the prefix used for the + // system logical backend. 
+ systemBarrierPrefix = "sys/" + + // mountTableType is the value we expect to find for the mount table and + // corresponding entries + mountTableType = "mounts" +) + +// ListingVisibilityType represents the types for listing visibility +type ListingVisibilityType string + +const ( + // ListingVisibilityDefault is the default value for listing visibility + ListingVisibilityDefault ListingVisibilityType = "" + // ListingVisibilityHidden is the hidden type for listing visibility + ListingVisibilityHidden ListingVisibilityType = "hidden" + // ListingVisibilityUnauth is the unauth type for listing visibility + ListingVisibilityUnauth ListingVisibilityType = "unauth" + + systemMountPath = "sys/" + identityMountPath = "identity/" + cubbyholeMountPath = "cubbyhole/" + + systemMountType = "system" + identityMountType = "identity" + cubbyholeMountType = "cubbyhole" + pluginMountType = "plugin" + + MountTableUpdateStorage = true + MountTableNoUpdateStorage = false +) + +var ( + // loadMountsFailed if loadMounts encounters an error + errLoadMountsFailed = errors.New("failed to setup mount table") + + // protectedMounts cannot be remounted + protectedMounts = []string{ + "audit/", + "auth/", + systemMountPath, + cubbyholeMountPath, + identityMountPath, + } + + untunableMounts = []string{ + cubbyholeMountPath, + systemMountPath, + "audit/", + identityMountPath, + } + + // singletonMounts can only exist in one location and are + // loaded by default. These are types, not paths. + singletonMounts = []string{ + cubbyholeMountType, + systemMountType, + "token", + identityMountType, + } + + // mountAliases maps old backend names to new backend names, allowing us + // to move/rename backends but maintain backwards compatibility + mountAliases = map[string]string{"generic": "kv"} +) + +func (c *Core) generateMountAccessor(entryType string) (string, error) { + var accessor string + for { + randBytes, err := uuid.GenerateRandomBytes(4) + if err != nil { + return "", err + } + accessor = fmt.Sprintf("%s_%s", entryType, fmt.Sprintf("%08x", randBytes[0:4])) + if entry := c.router.MatchingMountByAccessor(accessor); entry == nil { + break + } + } + + return accessor, nil +} + +// MountTable is used to represent the internal mount table +type MountTable struct { + Type string `json:"type"` + Entries []*MountEntry `json:"entries"` +} + +// shallowClone returns a copy of the mount table that +// keeps the MountEntry locations, so as not to invalidate +// other locations holding pointers. Care needs to be taken +// if modifying entries rather than modifying the table itself +func (t *MountTable) shallowClone() *MountTable { + mt := &MountTable{ + Type: t.Type, + Entries: make([]*MountEntry, len(t.Entries)), + } + for i, e := range t.Entries { + mt.Entries[i] = e + } + return mt +} + +// setTaint is used to set the taint on given entry Accepts either the mount +// entry's path or namespace + path, i.e. 
<ns-path>/secret/ or <ns-path>/token/ +func (t *MountTable) setTaint(ctx context.Context, path string, value bool) (*MountEntry, error) { + n := len(t.Entries) + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + for i := 0; i < n; i++ { + if entry := t.Entries[i]; entry.Path == path && entry.Namespace().ID == ns.ID { + t.Entries[i].Tainted = value + return t.Entries[i], nil + } + } + return nil, nil +} + +// remove is used to remove a given path entry; returns the entry that was +// removed +func (t *MountTable) remove(ctx context.Context, path string) (*MountEntry, error) { + n := len(t.Entries) + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + for i := 0; i < n; i++ { + if entry := t.Entries[i]; entry.Path == path && entry.Namespace().ID == ns.ID { + t.Entries[i], t.Entries[n-1] = t.Entries[n-1], nil + t.Entries = t.Entries[:n-1] + return entry, nil + } + } + return nil, nil +} + +func (t *MountTable) find(ctx context.Context, path string) (*MountEntry, error) { + n := len(t.Entries) + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + for i := 0; i < n; i++ { + if entry := t.Entries[i]; entry.Path == path && entry.Namespace().ID == ns.ID { + return entry, nil + } + } + return nil, nil +} + +// sortEntriesByPath sorts the entries in the table by path and returns the +// table; this is useful for tests +func (t *MountTable) sortEntriesByPath() *MountTable { + sort.Slice(t.Entries, func(i, j int) bool { + return t.Entries[i].Path < t.Entries[j].Path + }) + return t +} + +// sortEntriesByPathDepth sorts the entries in the table by path depth and +// returns the table; this is useful for tests +func (t *MountTable) sortEntriesByPathDepth() *MountTable { + sort.Slice(t.Entries, func(i, j int) bool { + return len(strings.Split(t.Entries[i].Namespace().Path+t.Entries[i].Path, "/")) < len(strings.Split(t.Entries[j].Namespace().Path+t.Entries[j].Path, "/")) + }) + return t +} + +// MountEntry is used to represent a mount table entry +type MountEntry struct { + Table string `json:"table"` // The table it belongs to + Path string `json:"path"` // Mount Path + Type string `json:"type"` // Logical backend Type + Description string `json:"description"` // User-provided description + UUID string `json:"uuid"` // Barrier view UUID + BackendAwareUUID string `json:"backend_aware_uuid"` // UUID that can be used by the backend as a helper when a consistent value is needed outside of storage. + Accessor string `json:"accessor"` // Unique but more human-friendly ID. Does not change, not used for any sensitive things (like as a salt, which the UUID sometimes is). + Config MountConfig `json:"config"` // Configuration related to this mount (but not backend-derived) + Options map[string]string `json:"options"` // Backend options + Local bool `json:"local"` // Local mounts are not replicated or affected by replication + SealWrap bool `json:"seal_wrap"` // Whether to wrap CSPs + ExternalEntropyAccess bool `json:"external_entropy_access"` // Whether to allow external entropy source access + Tainted bool `json:"tainted,omitempty"` // Set as a Write-Ahead flag for unmount/remount + NamespaceID string `json:"namespace_id"` + + // namespace contains the populated namespace + namespace *namespace.Namespace + + // synthesizedConfigCache is used to cache configuration values. These + // particular values are cached since we want to get them at a point-in-time + // without separately managing their locks individually. 
See SyncCache() for + // the specific values that are being cached. + synthesizedConfigCache sync.Map +} + +// MountConfig is used to hold settable options +type MountConfig struct { + DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` // Override for global default + MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` // Override for global default + ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` // Override for global default + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility ListingVisibilityType `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" structs:"allowed_response_headers" mapstructure:"allowed_response_headers"` + TokenType logical.TokenType `json:"token_type" structs:"token_type" mapstructure:"token_type"` + + // PluginName is the name of the plugin registered in the catalog. + // + // Deprecated: MountEntry.Type should be used instead for Vault 1.0.0 and beyond. + PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` +} + +// APIMountConfig is an embedded struct of api.MountConfigInput +type APIMountConfig struct { + DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility ListingVisibilityType `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" structs:"allowed_response_headers" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type" structs:"token_type" mapstructure:"token_type"` + + // PluginName is the name of the plugin registered in the catalog. + // + // Deprecated: MountEntry.Type should be used instead for Vault 1.0.0 and beyond. 
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` +} + +// Clone returns a deep copy of the mount entry +func (e *MountEntry) Clone() (*MountEntry, error) { + cp, err := copystructure.Copy(e) + if err != nil { + return nil, err + } + return cp.(*MountEntry), nil +} + +// Namespace returns the namespace for the mount entry +func (e *MountEntry) Namespace() *namespace.Namespace { + return e.namespace +} + +// APIPath returns the full API Path for the given mount entry +func (e *MountEntry) APIPath() string { + path := e.Path + if e.Table == credentialTableType { + path = credentialRoutePrefix + path + } + return e.namespace.Path + path +} + +// SyncCache syncs tunable configuration values to the cache. In the case of +// cached values, they should be retrieved via synthesizedConfigCache.Load() +// instead of accessing them directly through MountConfig. +func (e *MountEntry) SyncCache() { + if len(e.Config.AuditNonHMACRequestKeys) == 0 { + e.synthesizedConfigCache.Delete("audit_non_hmac_request_keys") + } else { + e.synthesizedConfigCache.Store("audit_non_hmac_request_keys", e.Config.AuditNonHMACRequestKeys) + } + + if len(e.Config.AuditNonHMACResponseKeys) == 0 { + e.synthesizedConfigCache.Delete("audit_non_hmac_response_keys") + } else { + e.synthesizedConfigCache.Store("audit_non_hmac_response_keys", e.Config.AuditNonHMACResponseKeys) + } + + if len(e.Config.PassthroughRequestHeaders) == 0 { + e.synthesizedConfigCache.Delete("passthrough_request_headers") + } else { + e.synthesizedConfigCache.Store("passthrough_request_headers", e.Config.PassthroughRequestHeaders) + } + + if len(e.Config.AllowedResponseHeaders) == 0 { + e.synthesizedConfigCache.Delete("allowed_response_headers") + } else { + e.synthesizedConfigCache.Store("allowed_response_headers", e.Config.AllowedResponseHeaders) + } +} + +func (c *Core) decodeMountTable(ctx context.Context, raw []byte) (*MountTable, error) { + // Decode into mount table + mountTable := new(MountTable) + if err := jsonutil.DecodeJSON(raw, mountTable); err != nil { + return nil, err + } + + // Populate the namespace in memory + var mountEntries []*MountEntry + for _, entry := range mountTable.Entries { + if entry.NamespaceID == "" { + entry.NamespaceID = namespace.RootNamespaceID + } + ns, err := NamespaceByID(ctx, entry.NamespaceID, c) + if err != nil { + return nil, err + } + if ns == nil { + c.logger.Error("namespace on mount entry not found", "namespace_id", entry.NamespaceID, "mount_path", entry.Path, "mount_description", entry.Description) + continue + } + + entry.namespace = ns + mountEntries = append(mountEntries, entry) + } + + return &MountTable{ + Type: mountTable.Type, + Entries: mountEntries, + }, nil +} + +// Mount is used to mount a new backend to the mount table. 
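+// It normalizes the path, rejects protected and singleton mounts, delegates
+// to mountInternal, and undoes the mount if filtered-path evaluation fails.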
+func (c *Core) mount(ctx context.Context, entry *MountEntry) error { + // Ensure we end the path in a slash + if !strings.HasSuffix(entry.Path, "/") { + entry.Path += "/" + } + + // Prevent protected paths from being mounted + for _, p := range protectedMounts { + if strings.HasPrefix(entry.Path, p) && entry.namespace == nil { + return logical.CodedError(403, fmt.Sprintf("cannot mount %q", entry.Path)) + } + } + + // Do not allow more than one instance of a singleton mount + for _, p := range singletonMounts { + if entry.Type == p { + return logical.CodedError(403, fmt.Sprintf("mount type of %q is not mountable", entry.Type)) + } + } + + // Mount internally + if err := c.mountInternal(ctx, entry, MountTableUpdateStorage); err != nil { + return err + } + + // Re-evaluate filtered paths + if err := runFilteredPathsEvaluation(ctx, c); err != nil { + c.logger.Error("failed to evaluate filtered paths", "error", err) + + // We failed to evaluate filtered paths so we are undoing the mount operation + if unmountInternalErr := c.unmountInternal(ctx, entry.Path, MountTableUpdateStorage); unmountInternalErr != nil { + c.logger.Error("failed to unmount", "error", unmountInternalErr) + } + return err + } + + return nil +} + +func (c *Core) mountInternal(ctx context.Context, entry *MountEntry, updateStorage bool) error { + c.mountsLock.Lock() + defer c.mountsLock.Unlock() + + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + + if err := verifyNamespace(c, ns, entry); err != nil { + return err + } + + entry.NamespaceID = ns.ID + entry.namespace = ns + + // Ensure the cache is populated, don't need the result + NamespaceByID(ctx, ns.ID, c) + + // Basic check for matching names + for _, ent := range c.mounts.Entries { + if ns.ID == ent.NamespaceID { + switch { + // Existing is oauth/github/ new is oauth/ or + // existing is oauth/ and new is oauth/github/ + case strings.HasPrefix(ent.Path, entry.Path): + fallthrough + case strings.HasPrefix(entry.Path, ent.Path): + return logical.CodedError(409, fmt.Sprintf("path is already in use at %s", ent.Path)) + } + } + } + + // Verify there are no conflicting mounts in the router + if match := c.router.MountConflict(ctx, entry.Path); match != "" { + return logical.CodedError(409, fmt.Sprintf("existing mount at %s", match)) + } + + // Generate a new UUID and view + if entry.UUID == "" { + entryUUID, err := uuid.GenerateUUID() + if err != nil { + return err + } + entry.UUID = entryUUID + } + if entry.BackendAwareUUID == "" { + bUUID, err := uuid.GenerateUUID() + if err != nil { + return err + } + entry.BackendAwareUUID = bUUID + } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor(entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + } + // Sync values to the cache + entry.SyncCache() + + viewPath := entry.ViewPath() + view := NewBarrierView(c.barrier, viewPath) + + // Singleton mounts cannot be filtered manually on a per-secondary basis + // from replication. + if strutil.StrListContains(singletonMounts, entry.Type) { + addFilterablePath(c, viewPath) + } + + nilMount, err := preprocessMount(c, entry, view) + if err != nil { + return err + } + origReadOnlyErr := view.getReadOnlyErr() + + // Mark the view as read-only until the mounting is complete and + // ensure that it is reset after. This ensures that there will be no + // writes during the construction of the backend. 
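+	// Reads through the view are still allowed; only writes fail with
+	// ErrSetupReadOnly until setup completes.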
+ view.setReadOnlyErr(logical.ErrSetupReadOnly) + // We defer this because we're already up and running so we don't need to + // time it for after postUnseal + defer view.setReadOnlyErr(origReadOnlyErr) + + var backend logical.Backend + sysView := c.mountEntrySysView(entry) + + backend, err = c.newLogicalBackend(ctx, entry, sysView, view) + if err != nil { + return err + } + if backend == nil { + return fmt.Errorf("nil backend of type %q returned from creation function", entry.Type) + } + + // Check for the correct backend type + backendType := backend.Type() + if backendType != logical.TypeLogical { + if entry.Type != "kv" && entry.Type != "system" && entry.Type != "cubbyhole" { + return fmt.Errorf(`unknown backend type: "%s"`, entry.Type) + } + } + + addPathCheckers(c, entry, backend, viewPath) + + c.setCoreBackend(entry, backend, view) + + // If the mount is filtered or we are on a DR secondary we don't want to + // keep the actual backend running, so we clean it up and set it to nil + // so the router does not have a pointer to the object. + if nilMount { + backend.Cleanup(ctx) + backend = nil + } + + newTable := c.mounts.shallowClone() + newTable.Entries = append(newTable.Entries, entry) + if updateStorage { + if err := c.persistMounts(ctx, newTable, &entry.Local); err != nil { + c.logger.Error("failed to update mount table", "error", err) + if err == logical.ErrReadOnly && c.perfStandby { + return err + } + + return logical.CodedError(500, "failed to update mount table") + } + } + c.mounts = newTable + + if err := c.router.Mount(backend, entry.Path, entry, view); err != nil { + return err + } + + if !nilMount { + // restore the original readOnlyErr, so we can write to the view in + // Initialize() if necessary + view.setReadOnlyErr(origReadOnlyErr) + // initialize, using the core's active context. + err := backend.Initialize(c.activeContext, &logical.InitializationRequest{Storage: view}) + if err != nil { + return err + } + } + + if c.logger.IsInfo() { + c.logger.Info("successful mount", "namespace", entry.Namespace().Path, "path", entry.Path, "type", entry.Type) + } + return nil +} + +// Unmount is used to unmount a path. 
+func (c *Core) unmount(ctx context.Context, path string) error { + // Ensure we end the path in a slash + if !strings.HasSuffix(path, "/") { + path += "/" + } + + // Prevent protected paths from being unmounted + for _, p := range protectedMounts { + if strings.HasPrefix(path, p) { + return fmt.Errorf("cannot unmount %q", path) + } + } + + // Unmount internally + if err := c.unmountInternal(ctx, path, MountTableUpdateStorage); err != nil { + return err + } + + // Re-evaluate filtered paths + if err := runFilteredPathsEvaluation(ctx, c); err != nil { + // Even though we failed to evaluate filtered paths, the unmount operation was still successful + c.logger.Error("failed to evaluate filtered paths", "error", err) + } + return nil +} + +func (c *Core) unmountInternal(ctx context.Context, path string, updateStorage bool) error { + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + + // Verify exact match of the route + match := c.router.MatchingMount(ctx, path) + if match == "" || ns.Path+path != match { + return fmt.Errorf("no matching mount") + } + + // Get the view for this backend + view := c.router.MatchingStorageByAPIPath(ctx, path) + + // Get the backend/mount entry for this path, used to remove ignored + // replication prefixes + backend := c.router.MatchingBackend(ctx, path) + entry := c.router.MatchingMountEntry(ctx, path) + + // Mark the entry as tainted + if err := c.taintMountEntry(ctx, path, updateStorage); err != nil { + c.logger.Error("failed to taint mount entry for path being unmounted", "error", err, "path", path) + return err + } + + // Taint the router path to prevent routing. Note that in-flight requests + // are uncertain, right now. + if err := c.router.Taint(ctx, path); err != nil { + return err + } + + rCtx := namespace.ContextWithNamespace(c.activeContext, ns) + if backend != nil && c.rollback != nil { + // Invoke the rollback manager a final time + if err := c.rollback.Rollback(rCtx, path); err != nil { + return err + } + } + if backend != nil && c.expiration != nil && updateStorage { + // Revoke all the dynamic keys + if err := c.expiration.RevokePrefix(rCtx, path, true); err != nil { + return err + } + } + + if backend != nil { + // Call cleanup function if it exists + backend.Cleanup(ctx) + } + + viewPath := entry.ViewPath() + switch { + case !updateStorage: + // Don't attempt to clear data, replication will handle this + case c.IsDRSecondary(): + // If we are a dr secondary we want to clear the view, but the provided + // view is marked as read only. We use the barrier here to get around + // it. 
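+		// Constructing a fresh BarrierView over the same prefix yields a
+		// writable handle, since the read-only marker lives on the view
+		// rather than on the underlying barrier.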
+ if err := logical.ClearViewWithLogging(ctx, NewBarrierView(c.barrier, viewPath), c.logger.Named("secrets.deletion").With("namespace", ns.ID, "path", path)); err != nil { + c.logger.Error("failed to clear view for path being unmounted", "error", err, "path", path) + return err + } + + case entry.Local, !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary): + // Have writable storage, remove the whole thing + if err := logical.ClearViewWithLogging(ctx, view, c.logger.Named("secrets.deletion").With("namespace", ns.ID, "path", path)); err != nil { + c.logger.Error("failed to clear view for path being unmounted", "error", err, "path", path) + return err + } + + case !entry.Local && c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary): + if err := clearIgnoredPaths(ctx, c, backend, viewPath); err != nil { + return err + } + } + + // Remove the mount table entry + if err := c.removeMountEntry(ctx, path, updateStorage); err != nil { + c.logger.Error("failed to remove mount entry for path being unmounted", "error", err, "path", path) + return err + } + + // Unmount the backend entirely + if err := c.router.Unmount(ctx, path); err != nil { + return err + } + + removePathCheckers(c, entry, viewPath) + + if c.logger.IsInfo() { + c.logger.Info("successfully unmounted", "path", path, "namespace", ns.Path) + } + + return nil +} + +// removeMountEntry is used to remove an entry from the mount table +func (c *Core) removeMountEntry(ctx context.Context, path string, updateStorage bool) error { + c.mountsLock.Lock() + defer c.mountsLock.Unlock() + + // Remove the entry from the mount table + newTable := c.mounts.shallowClone() + entry, err := newTable.remove(ctx, path) + if err != nil { + return err + } + if entry == nil { + c.logger.Error("nil entry found removing entry in mounts table", "path", path) + return logical.CodedError(500, "failed to remove entry in mounts table") + } + + // When unmounting all entries the JSON code will load back up from storage + // as a nil slice, which kills tests...just set it nil explicitly + if len(newTable.Entries) == 0 { + newTable.Entries = nil + } + + if updateStorage { + // Update the mount table + if err := c.persistMounts(ctx, newTable, &entry.Local); err != nil { + c.logger.Error("failed to remove entry from mounts table", "error", err) + return logical.CodedError(500, "failed to remove entry from mounts table") + } + } + + c.mounts = newTable + return nil +} + +// taintMountEntry is used to mark an entry in the mount table as tainted +func (c *Core) taintMountEntry(ctx context.Context, path string, updateStorage bool) error { + c.mountsLock.Lock() + defer c.mountsLock.Unlock() + + // As modifying the taint of an entry affects shallow clones, + // we simply use the original + entry, err := c.mounts.setTaint(ctx, path, true) + if err != nil { + return err + } + if entry == nil { + c.logger.Error("nil entry found tainting entry in mounts table", "path", path) + return logical.CodedError(500, "failed to taint entry in mounts table") + } + + if updateStorage { + // Update the mount table + if err := c.persistMounts(ctx, c.mounts, &entry.Local); err != nil { + if err == logical.ErrReadOnly && c.perfStandby { + return err + } + + c.logger.Error("failed to taint entry in mounts table", "error", err) + return logical.CodedError(500, "failed to taint entry in mounts table") + } + } + + return nil +} + +// remountForceInternal takes a copy of the mount entry for the path and fully unmounts +// and remounts the backend to pick up any 
changes, such as filtered paths. +// Should be only used for internal usage. +func (c *Core) remountForceInternal(ctx context.Context, path string, updateStorage bool) error { + me := c.router.MatchingMountEntry(ctx, path) + if me == nil { + return fmt.Errorf("cannot find mount for path %q", path) + } + + me, err := me.Clone() + if err != nil { + return err + } + + if err := c.unmountInternal(ctx, path, updateStorage); err != nil { + return err + } + + // Mount internally + if err := c.mountInternal(ctx, me, updateStorage); err != nil { + return err + } + + // Re-evaluate filtered paths + if err := runFilteredPathsEvaluation(ctx, c); err != nil { + c.logger.Error("failed to evaluate filtered paths", "error", err) + return err + } + return nil +} + +// Remount is used to remount a path at a new mount point. +func (c *Core) remount(ctx context.Context, src, dst string, updateStorage bool) error { + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + + // Ensure we end the path in a slash + if !strings.HasSuffix(src, "/") { + src += "/" + } + if !strings.HasSuffix(dst, "/") { + dst += "/" + } + + // Prevent protected paths from being remounted + for _, p := range protectedMounts { + if strings.HasPrefix(src, p) { + return fmt.Errorf("cannot remount %q", src) + } + } + + // Verify exact match of the route + srcMatch := c.router.MatchingMountEntry(ctx, src) + if srcMatch == nil { + return fmt.Errorf("no matching mount at %q", src) + } + if srcMatch.NamespaceID != ns.ID { + return fmt.Errorf("source mount in a different namespace than request") + } + + if err := verifyNamespace(c, ns, &MountEntry{Path: dst}); err != nil { + return err + } + + if match := c.router.MatchingMount(ctx, dst); match != "" { + return fmt.Errorf("existing mount at %q", match) + } + + // Mark the entry as tainted + if err := c.taintMountEntry(ctx, src, updateStorage); err != nil { + return err + } + + // Taint the router path to prevent routing + if err := c.router.Taint(ctx, src); err != nil { + return err + } + + if !c.IsDRSecondary() { + // Invoke the rollback manager a final time + rCtx := namespace.ContextWithNamespace(c.activeContext, ns) + if c.rollback != nil { + if err := c.rollback.Rollback(rCtx, src); err != nil { + return err + } + } + + if entry := c.router.MatchingMountEntry(ctx, src); entry == nil { + return fmt.Errorf("no matching mount at %q", src) + } + + // Revoke all the dynamic keys + if err := c.expiration.RevokePrefix(rCtx, src, true); err != nil { + return err + } + } + + c.mountsLock.Lock() + var entry *MountEntry + for _, mountEntry := range c.mounts.Entries { + if mountEntry.Path == src && mountEntry.NamespaceID == ns.ID { + entry = mountEntry + entry.Path = dst + entry.Tainted = false + break + } + } + + if entry == nil { + c.mountsLock.Unlock() + c.logger.Error("failed to find entry in mounts table") + return logical.CodedError(500, "failed to find entry in mounts table") + } + + // Update the mount table + if err := c.persistMounts(ctx, c.mounts, &entry.Local); err != nil { + entry.Path = src + entry.Tainted = true + c.mountsLock.Unlock() + if err == logical.ErrReadOnly && c.perfStandby { + return err + } + + c.logger.Error("failed to update mounts table", "error", err) + return logical.CodedError(500, "failed to update mounts table") + } + c.mountsLock.Unlock() + + // Remount the backend + if err := c.router.Remount(ctx, src, dst); err != nil { + return err + } + + // Un-taint the path + if err := c.router.Untaint(ctx, dst); err != nil { + return err + } + + if 
c.logger.IsInfo() { + c.logger.Info("successful remount", "old_path", src, "new_path", dst) + } + return nil +} + +// loadMounts is invoked as part of postUnseal to load the mount table +func (c *Core) loadMounts(ctx context.Context) error { + // Load the existing mount table + raw, err := c.barrier.Get(ctx, coreMountConfigPath) + if err != nil { + c.logger.Error("failed to read mount table", "error", err) + return errLoadMountsFailed + } + rawLocal, err := c.barrier.Get(ctx, coreLocalMountConfigPath) + if err != nil { + c.logger.Error("failed to read local mount table", "error", err) + return errLoadMountsFailed + } + + c.mountsLock.Lock() + defer c.mountsLock.Unlock() + + if raw != nil { + // Check if the persisted value has canary in the beginning. If + // yes, decompress the table and then JSON decode it. If not, + // simply JSON decode it. + mountTable, err := c.decodeMountTable(ctx, raw.Value) + if err != nil { + c.logger.Error("failed to decompress and/or decode the mount table", "error", err) + return err + } + c.mounts = mountTable + } + + var needPersist bool + if c.mounts == nil { + c.logger.Info("no mounts; adding default mount table") + c.mounts = c.defaultMountTable() + needPersist = true + } + + if rawLocal != nil { + localMountTable, err := c.decodeMountTable(ctx, rawLocal.Value) + if err != nil { + c.logger.Error("failed to decompress and/or decode the local mount table", "error", err) + return err + } + if localMountTable != nil && len(localMountTable.Entries) > 0 { + c.mounts.Entries = append(c.mounts.Entries, localMountTable.Entries...) + } + } + + // Note that this is only designed to work with singletons, as it checks by + // type only. + + // Upgrade to typed mount table + if c.mounts.Type == "" { + c.mounts.Type = mountTableType + needPersist = true + } + + for _, requiredMount := range c.requiredMountTable().Entries { + foundRequired := false + for _, coreMount := range c.mounts.Entries { + if coreMount.Type == requiredMount.Type { + foundRequired = true + coreMount.Config = requiredMount.Config + break + } + } + + // In a replication scenario we will let sync invalidation take + // care of creating a new required mount that doesn't exist yet. + // This should only happen in the upgrade case where a new one is + // introduced on the primary; otherwise initial bootstrapping will + // ensure this comes over. If we upgrade first, we simply don't + // create the mount, so we won't conflict when we sync. If this is + // local (e.g. cubbyhole) we do still add it. 
+ if !foundRequired && (!c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) || requiredMount.Local) { + c.mounts.Entries = append(c.mounts.Entries, requiredMount) + needPersist = true + } + } + + // Upgrade to table-scoped entries + for _, entry := range c.mounts.Entries { + if entry.Type == cubbyholeMountType && !entry.Local { + entry.Local = true + needPersist = true + } + if entry.Table == "" { + entry.Table = c.mounts.Type + needPersist = true + } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor(entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + needPersist = true + } + if entry.BackendAwareUUID == "" { + bUUID, err := uuid.GenerateUUID() + if err != nil { + return err + } + entry.BackendAwareUUID = bUUID + needPersist = true + } + + if entry.NamespaceID == "" { + entry.NamespaceID = namespace.RootNamespaceID + needPersist = true + } + ns, err := NamespaceByID(ctx, entry.NamespaceID, c) + if err != nil { + return err + } + if ns == nil { + return namespace.ErrNoNamespace + } + entry.namespace = ns + + // Sync values to the cache + entry.SyncCache() + } + + // Done if we have restored the mount table and we don't need + // to persist + if !needPersist { + return nil + } + + // Persist both mount tables + if err := c.persistMounts(ctx, c.mounts, nil); err != nil { + c.logger.Error("failed to persist mount table", "error", err) + return errLoadMountsFailed + } + return nil +} + +// persistMounts is used to persist the mount table after modification +func (c *Core) persistMounts(ctx context.Context, table *MountTable, local *bool) error { + if table.Type != mountTableType { + c.logger.Error("given table to persist has wrong type", "actual_type", table.Type, "expected_type", mountTableType) + return fmt.Errorf("invalid table type given, not persisting") + } + + for _, entry := range table.Entries { + if entry.Table != table.Type { + c.logger.Error("given entry to persist in mount table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type) + return fmt.Errorf("invalid mount entry found, not persisting") + } + } + + nonLocalMounts := &MountTable{ + Type: mountTableType, + } + + localMounts := &MountTable{ + Type: mountTableType, + } + + for _, entry := range table.Entries { + if entry.Local { + localMounts.Entries = append(localMounts.Entries, entry) + } else { + nonLocalMounts.Entries = append(nonLocalMounts.Entries, entry) + } + } + + writeTable := func(mt *MountTable, path string) error { + // Encode the mount table into JSON and compress it (lzw). 
+ compressedBytes, err := jsonutil.EncodeJSONAndCompress(mt, nil) + if err != nil { + c.logger.Error("failed to encode or compress mount table", "error", err) + return err + } + + // Create an entry + entry := &logical.StorageEntry{ + Key: path, + Value: compressedBytes, + } + + // Write to the physical backend + if err := c.barrier.Put(ctx, entry); err != nil { + c.logger.Error("failed to persist mount table", "error", err) + return err + } + return nil + } + + var err error + switch { + case local == nil: + // Write non-local mounts + err := writeTable(nonLocalMounts, coreMountConfigPath) + if err != nil { + return err + } + + // Write local mounts + err = writeTable(localMounts, coreLocalMountConfigPath) + if err != nil { + return err + } + case *local: + // Write local mounts + err = writeTable(localMounts, coreLocalMountConfigPath) + default: + // Write non-local mounts + err = writeTable(nonLocalMounts, coreMountConfigPath) + } + + return err +} + +// setupMounts is invoked after we've loaded the mount table to +// initialize the logical backends and setup the router +func (c *Core) setupMounts(ctx context.Context) error { + c.mountsLock.Lock() + defer c.mountsLock.Unlock() + + for _, entry := range c.mounts.sortEntriesByPathDepth().Entries { + // Initialize the backend, special casing for system + barrierPath := entry.ViewPath() + + // Create a barrier view using the UUID + view := NewBarrierView(c.barrier, barrierPath) + + // Singleton mounts cannot be filtered manually on a per-secondary basis + // from replication + if strutil.StrListContains(singletonMounts, entry.Type) { + addFilterablePath(c, barrierPath) + } + + // Determining the replicated state of the mount + nilMount, err := preprocessMount(c, entry, view) + if err != nil { + return err + } + origReadOnlyErr := view.getReadOnlyErr() + + // Mark the view as read-only until the mounting is complete and + // ensure that it is reset after. This ensures that there will be no + // writes during the construction of the backend. + view.setReadOnlyErr(logical.ErrSetupReadOnly) + if strutil.StrListContains(singletonMounts, entry.Type) { + defer view.setReadOnlyErr(origReadOnlyErr) + } else { + c.postUnsealFuncs = append(c.postUnsealFuncs, func() { + view.setReadOnlyErr(origReadOnlyErr) + }) + } + + var backend logical.Backend + // Create the new backend + sysView := c.mountEntrySysView(entry) + backend, err = c.newLogicalBackend(ctx, entry, sysView, view) + if err != nil { + c.logger.Error("failed to create mount entry", "path", entry.Path, "error", err) + if !c.builtinRegistry.Contains(entry.Type, consts.PluginTypeSecrets) { + // If we encounter an error instantiating the backend due to an error, + // skip backend initialization but register the entry to the mount table + // to preserve storage and path. 
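persistMounts above writes each half of the table with jsonutil.EncodeJSONAndCompress, and loadMounts checks for a canary at the start of the persisted value before decompressing. A simplified, stdlib-only model of that round trip, with gzip standing in for the LZW scheme and a single hypothetical canary byte in place of jsonutil's per-algorithm markers:

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
)

// canary marks a compressed payload; a stand-in for the scheme the real
// decodeMountTable handles.
const canary = 0x01

func encode(v interface{}) ([]byte, error) {
	js, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	buf.WriteByte(canary)
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(js); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func decode(raw []byte, out interface{}) error {
	if len(raw) > 0 && raw[0] == canary {
		zr, err := gzip.NewReader(bytes.NewReader(raw[1:]))
		if err != nil {
			return err
		}
		defer zr.Close()
		return json.NewDecoder(zr).Decode(out)
	}
	// No canary: an uncompressed legacy payload, JSON decode directly.
	return json.Unmarshal(raw, out)
}

func main() {
	in := map[string]string{"type": "mounts"}
	raw, _ := encode(in)
	var out map[string]string
	if err := decode(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["type"]) // mounts
}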
+ c.logger.Warn("skipping plugin-based mount entry", "path", entry.Path) + goto ROUTER_MOUNT + } + return errLoadMountsFailed + } + if backend == nil { + return fmt.Errorf("created mount entry of type %q is nil", entry.Type) + } + + { + // Check for the correct backend type + backendType := backend.Type() + + if backendType != logical.TypeLogical { + if entry.Type != "kv" && entry.Type != "system" && entry.Type != "cubbyhole" { + return fmt.Errorf(`unknown backend type: "%s"`, entry.Type) + } + } + + addPathCheckers(c, entry, backend, barrierPath) + + c.setCoreBackend(entry, backend, view) + } + + // If the mount is filtered or we are on a DR secondary we don't want to + // keep the actual backend running, so we clean it up and set it to nil + // so the router does not have a pointer to the object. + if nilMount { + backend.Cleanup(ctx) + backend = nil + } + + ROUTER_MOUNT: + // Mount the backend + err = c.router.Mount(backend, entry.Path, entry, view) + if err != nil { + c.logger.Error("failed to mount entry", "path", entry.Path, "error", err) + return errLoadMountsFailed + } + + // Initialize + if !nilMount { + // Bind locally + localEntry := entry + c.postUnsealFuncs = append(c.postUnsealFuncs, func() { + if backend == nil { + c.logger.Error("skipping initialization on nil backend", "path", localEntry.Path) + return + } + + err := backend.Initialize(ctx, &logical.InitializationRequest{Storage: view}) + if err != nil { + c.logger.Error("failed to initialize mount entry", "path", localEntry.Path, "error", err) + } + }) + } + + if c.logger.IsInfo() { + c.logger.Info("successfully mounted backend", "type", entry.Type, "path", entry.Path) + } + + // Ensure the path is tainted if set in the mount table + if entry.Tainted { + c.router.Taint(ctx, entry.Path) + } + + // Ensure the cache is populated, don't need the result + NamespaceByID(ctx, entry.NamespaceID, c) + } + return nil +} + +// unloadMounts is used before we seal the vault to reset the mounts to +// their unloaded state, calling Cleanup if defined. This is reversed by load and setup mounts. 
+func (c *Core) unloadMounts(ctx context.Context) error { + c.mountsLock.Lock() + defer c.mountsLock.Unlock() + + if c.mounts != nil { + mountTable := c.mounts.shallowClone() + for _, e := range mountTable.Entries { + backend := c.router.MatchingBackend(namespace.ContextWithNamespace(ctx, e.namespace), e.Path) + if backend != nil { + backend.Cleanup(ctx) + } + + viewPath := e.ViewPath() + removePathCheckers(c, e, viewPath) + } + } + + c.mounts = nil + c.router.reset() + c.systemBarrierView = nil + return nil +} + +// newLogicalBackend is used to create and configure a new logical backend by name +func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView logical.SystemView, view logical.Storage) (logical.Backend, error) { + t := entry.Type + if alias, ok := mountAliases[t]; ok { + t = alias + } + + f, ok := c.logicalBackends[t] + if !ok { + f = plugin.Factory + } + + // Set up conf to pass in plugin_name + conf := make(map[string]string, len(entry.Options)+1) + for k, v := range entry.Options { + conf[k] = v + } + + switch { + case entry.Type == "plugin": + conf["plugin_name"] = entry.Config.PluginName + default: + conf["plugin_name"] = t + } + + conf["plugin_type"] = consts.PluginTypeSecrets.String() + + backendLogger := c.baseLogger.Named(fmt.Sprintf("secrets.%s.%s", t, entry.Accessor)) + c.AddLogger(backendLogger) + config := &logical.BackendConfig{ + StorageView: view, + Logger: backendLogger, + Config: conf, + System: sysView, + BackendUUID: entry.BackendAwareUUID, + } + + ctx = context.WithValue(ctx, "core_number", c.coreNumber) + b, err := f(ctx, config) + if err != nil { + return nil, err + } + if b == nil { + return nil, fmt.Errorf("nil backend of type %q returned from factory", t) + } + addLicenseCallback(c, b) + + return b, nil +} + +// defaultMountTable creates a default mount table +func (c *Core) defaultMountTable() *MountTable { + table := &MountTable{ + Type: mountTableType, + } + table.Entries = append(table.Entries, c.requiredMountTable().Entries...) 
+ + if os.Getenv("VAULT_INTERACTIVE_DEMO_SERVER") != "" { + mountUUID, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Sprintf("could not create default secret mount UUID: %v", err)) + } + mountAccessor, err := c.generateMountAccessor("kv") + if err != nil { + panic(fmt.Sprintf("could not generate default secret mount accessor: %v", err)) + } + bUUID, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Sprintf("could not create default secret mount backend UUID: %v", err)) + } + + kvMount := &MountEntry{ + Table: mountTableType, + Path: "secret/", + Type: "kv", + Description: "key/value secret storage", + UUID: mountUUID, + Accessor: mountAccessor, + BackendAwareUUID: bUUID, + Options: map[string]string{ + "version": "2", + }, + } + table.Entries = append(table.Entries, kvMount) + } + + return table +} + +// requiredMountTable() creates a mount table with entries required +// to be available +func (c *Core) requiredMountTable() *MountTable { + table := &MountTable{ + Type: mountTableType, + } + cubbyholeUUID, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Sprintf("could not create cubbyhole UUID: %v", err)) + } + cubbyholeAccessor, err := c.generateMountAccessor("cubbyhole") + if err != nil { + panic(fmt.Sprintf("could not generate cubbyhole accessor: %v", err)) + } + cubbyholeBackendUUID, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Sprintf("could not create cubbyhole backend UUID: %v", err)) + } + cubbyholeMount := &MountEntry{ + Table: mountTableType, + Path: cubbyholeMountPath, + Type: cubbyholeMountType, + Description: "per-token private secret storage", + UUID: cubbyholeUUID, + Accessor: cubbyholeAccessor, + Local: true, + BackendAwareUUID: cubbyholeBackendUUID, + } + + sysUUID, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Sprintf("could not create sys UUID: %v", err)) + } + sysAccessor, err := c.generateMountAccessor("system") + if err != nil { + panic(fmt.Sprintf("could not generate sys accessor: %v", err)) + } + sysBackendUUID, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Sprintf("could not create sys backend UUID: %v", err)) + } + sysMount := &MountEntry{ + Table: mountTableType, + Path: "sys/", + Type: systemMountType, + Description: "system endpoints used for control, policy and debugging", + UUID: sysUUID, + Accessor: sysAccessor, + BackendAwareUUID: sysBackendUUID, + Config: MountConfig{ + PassthroughRequestHeaders: []string{"Accept"}, + }, + } + + identityUUID, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Sprintf("could not create identity mount entry UUID: %v", err)) + } + identityAccessor, err := c.generateMountAccessor("identity") + if err != nil { + panic(fmt.Sprintf("could not generate identity accessor: %v", err)) + } + identityBackendUUID, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Sprintf("could not create identity backend UUID: %v", err)) + } + identityMount := &MountEntry{ + Table: mountTableType, + Path: "identity/", + Type: "identity", + Description: "identity store", + UUID: identityUUID, + Accessor: identityAccessor, + BackendAwareUUID: identityBackendUUID, + } + + table.Entries = append(table.Entries, cubbyholeMount) + table.Entries = append(table.Entries, sysMount) + table.Entries = append(table.Entries, identityMount) + + return table +} + +// This function returns tables that are singletons. 
The main usage of this is +// for replication, so we can send over mount info (especially, UUIDs of +// mounts, which are used for salts) for mounts that may not be able to be +// handled normally. After saving these values on the secondary, we let normal +// sync invalidation do its thing. Because of its use for replication, we +// exclude local mounts. +func (c *Core) singletonMountTables() (mounts, auth *MountTable) { + mounts = &MountTable{} + auth = &MountTable{} + + c.mountsLock.RLock() + for _, entry := range c.mounts.Entries { + if strutil.StrListContains(singletonMounts, entry.Type) && !entry.Local && entry.Namespace().ID == namespace.RootNamespaceID { + mounts.Entries = append(mounts.Entries, entry) + } + } + c.mountsLock.RUnlock() + + c.authLock.RLock() + for _, entry := range c.auth.Entries { + if strutil.StrListContains(singletonMounts, entry.Type) && !entry.Local && entry.Namespace().ID == namespace.RootNamespaceID { + auth.Entries = append(auth.Entries, entry) + } + } + c.authLock.RUnlock() + + return +} + +func (c *Core) setCoreBackend(entry *MountEntry, backend logical.Backend, view *BarrierView) { + switch entry.Type { + case systemMountType: + c.systemBackend = backend.(*SystemBackend) + c.systemBarrierView = view + case cubbyholeMountType: + ch := backend.(*CubbyholeBackend) + ch.saltUUID = entry.UUID + ch.storageView = view + c.cubbyholeBackend = ch + case identityMountType: + c.identityStore = backend.(*IdentityStore) + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/mount_util.go b/vendor/github.com/hashicorp/vault/vault/mount_util.go new file mode 100644 index 00000000..65e5a3d5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/mount_util.go @@ -0,0 +1,56 @@ +// +build !enterprise + +package vault + +import ( + "context" + "path" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" +) + +func addPathCheckers(*Core, *MountEntry, logical.Backend, string) {} +func removePathCheckers(*Core, *MountEntry, string) {} +func addAuditPathChecker(*Core, *MountEntry, *BarrierView, string) {} +func removeAuditPathChecker(*Core, *MountEntry) {} +func addFilterablePath(*Core, string) {} +func preprocessMount(*Core, *MountEntry, *BarrierView) (bool, error) { return false, nil } +func clearIgnoredPaths(context.Context, *Core, logical.Backend, string) error { return nil } +func addLicenseCallback(*Core, logical.Backend) {} +func runFilteredPathsEvaluation(context.Context, *Core) error { return nil } + +// ViewPath returns storage prefix for the view +func (e *MountEntry) ViewPath() string { + switch e.Type { + case systemMountType: + return systemBarrierPrefix + case "token": + return path.Join(systemBarrierPrefix, tokenSubPath) + "/" + } + + switch e.Table { + case mountTableType: + return backendBarrierPrefix + e.UUID + "/" + case credentialTableType: + return credentialBarrierPrefix + e.UUID + "/" + case auditTableType: + return auditBarrierPrefix + e.UUID + "/" + } + + panic("invalid mount entry") +} + +func verifyNamespace(*Core, *namespace.Namespace, *MountEntry) error { return nil } + +// mountEntrySysView creates a logical.SystemView from global and +// mount-specific entries; because this should be called when setting +// up a mountEntry, it doesn't check to ensure that me is not nil +func (c *Core) mountEntrySysView(entry *MountEntry) extendedSystemView { + return extendedSystemViewImpl{ + dynamicSystemView{ + core: c, + mountEntry: entry, + }, + } +} diff --git 
a/vendor/github.com/hashicorp/vault/vault/namespaces.go b/vendor/github.com/hashicorp/vault/vault/namespaces.go new file mode 100644 index 00000000..5b9f31b9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/namespaces.go @@ -0,0 +1,18 @@ +package vault + +import ( + "context" + + "github.com/hashicorp/vault/helper/namespace" +) + +var ( + NamespaceByID func(context.Context, string, *Core) (*namespace.Namespace, error) = namespaceByID +) + +func namespaceByID(ctx context.Context, nsID string, c *Core) (*namespace.Namespace, error) { + if nsID == namespace.RootNamespaceID { + return namespace.RootNamespace, nil + } + return nil, namespace.ErrNoNamespace +} diff --git a/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go b/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go new file mode 100644 index 00000000..78c5e805 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go @@ -0,0 +1,381 @@ +package vault + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "path/filepath" + "sort" + "strings" + "sync" + + log "github.com/hashicorp/go-hclog" + multierror "github.com/hashicorp/go-multierror" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + backendplugin "github.com/hashicorp/vault/sdk/plugin" +) + +var ( + pluginCatalogPath = "core/plugin-catalog/" + ErrDirectoryNotConfigured = errors.New("could not set plugin, plugin directory is not configured") + ErrPluginNotFound = errors.New("plugin not found in the catalog") + ErrPluginBadType = errors.New("unable to determine plugin type") +) + +// PluginCatalog keeps a record of plugins known to vault. External plugins need +// to be registered to the catalog before they can be used in backends. Builtin +// plugins are automatically detected and included in the catalog. +type PluginCatalog struct { + builtinRegistry BuiltinRegistry + catalogView *BarrierView + directory string + + lock sync.RWMutex +} + +func (c *Core) setupPluginCatalog(ctx context.Context) error { + c.pluginCatalog = &PluginCatalog{ + builtinRegistry: c.builtinRegistry, + catalogView: NewBarrierView(c.barrier, pluginCatalogPath), + directory: c.pluginDirectory, + } + + // Run upgrade if untyped plugins exist + err := c.pluginCatalog.UpgradePlugins(ctx, c.logger) + if err != nil { + c.logger.Error("error while upgrading plugin storage", "error", err) + } + + if c.logger.IsInfo() { + c.logger.Info("successfully setup plugin catalog", "plugin-directory", c.pluginDirectory) + } + + return nil +} + +// getPluginTypeFromUnknown will attempt to run the plugin to determine the +// type. It will first attempt to run as a database plugin then a backend +// plugin. Both of these will be run in metadata mode. 
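A simplified stand-in for that try-in-order probing, with hypothetical probe callbacks in place of actually launching the plugin in metadata mode:

package main

import (
	"errors"
	"fmt"
)

type pluginType int

const (
	typeUnknown pluginType = iota
	typeDatabase
	typeSecrets
)

// detect tries each candidate type in order and returns the first whose
// probe succeeds, mirroring the database-then-backend fallthrough.
func detect(probes map[pluginType]func() error) pluginType {
	for _, t := range []pluginType{typeDatabase, typeSecrets} {
		if err := probes[t](); err == nil {
			return t
		}
	}
	return typeUnknown
}

func main() {
	got := detect(map[pluginType]func() error{
		typeDatabase: func() error { return errors.New("handshake failed") },
		typeSecrets:  func() error { return nil },
	})
	fmt.Println(got == typeSecrets) // true
}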
+func (c *PluginCatalog) getPluginTypeFromUnknown(ctx context.Context, logger log.Logger, plugin *pluginutil.PluginRunner) (consts.PluginType, error) { + + { + // Attempt to run as database plugin + client, err := dbplugin.NewPluginClient(ctx, nil, plugin, log.NewNullLogger(), true) + if err == nil { + // Close the client and cleanup the plugin process + client.Close() + return consts.PluginTypeDatabase, nil + } else { + logger.Warn(fmt.Sprintf("received %s attempting as db plugin, attempting as auth/secret plugin", err)) + } + } + + { + // Attempt to run as backend plugin + client, err := backendplugin.NewPluginClient(ctx, nil, plugin, log.NewNullLogger(), true) + if err == nil { + err := client.Setup(ctx, &logical.BackendConfig{}) + if err != nil { + return consts.PluginTypeUnknown, err + } + + backendType := client.Type() + client.Cleanup(ctx) + + switch backendType { + case logical.TypeCredential: + return consts.PluginTypeCredential, nil + case logical.TypeLogical: + return consts.PluginTypeSecrets, nil + } + logger.Warn(fmt.Sprintf("unknown backendType of %s", backendType)) + } else { + logger.Warn(fmt.Sprintf("received %s attempting as an auth/secret plugin, continuing", err)) + } + } + + return consts.PluginTypeUnknown, nil +} + +// UpdatePlugins will loop over all the plugins of unknown type and attempt to +// upgrade them to typed plugins +func (c *PluginCatalog) UpgradePlugins(ctx context.Context, logger log.Logger) error { + c.lock.Lock() + defer c.lock.Unlock() + + // If the directory isn't set we can skip the upgrade attempt + if c.directory == "" { + return nil + } + + // List plugins from old location + pluginsRaw, err := c.catalogView.List(ctx, "") + if err != nil { + return err + } + plugins := make([]string, 0, len(pluginsRaw)) + for _, p := range pluginsRaw { + if !strings.HasSuffix(p, "/") { + plugins = append(plugins, p) + } + } + + logger.Info("upgrading plugin information", "plugins", plugins) + + var retErr error + for _, pluginName := range plugins { + pluginRaw, err := c.catalogView.Get(ctx, pluginName) + if err != nil { + retErr = multierror.Append(errwrap.Wrapf("failed to load plugin entry: {{err}}", err)) + continue + } + + plugin := new(pluginutil.PluginRunner) + if err := jsonutil.DecodeJSON(pluginRaw.Value, plugin); err != nil { + retErr = multierror.Append(errwrap.Wrapf("failed to decode plugin entry: {{err}}", err)) + continue + } + + // prepend the plugin directory to the command + cmdOld := plugin.Command + plugin.Command = filepath.Join(c.directory, plugin.Command) + + pluginType, err := c.getPluginTypeFromUnknown(ctx, logger, plugin) + if err != nil { + retErr = multierror.Append(retErr, fmt.Errorf("could not upgrade plugin %s: %s", pluginName, err)) + continue + } + if pluginType == consts.PluginTypeUnknown { + retErr = multierror.Append(retErr, fmt.Errorf("could not upgrade plugin %s: plugin of unknown type", pluginName)) + continue + } + + // Upgrade the storage + err = c.setInternal(ctx, pluginName, pluginType, cmdOld, plugin.Args, plugin.Env, plugin.Sha256) + if err != nil { + retErr = multierror.Append(retErr, fmt.Errorf("could not upgrade plugin %s: %s", pluginName, err)) + continue + } + + err = c.catalogView.Delete(ctx, pluginName) + if err != nil { + logger.Error("could not remove plugin", "plugin", pluginName, "error", err) + } + + logger.Info("upgraded plugin type", "plugin", pluginName, "type", pluginType.String()) + } + + return retErr +} + +// Get retrieves a plugin with the specified name from the catalog. 
It first +// looks for external plugins with this name and then looks for builtin plugins. +// It returns a PluginRunner or an error if no plugin was found. +func (c *PluginCatalog) Get(ctx context.Context, name string, pluginType consts.PluginType) (*pluginutil.PluginRunner, error) { + c.lock.RLock() + runner, err := c.get(ctx, name, pluginType) + c.lock.RUnlock() + return runner, err +} + +func (c *PluginCatalog) get(ctx context.Context, name string, pluginType consts.PluginType) (*pluginutil.PluginRunner, error) { + // If the directory isn't set only look for builtin plugins. + if c.directory != "" { + // Look for external plugins in the barrier + out, err := c.catalogView.Get(ctx, pluginType.String()+"/"+name) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("failed to retrieve plugin %q: {{err}}", name), err) + } + if out == nil { + // Also look for external plugins under what their name would have been if they + // were registered before plugin types existed. + out, err = c.catalogView.Get(ctx, name) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("failed to retrieve plugin %q: {{err}}", name), err) + } + } + if out != nil { + entry := new(pluginutil.PluginRunner) + if err := jsonutil.DecodeJSON(out.Value, entry); err != nil { + return nil, errwrap.Wrapf("failed to decode plugin entry: {{err}}", err) + } + if entry.Type != pluginType && entry.Type != consts.PluginTypeUnknown { + return nil, nil + } + + // prepend the plugin directory to the command + entry.Command = filepath.Join(c.directory, entry.Command) + + return entry, nil + } + } + // Look for builtin plugins + if factory, ok := c.builtinRegistry.Get(name, pluginType); ok { + return &pluginutil.PluginRunner{ + Name: name, + Type: pluginType, + Builtin: true, + BuiltinFactory: factory, + }, nil + } + + return nil, nil +} + +// Set registers a new external plugin with the catalog, or updates an existing +// external plugin. It takes the name, command and SHA256 of the plugin. +func (c *PluginCatalog) Set(ctx context.Context, name string, pluginType consts.PluginType, command string, args []string, env []string, sha256 []byte) error { + if c.directory == "" { + return ErrDirectoryNotConfigured + } + + switch { + case strings.Contains(name, ".."): + fallthrough + case strings.Contains(command, ".."): + return consts.ErrPathContainsParentReferences + } + + c.lock.Lock() + defer c.lock.Unlock() + + return c.setInternal(ctx, name, pluginType, command, args, env, sha256) +} + +func (c *PluginCatalog) setInternal(ctx context.Context, name string, pluginType consts.PluginType, command string, args []string, env []string, sha256 []byte) error { + // Best effort check to make sure the command isn't breaking out of the + // configured plugin directory. + commandFull := filepath.Join(c.directory, command) + sym, err := filepath.EvalSymlinks(commandFull) + if err != nil { + return errwrap.Wrapf("error while validating the command path: {{err}}", err) + } + symAbs, err := filepath.Abs(filepath.Dir(sym)) + if err != nil { + return errwrap.Wrapf("error while validating the command path: {{err}}", err) + } + + if symAbs != c.directory { + return errors.New("cannot execute files outside of configured plugin directory") + } + + // If the plugin type is unknown, we want to attempt to determine the type + if pluginType == consts.PluginTypeUnknown { + // entryTmp should only be used for the below type check, it uses the + // full command instead of the relative command. 
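The surrounding setInternal guards against commands escaping the plugin directory by resolving the joined path and comparing directories; a simplified model that skips the filepath.EvalSymlinks step (the paths used here are hypothetical):

package main

import (
	"errors"
	"fmt"
	"path/filepath"
)

// insideDir reports whether command, resolved against dir, stays inside dir.
// The real check also resolves symlinks before comparing.
func insideDir(dir, command string) error {
	full := filepath.Join(dir, command) // Join also cleans ".." components
	abs, err := filepath.Abs(filepath.Dir(full))
	if err != nil {
		return err
	}
	if abs != dir {
		return errors.New("cannot execute files outside of configured plugin directory")
	}
	return nil
}

func main() {
	fmt.Println(insideDir("/etc/vault/plugins", "my-plugin"))    // <nil>
	fmt.Println(insideDir("/etc/vault/plugins", "../../bin/sh")) // error
}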
+ entryTmp := &pluginutil.PluginRunner{ + Name: name, + Command: commandFull, + Args: args, + Env: env, + Sha256: sha256, + Builtin: false, + } + + pluginType, err = c.getPluginTypeFromUnknown(ctx, log.Default(), entryTmp) + if err != nil { + return err + } + if pluginType == consts.PluginTypeUnknown { + return ErrPluginBadType + } + } + + entry := &pluginutil.PluginRunner{ + Name: name, + Type: pluginType, + Command: command, + Args: args, + Env: env, + Sha256: sha256, + Builtin: false, + } + + buf, err := json.Marshal(entry) + if err != nil { + return errwrap.Wrapf("failed to encode plugin entry: {{err}}", err) + } + + logicalEntry := logical.StorageEntry{ + Key: pluginType.String() + "/" + name, + Value: buf, + } + if err := c.catalogView.Put(ctx, &logicalEntry); err != nil { + return errwrap.Wrapf("failed to persist plugin entry: {{err}}", err) + } + return nil +} + +// Delete is used to remove an external plugin from the catalog. Builtin plugins +// can not be deleted. +func (c *PluginCatalog) Delete(ctx context.Context, name string, pluginType consts.PluginType) error { + c.lock.Lock() + defer c.lock.Unlock() + + // Check the name under which the plugin exists, but if it's unfound, don't return any error. + pluginKey := pluginType.String() + "/" + name + out, err := c.catalogView.Get(ctx, pluginKey) + if err != nil || out == nil { + pluginKey = name + } + + return c.catalogView.Delete(ctx, pluginKey) +} + +// List returns a list of all the known plugin names. If an external and builtin +// plugin share the same name, only one instance of the name will be returned. +func (c *PluginCatalog) List(ctx context.Context, pluginType consts.PluginType) ([]string, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + // Collect keys for external plugins in the barrier. + keys, err := logical.CollectKeys(ctx, c.catalogView) + if err != nil { + return nil, err + } + + // Get the builtin plugins. + builtinKeys := c.builtinRegistry.Keys(pluginType) + + // Use a map to unique the two lists. + mapKeys := make(map[string]bool) + + pluginTypePrefix := pluginType.String() + "/" + + for _, plugin := range keys { + + // Only list user-added plugins if they're of the given type. + if entry, err := c.get(ctx, plugin, pluginType); err == nil && entry != nil { + + // Some keys will be prepended with the plugin type, but other ones won't. + // Users don't expect to see the plugin type, so we need to strip that here. + idx := strings.Index(plugin, pluginTypePrefix) + if idx == 0 { + plugin = plugin[len(pluginTypePrefix):] + } + mapKeys[plugin] = true + } + } + + for _, plugin := range builtinKeys { + mapKeys[plugin] = true + } + + retList := make([]string, len(mapKeys)) + i := 0 + for k := range mapKeys { + retList[i] = k + i++ + } + // sort for consistent ordering of builtin plugins + sort.Strings(retList) + + return retList, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/plugin_reload.go b/vendor/github.com/hashicorp/vault/vault/plugin_reload.go new file mode 100644 index 00000000..3b38c1a4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/plugin_reload.go @@ -0,0 +1,197 @@ +package vault + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/vault/helper/namespace" + + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// reloadPluginMounts reloads provided mounts, regardless of +// plugin name, as long as the backend type is plugin. 
+func (c *Core) reloadMatchingPluginMounts(ctx context.Context, mounts []string) error {
+	c.mountsLock.RLock()
+	defer c.mountsLock.RUnlock()
+	c.authLock.RLock()
+	defer c.authLock.RUnlock()
+
+	ns, err := namespace.FromContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	var errors error
+	for _, mount := range mounts {
+		entry := c.router.MatchingMountEntry(ctx, mount)
+		if entry == nil {
+			errors = multierror.Append(errors, fmt.Errorf("cannot fetch mount entry on %q", mount))
+			continue
+		}
+
+		var isAuth bool
+		fullPath := c.router.MatchingMount(ctx, mount)
+		if strings.HasPrefix(fullPath, credentialRoutePrefix) {
+			isAuth = true
+		}
+
+		// We don't reload mounts that are not in the same namespace
+		if ns.ID != entry.Namespace().ID {
+			continue
+		}
+
+		err := c.reloadBackendCommon(ctx, entry, isAuth)
+		if err != nil {
+			errors = multierror.Append(errors, errwrap.Wrapf(fmt.Sprintf("cannot reload plugin on %q: {{err}}", mount), err))
+			continue
+		}
+		c.logger.Info("successfully reloaded plugin", "plugin", entry.Accessor, "path", entry.Path)
+	}
+	return errors
+}
+
+// reloadMatchingPlugin reloads all mounted backends whose type matches
+// pluginName (the name under which the plugin is registered in the plugin
+// catalog).
+func (c *Core) reloadMatchingPlugin(ctx context.Context, pluginName string) error {
+	c.mountsLock.RLock()
+	defer c.mountsLock.RUnlock()
+	c.authLock.RLock()
+	defer c.authLock.RUnlock()
+
+	ns, err := namespace.FromContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	// Reload only the mount entries that match the plugin name
+	for _, entry := range c.mounts.Entries {
+		// We don't reload mounts that are not in the same namespace
+		if ns.ID != entry.Namespace().ID {
+			continue
+		}
+		if entry.Type == pluginName || (entry.Type == "plugin" && entry.Config.PluginName == pluginName) {
+			err := c.reloadBackendCommon(ctx, entry, false)
+			if err != nil {
+				return err
+			}
+			c.logger.Info("successfully reloaded plugin", "plugin", pluginName, "path", entry.Path)
+		}
+	}
+
+	// Reload only the auth mount entries that match the plugin name
+	for _, entry := range c.auth.Entries {
+		// We don't reload mounts that are not in the same namespace
+		if ns.ID != entry.Namespace().ID {
+			continue
+		}
+
+		if entry.Type == pluginName || (entry.Type == "plugin" && entry.Config.PluginName == pluginName) {
+			err := c.reloadBackendCommon(ctx, entry, true)
+			if err != nil {
+				return err
+			}
+			c.logger.Info("successfully reloaded plugin", "plugin", entry.Accessor, "path", entry.Path)
+		}
+	}
+
+	return nil
+}
+
+// reloadBackendCommon is a generic method to reload a backend provided a
+// MountEntry.
+func (c *Core) reloadBackendCommon(ctx context.Context, entry *MountEntry, isAuth bool) error {
+	// Make sure our cache is up-to-date. Since some singleton mounts can be
+	// tuned, we do this before the below check.
+	entry.SyncCache()
+
+	// We don't want to reload the singleton mounts. They often have specific
+	// in-memory elements and we don't want to touch them here.
+	if strutil.StrListContains(singletonMounts, entry.Type) {
+		c.logger.Debug("skipping reload of singleton mount", "type", entry.Type)
+		return nil
+	}
+
+	path := entry.Path
+
+	if isAuth {
+		path = credentialRoutePrefix + path
+	}
+
+	// Fast-path out if the backend doesn't exist
+	raw, ok := c.router.root.Get(entry.Namespace().Path + path)
+	if !ok {
+		return nil
+	}
+
+	re := raw.(*routeEntry)
+
+	// Grab the lock; this allows requests to drain before we clean up the
+	// client.
+ re.l.Lock() + defer re.l.Unlock() + + // Only call Cleanup if backend is initialized + if re.backend != nil { + // Call backend's Cleanup routine + re.backend.Cleanup(ctx) + } + + view := re.storageView + viewPath := entry.UUID + "/" + switch entry.Table { + case mountTableType: + viewPath = backendBarrierPrefix + viewPath + case credentialTableType: + viewPath = credentialBarrierPrefix + viewPath + } + + removePathCheckers(c, entry, viewPath) + + sysView := c.mountEntrySysView(entry) + + nilMount, err := preprocessMount(c, entry, view.(*BarrierView)) + if err != nil { + return err + } + + var backend logical.Backend + if !isAuth { + // Dispense a new backend + backend, err = c.newLogicalBackend(ctx, entry, sysView, view) + } else { + backend, err = c.newCredentialBackend(ctx, entry, sysView, view) + } + if err != nil { + return err + } + if backend == nil { + return fmt.Errorf("nil backend of type %q returned from creation function", entry.Type) + } + + addPathCheckers(c, entry, backend, viewPath) + + if nilMount { + backend.Cleanup(ctx) + backend = nil + } + + // Set the backend back + re.backend = backend + + if backend != nil { + // Set paths as well + paths := backend.SpecialPaths() + if paths != nil { + re.rootPaths.Store(pathsToRadix(paths.Root)) + re.loginPaths.Store(pathsToRadix(paths.Unauthenticated)) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/policy.go b/vendor/github.com/hashicorp/vault/vault/policy.go new file mode 100644 index 00000000..42b247ea --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/policy.go @@ -0,0 +1,477 @@ +package vault + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/hclutil" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/mitchellh/copystructure" +) + +const ( + DenyCapability = "deny" + CreateCapability = "create" + ReadCapability = "read" + UpdateCapability = "update" + DeleteCapability = "delete" + ListCapability = "list" + SudoCapability = "sudo" + RootCapability = "root" + + // Backwards compatibility + OldDenyPathPolicy = "deny" + OldReadPathPolicy = "read" + OldWritePathPolicy = "write" + OldSudoPathPolicy = "sudo" +) + +const ( + DenyCapabilityInt uint32 = 1 << iota + CreateCapabilityInt + ReadCapabilityInt + UpdateCapabilityInt + DeleteCapabilityInt + ListCapabilityInt + SudoCapabilityInt +) + +type PolicyType uint32 + +const ( + PolicyTypeACL PolicyType = iota + PolicyTypeRGP + PolicyTypeEGP + + // Triggers a lookup in the map to figure out if ACL or RGP + PolicyTypeToken +) + +func (p PolicyType) String() string { + switch p { + case PolicyTypeACL: + return "acl" + case PolicyTypeRGP: + return "rgp" + case PolicyTypeEGP: + return "egp" + } + + return "" +} + +var ( + cap2Int = map[string]uint32{ + DenyCapability: DenyCapabilityInt, + CreateCapability: CreateCapabilityInt, + ReadCapability: ReadCapabilityInt, + UpdateCapability: UpdateCapabilityInt, + DeleteCapability: DeleteCapabilityInt, + ListCapability: ListCapabilityInt, + SudoCapability: SudoCapabilityInt, + } +) + +type egpPath struct { + Path string `json:"path"` + Glob bool `json:"glob"` +} + +// Policy is used to represent the policy specified by an ACL configuration. 
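A quick illustration of how the capability bitmap constants above combine, including the deny short-circuit that parsePaths applies later in this file:

package main

import "fmt"

// Mirrors the bitmap scheme in policy.go: each capability is one bit, and a
// path's permissions are the bitwise OR of its capability bits.
const (
	denyCap uint32 = 1 << iota
	createCap
	readCap
	updateCap
	deleteCap
	listCap
	sudoCap
)

var cap2Int = map[string]uint32{
	"deny": denyCap, "create": createCap, "read": readCap,
	"update": updateCap, "delete": deleteCap, "list": listCap, "sudo": sudoCap,
}

func bitmap(caps []string) uint32 {
	var b uint32
	for _, c := range caps {
		if c == "deny" {
			return denyCap // deny excludes every other capability
		}
		b |= cap2Int[c]
	}
	return b
}

func main() {
	b := bitmap([]string{"read", "list"})
	fmt.Println(b&readCap != 0, b&sudoCap != 0) // true false
}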
+type Policy struct { + sentinelPolicy + Name string `hcl:"name"` + Paths []*PathRules `hcl:"-"` + Raw string + Type PolicyType + Templated bool + namespace *namespace.Namespace +} + +// ShallowClone returns a shallow clone of the policy. This should not be used +// if any of the reference-typed fields are going to be modified +func (p *Policy) ShallowClone() *Policy { + return &Policy{ + sentinelPolicy: p.sentinelPolicy, + Name: p.Name, + Paths: p.Paths, + Raw: p.Raw, + Type: p.Type, + Templated: p.Templated, + namespace: p.namespace, + } +} + +// PathRules represents a policy for a path in the namespace. +type PathRules struct { + Path string + Policy string + Permissions *ACLPermissions + IsPrefix bool + HasSegmentWildcards bool + Capabilities []string + + // These keys are used at the top level to make the HCL nicer; we store in + // the ACLPermissions object though + MinWrappingTTLHCL interface{} `hcl:"min_wrapping_ttl"` + MaxWrappingTTLHCL interface{} `hcl:"max_wrapping_ttl"` + AllowedParametersHCL map[string][]interface{} `hcl:"allowed_parameters"` + DeniedParametersHCL map[string][]interface{} `hcl:"denied_parameters"` + RequiredParametersHCL []string `hcl:"required_parameters"` + MFAMethodsHCL []string `hcl:"mfa_methods"` + ControlGroupHCL *ControlGroupHCL `hcl:"control_group"` +} + +type ControlGroupHCL struct { + TTL interface{} `hcl:"ttl"` + Factors map[string]*ControlGroupFactor `hcl:"factor"` +} + +type ControlGroup struct { + TTL time.Duration + Factors []*ControlGroupFactor +} + +type ControlGroupFactor struct { + Name string + Identity *IdentityFactor `hcl:"identity"` +} + +type IdentityFactor struct { + GroupIDs []string `hcl:"group_ids"` + GroupNames []string `hcl:"group_names"` + ApprovalsRequired int `hcl:"approvals"` +} + +type ACLPermissions struct { + CapabilitiesBitmap uint32 + MinWrappingTTL time.Duration + MaxWrappingTTL time.Duration + AllowedParameters map[string][]interface{} + DeniedParameters map[string][]interface{} + RequiredParameters []string + MFAMethods []string + ControlGroup *ControlGroup +} + +func (p *ACLPermissions) Clone() (*ACLPermissions, error) { + ret := &ACLPermissions{ + CapabilitiesBitmap: p.CapabilitiesBitmap, + MinWrappingTTL: p.MinWrappingTTL, + MaxWrappingTTL: p.MaxWrappingTTL, + RequiredParameters: p.RequiredParameters[:], + } + + switch { + case p.AllowedParameters == nil: + case len(p.AllowedParameters) == 0: + ret.AllowedParameters = make(map[string][]interface{}) + default: + clonedAllowed, err := copystructure.Copy(p.AllowedParameters) + if err != nil { + return nil, err + } + ret.AllowedParameters = clonedAllowed.(map[string][]interface{}) + } + + switch { + case p.DeniedParameters == nil: + case len(p.DeniedParameters) == 0: + ret.DeniedParameters = make(map[string][]interface{}) + default: + clonedDenied, err := copystructure.Copy(p.DeniedParameters) + if err != nil { + return nil, err + } + ret.DeniedParameters = clonedDenied.(map[string][]interface{}) + } + + switch { + case p.MFAMethods == nil: + case len(p.MFAMethods) == 0: + ret.MFAMethods = []string{} + default: + clonedMFAMethods, err := copystructure.Copy(p.MFAMethods) + if err != nil { + return nil, err + } + ret.MFAMethods = clonedMFAMethods.([]string) + } + + switch { + case p.ControlGroup == nil: + default: + clonedControlGroup, err := copystructure.Copy(p.ControlGroup) + if err != nil { + return nil, err + } + ret.ControlGroup = clonedControlGroup.(*ControlGroup) + } + + return ret, nil +} + +// ParseACLPolicy is used to parse the specified ACL rules into an +// 
intermediary set of policies, before being compiled into +// the ACL +func ParseACLPolicy(ns *namespace.Namespace, rules string) (*Policy, error) { + return parseACLPolicyWithTemplating(ns, rules, false, nil, nil) +} + +// parseACLPolicyWithTemplating performs the actual work and checks whether we +// should perform substitutions. If performTemplating is true we know that it +// is templated so we don't check again, otherwise we check to see if it's a +// templated policy. +func parseACLPolicyWithTemplating(ns *namespace.Namespace, rules string, performTemplating bool, entity *identity.Entity, groups []*identity.Group) (*Policy, error) { + // Parse the rules + root, err := hcl.Parse(rules) + if err != nil { + return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + } + + // Top-level item should be the object list + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("failed to parse policy: does not contain a root object") + } + + // Check for invalid top-level keys + valid := []string{ + "name", + "path", + } + if err := hclutil.CheckHCLKeys(list, valid); err != nil { + return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + } + + // Create the initial policy and store the raw text of the rules + p := Policy{ + Raw: rules, + Type: PolicyTypeACL, + namespace: ns, + } + if err := hcl.DecodeObject(&p, list); err != nil { + return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + } + + if o := list.Filter("path"); len(o.Items) > 0 { + if err := parsePaths(&p, o, performTemplating, entity, groups); err != nil { + return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + } + } + + return &p, nil +} + +func parsePaths(result *Policy, list *ast.ObjectList, performTemplating bool, entity *identity.Entity, groups []*identity.Group) error { + paths := make([]*PathRules, 0, len(list.Items)) + for _, item := range list.Items { + key := "path" + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + // Check the path + if performTemplating { + _, templated, err := identity.PopulateString(identity.PopulateStringInput{ + Mode: identity.ACLTemplating, + String: key, + Entity: entity, + Groups: groups, + Namespace: result.namespace, + }) + if err != nil { + continue + } + key = templated + } else { + hasTemplating, _, err := identity.PopulateString(identity.PopulateStringInput{ + Mode: identity.ACLTemplating, + ValidityCheckOnly: true, + String: key, + }) + if err != nil { + return errwrap.Wrapf("failed to validate policy templating: {{err}}", err) + } + if hasTemplating { + result.Templated = true + } + } + + valid := []string{ + "comment", + "policy", + "capabilities", + "allowed_parameters", + "denied_parameters", + "required_parameters", + "min_wrapping_ttl", + "max_wrapping_ttl", + "mfa_methods", + "control_group", + } + if err := hclutil.CheckHCLKeys(item.Val, valid); err != nil { + return multierror.Prefix(err, fmt.Sprintf("path %q:", key)) + } + + var pc PathRules + + // allocate memory so that DecodeObject can initialize the ACLPermissions struct + pc.Permissions = new(ACLPermissions) + + pc.Path = key + + if err := hcl.DecodeObject(&pc, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("path %q:", key)) + } + + // Strip a leading '/' as paths in Vault start after the / in the API path + if len(pc.Path) > 0 && pc.Path[0] == '/' { + pc.Path = pc.Path[1:] + } + + // Ensure we are using the full request path internally + pc.Path = result.namespace.Path + pc.Path + + if strings.Contains(pc.Path, 
"+*") { + return fmt.Errorf("path %q: invalid use of wildcards ('+*' is forbidden)", pc.Path) + } + + if pc.Path == "+" || strings.Count(pc.Path, "/+") > 0 || strings.HasPrefix(pc.Path, "+/") { + pc.HasSegmentWildcards = true + } + + if strings.HasSuffix(pc.Path, "*") { + // If there are segment wildcards, don't actually strip the + // trailing asterisk, but don't want to hit the default case + if !pc.HasSegmentWildcards { + // Strip the glob character if found + pc.Path = strings.TrimSuffix(pc.Path, "*") + pc.IsPrefix = true + } + } + + // Map old-style policies into capabilities + if len(pc.Policy) > 0 { + switch pc.Policy { + case OldDenyPathPolicy: + pc.Capabilities = []string{DenyCapability} + case OldReadPathPolicy: + pc.Capabilities = append(pc.Capabilities, []string{ReadCapability, ListCapability}...) + case OldWritePathPolicy: + pc.Capabilities = append(pc.Capabilities, []string{CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability}...) + case OldSudoPathPolicy: + pc.Capabilities = append(pc.Capabilities, []string{CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability, SudoCapability}...) + default: + return fmt.Errorf("path %q: invalid policy %q", key, pc.Policy) + } + } + + // Initialize the map + pc.Permissions.CapabilitiesBitmap = 0 + for _, cap := range pc.Capabilities { + switch cap { + // If it's deny, don't include any other capability + case DenyCapability: + pc.Capabilities = []string{DenyCapability} + pc.Permissions.CapabilitiesBitmap = DenyCapabilityInt + goto PathFinished + case CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability, SudoCapability: + pc.Permissions.CapabilitiesBitmap |= cap2Int[cap] + default: + return fmt.Errorf("path %q: invalid capability %q", key, cap) + } + } + + if pc.AllowedParametersHCL != nil { + pc.Permissions.AllowedParameters = make(map[string][]interface{}, len(pc.AllowedParametersHCL)) + for key, val := range pc.AllowedParametersHCL { + pc.Permissions.AllowedParameters[strings.ToLower(key)] = val + } + } + if pc.DeniedParametersHCL != nil { + pc.Permissions.DeniedParameters = make(map[string][]interface{}, len(pc.DeniedParametersHCL)) + + for key, val := range pc.DeniedParametersHCL { + pc.Permissions.DeniedParameters[strings.ToLower(key)] = val + } + } + if pc.MinWrappingTTLHCL != nil { + dur, err := parseutil.ParseDurationSecond(pc.MinWrappingTTLHCL) + if err != nil { + return errwrap.Wrapf("error parsing min_wrapping_ttl: {{err}}", err) + } + pc.Permissions.MinWrappingTTL = dur + } + if pc.MaxWrappingTTLHCL != nil { + dur, err := parseutil.ParseDurationSecond(pc.MaxWrappingTTLHCL) + if err != nil { + return errwrap.Wrapf("error parsing max_wrapping_ttl: {{err}}", err) + } + pc.Permissions.MaxWrappingTTL = dur + } + if pc.MFAMethodsHCL != nil { + pc.Permissions.MFAMethods = make([]string, len(pc.MFAMethodsHCL)) + for idx, item := range pc.MFAMethodsHCL { + pc.Permissions.MFAMethods[idx] = item + } + } + if pc.ControlGroupHCL != nil { + pc.Permissions.ControlGroup = new(ControlGroup) + if pc.ControlGroupHCL.TTL != nil { + dur, err := parseutil.ParseDurationSecond(pc.ControlGroupHCL.TTL) + if err != nil { + return errwrap.Wrapf("error parsing control group max ttl: {{err}}", err) + } + pc.Permissions.ControlGroup.TTL = dur + } + + var factors []*ControlGroupFactor + if pc.ControlGroupHCL.Factors != nil { + for key, factor := range pc.ControlGroupHCL.Factors { + // Although we only have one factor here, we need to check to make sure there is at 
least + // one factor defined in this factor block. + if factor.Identity == nil { + return errors.New("no control_group factor provided") + } + + if factor.Identity.ApprovalsRequired <= 0 || + (len(factor.Identity.GroupIDs) == 0 && len(factor.Identity.GroupNames) == 0) { + return errors.New("must provide more than one identity group and approvals > 0") + } + + factors = append(factors, &ControlGroupFactor{ + Name: key, + Identity: factor.Identity, + }) + } + } + if len(factors) == 0 { + return errors.New("no control group factors provided") + } + pc.Permissions.ControlGroup.Factors = factors + } + if pc.Permissions.MinWrappingTTL != 0 && + pc.Permissions.MaxWrappingTTL != 0 && + pc.Permissions.MaxWrappingTTL < pc.Permissions.MinWrappingTTL { + return errors.New("max_wrapping_ttl cannot be less than min_wrapping_ttl") + } + if len(pc.RequiredParametersHCL) > 0 { + pc.Permissions.RequiredParameters = pc.RequiredParametersHCL[:] + } + + PathFinished: + paths = append(paths, &pc) + } + + result.Paths = paths + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/policy_store.go b/vendor/github.com/hashicorp/vault/vault/policy_store.go new file mode 100644 index 00000000..33f8cf0b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/policy_store.go @@ -0,0 +1,853 @@ +package vault + +import ( + "context" + "fmt" + "path" + "strings" + "sync" + "time" + + metrics "github.com/armon/go-metrics" + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // policySubPath is the sub-path used for the policy store view. This is + // nested under the system view. policyRGPSubPath/policyEGPSubPath are + // similar but for RGPs/EGPs. + policyACLSubPath = "policy/" + policyRGPSubPath = "policy-rgp/" + policyEGPSubPath = "policy-egp/" + + // policyCacheSize is the number of policies that are kept cached + policyCacheSize = 1024 + + // defaultPolicyName is the name of the default policy + defaultPolicyName = "default" + + // responseWrappingPolicyName is the name of the fixed policy + responseWrappingPolicyName = "response-wrapping" + + // controlGroupPolicyName is the name of the fixed policy for control group + // tokens + controlGroupPolicyName = "control-group" + + // responseWrappingPolicy is the policy that ensures cubbyhole response + // wrapping can always succeed. 
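Returning to parsePaths above: a self-contained sketch of its wildcard classification, which detects "+" segment wildcards first and only treats a trailing "*" as a prefix rule when no segment wildcards are present:

package main

import (
	"fmt"
	"strings"
)

// classify mirrors the wildcard handling in parsePaths.
func classify(path string) (isPrefix, hasSegmentWildcards bool, cleaned string) {
	cleaned = path
	if cleaned == "+" || strings.Count(cleaned, "/+") > 0 || strings.HasPrefix(cleaned, "+/") {
		hasSegmentWildcards = true
	}
	// With segment wildcards present, the trailing asterisk is kept as-is.
	if strings.HasSuffix(cleaned, "*") && !hasSegmentWildcards {
		cleaned = strings.TrimSuffix(cleaned, "*")
		isPrefix = true
	}
	return
}

func main() {
	fmt.Println(classify("secret/*"))     // true false secret/
	fmt.Println(classify("secret/+/cfg")) // false true secret/+/cfg
}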
+ responseWrappingPolicy = ` +path "cubbyhole/response" { + capabilities = ["create", "read"] +} + +path "sys/wrapping/unwrap" { + capabilities = ["update"] +} +` + // controlGroupPolicy is the policy that ensures control group requests can + // commit themselves + controlGroupPolicy = ` +path "cubbyhole/control-group" { + capabilities = ["update", "create", "read"] +} + +path "sys/wrapping/unwrap" { + capabilities = ["update"] +} +` + // defaultPolicy is the "default" policy + defaultPolicy = ` +# Allow tokens to look up their own properties +path "auth/token/lookup-self" { + capabilities = ["read"] +} + +# Allow tokens to renew themselves +path "auth/token/renew-self" { + capabilities = ["update"] +} + +# Allow tokens to revoke themselves +path "auth/token/revoke-self" { + capabilities = ["update"] +} + +# Allow a token to look up its own capabilities on a path +path "sys/capabilities-self" { + capabilities = ["update"] +} + +# Allow a token to look up its own entity by id or name +path "identity/entity/id/{{identity.entity.id}}" { + capabilities = ["read"] +} +path "identity/entity/name/{{identity.entity.name}}" { + capabilities = ["read"] +} + + +# Allow a token to look up its resultant ACL from all policies. This is useful +# for UIs. It is an internal path because the format may change at any time +# based on how the internal ACL features and capabilities change. +path "sys/internal/ui/resultant-acl" { + capabilities = ["read"] +} + +# Allow a token to renew a lease via lease_id in the request body; old path for +# old clients, new path for newer +path "sys/renew" { + capabilities = ["update"] +} +path "sys/leases/renew" { + capabilities = ["update"] +} + +# Allow looking up lease properties. This requires knowing the lease ID ahead +# of time and does not divulge any sensitive information. +path "sys/leases/lookup" { + capabilities = ["update"] +} + +# Allow a token to manage its own cubbyhole +path "cubbyhole/*" { + capabilities = ["create", "read", "update", "delete", "list"] +} + +# Allow a token to wrap arbitrary values in a response-wrapping token +path "sys/wrapping/wrap" { + capabilities = ["update"] +} + +# Allow a token to look up the creation time and TTL of a given +# response-wrapping token +path "sys/wrapping/lookup" { + capabilities = ["update"] +} + +# Allow a token to unwrap a response-wrapping token. This is a convenience to +# avoid client token swapping since this is also part of the response wrapping +# policy. +path "sys/wrapping/unwrap" { + capabilities = ["update"] +} + +# Allow general purpose tools +path "sys/tools/hash" { + capabilities = ["update"] +} +path "sys/tools/hash/*" { + capabilities = ["update"] +} + +# Allow checking the status of a Control Group request if the user has the +# accessor +path "sys/control-group/request" { + capabilities = ["update"] +} +` +) + +var ( + immutablePolicies = []string{ + "root", + responseWrappingPolicyName, + controlGroupPolicyName, + } + nonAssignablePolicies = []string{ + responseWrappingPolicyName, + controlGroupPolicyName, + } +) + +// PolicyStore is used to provide durable storage of policy, and to +// manage ACLs associated with them. +type PolicyStore struct { + entPolicyStore + + core *Core + aclView *BarrierView + rgpView *BarrierView + egpView *BarrierView + + tokenPoliciesLRU *lru.TwoQueueCache + egpLRU *lru.TwoQueueCache + + // This is used to ensure that writes to the store (acl/rgp) or to the egp + // path tree don't happen concurrently. 
We are okay reading stale data so + // long as there aren't concurrent writes. + modifyLock *sync.RWMutex + + // Stores whether a token policy is ACL or RGP + policyTypeMap sync.Map + + // logger is the server logger copied over from core + logger log.Logger +} + +// PolicyEntry is used to store a policy by name +type PolicyEntry struct { + sentinelPolicy + + Version int + Raw string + Templated bool + Type PolicyType +} + +// NewPolicyStore creates a new PolicyStore that is backed +// using a given view. It is used to durably store and manage named policies. +func NewPolicyStore(ctx context.Context, core *Core, baseView *BarrierView, system logical.SystemView, logger log.Logger) (*PolicyStore, error) { + ps := &PolicyStore{ + aclView: baseView.SubView(policyACLSubPath), + rgpView: baseView.SubView(policyRGPSubPath), + egpView: baseView.SubView(policyEGPSubPath), + modifyLock: new(sync.RWMutex), + logger: logger, + core: core, + } + + ps.extraInit() + + if !system.CachingDisabled() { + cache, _ := lru.New2Q(policyCacheSize) + ps.tokenPoliciesLRU = cache + cache, _ = lru.New2Q(policyCacheSize) + ps.egpLRU = cache + } + + aclView := ps.getACLView(namespace.RootNamespace) + keys, err := logical.CollectKeys(namespace.RootContext(ctx), aclView) + if err != nil { + ps.logger.Error("error collecting acl policy keys", "error", err) + return nil, err + } + for _, key := range keys { + index := ps.cacheKey(namespace.RootNamespace, ps.sanitizeName(key)) + ps.policyTypeMap.Store(index, PolicyTypeACL) + } + + if err := ps.loadNamespacePolicies(ctx, core); err != nil { + return nil, err + } + + // Special-case root; doesn't exist on disk but does need to be found + ps.policyTypeMap.Store(ps.cacheKey(namespace.RootNamespace, "root"), PolicyTypeACL) + return ps, nil +} + +// setupPolicyStore is used to initialize the policy store +// when the vault is being unsealed. +func (c *Core) setupPolicyStore(ctx context.Context) error { + // Create the policy store + var err error + sysView := &dynamicSystemView{core: c} + psLogger := c.baseLogger.Named("policy") + c.AddLogger(psLogger) + c.policyStore, err = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, psLogger) + if err != nil { + return err + } + + if c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationDRSecondary) { + // Policies will sync from the primary + return nil + } + + // Ensure that the default policy exists, and if not, create it + if err := c.policyStore.loadACLPolicy(ctx, defaultPolicyName, defaultPolicy); err != nil { + return err + } + // Ensure that the response wrapping policy exists + if err := c.policyStore.loadACLPolicy(ctx, responseWrappingPolicyName, responseWrappingPolicy); err != nil { + return err + } + // Ensure that the control group policy exists + if err := c.policyStore.loadACLPolicy(ctx, controlGroupPolicyName, controlGroupPolicy); err != nil { + return err + } + + return nil +} + +// teardownPolicyStore is used to reverse setupPolicyStore +// when the vault is being sealed.
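NewPolicyStore above wires up two 2Q LRU caches (token policies and EGPs), keyed by namespace ID plus normalized policy name. A minimal, runnable sketch of that caching pattern using the same hashicorp/golang-lru constructor; the string value here stands in for a parsed *Policy:

```go
package main

import (
	"fmt"
	"path"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// Same constructor the policy store uses: a 2Q LRU holding up to
	// policyCacheSize (1024) parsed policies.
	cache, err := lru.New2Q(1024)
	if err != nil {
		panic(err)
	}

	// Cache indexes join the namespace ID and the sanitized policy name,
	// mirroring PolicyStore.cacheKey.
	index := path.Join("root", "default")
	cache.Add(index, "parsed policy placeholder")

	if raw, ok := cache.Get(index); ok {
		fmt.Println("cache hit:", raw)
	}

	// invalidate() simply drops the entry and lets the next Get reload it
	// from storage.
	cache.Remove(index)
}
```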
+func (c *Core) teardownPolicyStore() error { + c.policyStore = nil + return nil +} + +func (ps *PolicyStore) invalidate(ctx context.Context, name string, policyType PolicyType) { + ns, err := namespace.FromContext(ctx) + if err != nil { + ps.logger.Error("unable to invalidate key, no namespace info passed", "key", name) + return + } + + // This may come with a prefixed "/" due to joining the file path + saneName := strings.TrimPrefix(name, "/") + index := ps.cacheKey(ns, saneName) + + ps.modifyLock.Lock() + defer ps.modifyLock.Unlock() + + // We don't lock before removing from the LRU here because the worst that + // can happen is we load again if something since added it + switch policyType { + case PolicyTypeACL, PolicyTypeRGP: + if ps.tokenPoliciesLRU != nil { + ps.tokenPoliciesLRU.Remove(index) + } + + case PolicyTypeEGP: + if ps.egpLRU != nil { + ps.egpLRU.Remove(index) + } + + default: + // Can't do anything + return + } + + // Force a reload + out, err := ps.switchedGetPolicy(ctx, name, policyType, false) + if err != nil { + ps.logger.Error("error fetching policy after invalidation", "name", saneName) + } + + // If true, the invalidation was actually a delete, so we may need to + // perform further deletion tasks. We skip the physical deletion just in + // case another process has re-written the policy; instead next time Get is + // called the values will be loaded back in. + if out == nil { + ps.switchedDeletePolicy(ctx, name, policyType, false, true) + } + + return +} + +// SetPolicy is used to create or update the given policy +func (ps *PolicyStore) SetPolicy(ctx context.Context, p *Policy) error { + defer metrics.MeasureSince([]string{"policy", "set_policy"}, time.Now()) + if p == nil { + return fmt.Errorf("nil policy passed in for storage") + } + if p.Name == "" { + return fmt.Errorf("policy name missing") + } + // Policies are normalized to lower-case + p.Name = ps.sanitizeName(p.Name) + if strutil.StrListContains(immutablePolicies, p.Name) { + return fmt.Errorf("cannot update %q policy", p.Name) + } + + return ps.setPolicyInternal(ctx, p) +} + +func (ps *PolicyStore) setPolicyInternal(ctx context.Context, p *Policy) error { + ps.modifyLock.Lock() + defer ps.modifyLock.Unlock() + + // Get the appropriate view based on policy type and namespace + view := ps.getBarrierView(p.namespace, p.Type) + if view == nil { + return fmt.Errorf("unable to get the barrier subview for policy type %q", p.Type) + } + + if err := ps.parseEGPPaths(p); err != nil { + return err + } + + // Create the entry + entry, err := logical.StorageEntryJSON(p.Name, &PolicyEntry{ + Version: 2, + Raw: p.Raw, + Type: p.Type, + Templated: p.Templated, + sentinelPolicy: p.sentinelPolicy, + }) + if err != nil { + return errwrap.Wrapf("failed to create entry: {{err}}", err) + } + + // Construct the cache key + index := ps.cacheKey(p.namespace, p.Name) + + switch p.Type { + case PolicyTypeACL: + rgpView := ps.getRGPView(p.namespace) + rgp, err := rgpView.Get(ctx, entry.Key) + if err != nil { + return errwrap.Wrapf("failed looking up conflicting policy: {{err}}", err) + } + if rgp != nil { + return fmt.Errorf("cannot reuse policy names between ACLs and RGPs") + } + + if err := view.Put(ctx, entry); err != nil { + return errwrap.Wrapf("failed to persist policy: {{err}}", err) + } + + ps.policyTypeMap.Store(index, PolicyTypeACL) + + if ps.tokenPoliciesLRU != nil { + ps.tokenPoliciesLRU.Add(index, p) + } + + case PolicyTypeRGP: + aclView := ps.getACLView(p.namespace) + acl, err := aclView.Get(ctx, entry.Key) + if err != 
nil { + return errwrap.Wrapf("failed looking up conflicting policy: {{err}}", err) + } + if acl != nil { + return fmt.Errorf("cannot reuse policy names between ACLs and RGPs") + } + + if err := ps.handleSentinelPolicy(ctx, p, view, entry); err != nil { + return err + } + + ps.policyTypeMap.Store(index, PolicyTypeRGP) + + // We load here after successfully loading into Sentinel so that on + // error we will try loading again on the next get + if ps.tokenPoliciesLRU != nil { + ps.tokenPoliciesLRU.Add(index, p) + } + + case PolicyTypeEGP: + if err := ps.handleSentinelPolicy(ctx, p, view, entry); err != nil { + return err + } + + // We load here after successfully loading into Sentinel so that on + // error we will try loading again on the next get + if ps.egpLRU != nil { + ps.egpLRU.Add(index, p) + } + + default: + return fmt.Errorf("unknown policy type, cannot set") + } + + return nil +} + +// GetPolicy is used to fetch the named policy +func (ps *PolicyStore) GetPolicy(ctx context.Context, name string, policyType PolicyType) (*Policy, error) { + return ps.switchedGetPolicy(ctx, name, policyType, true) +} + +func (ps *PolicyStore) switchedGetPolicy(ctx context.Context, name string, policyType PolicyType, grabLock bool) (*Policy, error) { + defer metrics.MeasureSince([]string{"policy", "get_policy"}, time.Now()) + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + // Policies are normalized to lower-case + name = ps.sanitizeName(name) + index := ps.cacheKey(ns, name) + + var cache *lru.TwoQueueCache + var view *BarrierView + + switch policyType { + case PolicyTypeACL: + cache = ps.tokenPoliciesLRU + view = ps.getACLView(ns) + case PolicyTypeRGP: + cache = ps.tokenPoliciesLRU + view = ps.getRGPView(ns) + case PolicyTypeEGP: + cache = ps.egpLRU + view = ps.getEGPView(ns) + case PolicyTypeToken: + cache = ps.tokenPoliciesLRU + val, ok := ps.policyTypeMap.Load(index) + if !ok { + // Doesn't exist + return nil, nil + } + policyType = val.(PolicyType) + switch policyType { + case PolicyTypeACL: + view = ps.getACLView(ns) + case PolicyTypeRGP: + view = ps.getRGPView(ns) + default: + return nil, fmt.Errorf("invalid type of policy in type map: %q", policyType) + } + } + + if cache != nil { + // Check for cached policy + if raw, ok := cache.Get(index); ok { + return raw.(*Policy), nil + } + } + + // Special case the root policy + if policyType == PolicyTypeACL && name == "root" && ns.ID == namespace.RootNamespaceID { + p := &Policy{ + Name: "root", + namespace: namespace.RootNamespace, + } + if cache != nil { + cache.Add(index, p) + } + return p, nil + } + + if grabLock { + ps.modifyLock.Lock() + defer ps.modifyLock.Unlock() + } + + // See if anything has added it since we got the lock + if cache != nil { + if raw, ok := cache.Get(index); ok { + return raw.(*Policy), nil + } + } + + // Nil-check on the view before proceeding to retrieve from storage + if view == nil { + return nil, fmt.Errorf("unable to get the barrier subview for policy type %q", policyType) + } + + out, err := view.Get(ctx, name) + if err != nil { + return nil, errwrap.Wrapf("failed to read policy: {{err}}", err) + } + + if out == nil { + return nil, nil + } + + policyEntry := new(PolicyEntry) + policy := new(Policy) + err = out.DecodeJSON(policyEntry) + if err != nil { + return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + } + + // Set these up here so that they're available for loading into + // Sentinel + policy.Name = name + policy.Raw = policyEntry.Raw + policy.Type = 
policyEntry.Type + policy.Templated = policyEntry.Templated + policy.sentinelPolicy = policyEntry.sentinelPolicy + policy.namespace = ns + switch policyEntry.Type { + case PolicyTypeACL: + // Parse normally + p, err := ParseACLPolicy(ns, policyEntry.Raw) + if err != nil { + return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + } + policy.Paths = p.Paths + + // Reset this in case they set the name in the policy itself + policy.Name = name + + ps.policyTypeMap.Store(index, PolicyTypeACL) + + case PolicyTypeRGP: + if err := ps.handleSentinelPolicy(ctx, policy, nil, nil); err != nil { + return nil, err + } + + ps.policyTypeMap.Store(index, PolicyTypeRGP) + + case PolicyTypeEGP: + if err := ps.handleSentinelPolicy(ctx, policy, nil, nil); err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("unknown policy type %q", policyEntry.Type.String()) + } + + if cache != nil { + // Update the LRU cache + cache.Add(index, policy) + } + + return policy, nil +} + +// ListPolicies is used to list the available policies +func (ps *PolicyStore) ListPolicies(ctx context.Context, policyType PolicyType) ([]string, error) { + defer metrics.MeasureSince([]string{"policy", "list_policies"}, time.Now()) + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + if ns == nil { + return nil, namespace.ErrNoNamespace + } + + // Get the appropriate view based on policy type and namespace + view := ps.getBarrierView(ns, policyType) + if view == nil { + return []string{}, fmt.Errorf("unable to get the barrier subview for policy type %q", policyType) + } + + // Scan the view, since the policy names are the same as the + // key names. + var keys []string + switch policyType { + case PolicyTypeACL: + keys, err = logical.CollectKeys(ctx, view) + case PolicyTypeRGP: + return logical.CollectKeys(ctx, view) + case PolicyTypeEGP: + return logical.CollectKeys(ctx, view) + default: + return nil, fmt.Errorf("unknown policy type %q", policyType) + } + + // We only have non-assignable ACL policies at the moment + for _, nonAssignable := range nonAssignablePolicies { + deleteIndex := -1 + // Find indices of non-assignable policies in keys + for index, key := range keys { + if key == nonAssignable { + // Delete collection outside the loop + deleteIndex = index + break + } + } + // Remove non-assignable policies when found + if deleteIndex != -1 { + keys = append(keys[:deleteIndex], keys[deleteIndex+1:]...) + } + } + + return keys, err +} + +// DeletePolicy is used to delete the named policy +func (ps *PolicyStore) DeletePolicy(ctx context.Context, name string, policyType PolicyType) error { + return ps.switchedDeletePolicy(ctx, name, policyType, true, false) +} + +// deletePolicyForce is used to delete the named policy and force it even if +// default or a singleton. It's used for invalidations or namespace deletion +// where we internally need to actually remove a policy that the user normally +// isn't allowed to remove. 
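ListPolicies above hides the non-assignable built-in policies by deleting them from the collected key list with Go's in-place append idiom. The same filtering step in isolation:

```go
package main

import "fmt"

// removeFirst drops the first occurrence of target from keys, preserving
// order -- the append(keys[:i], keys[i+1:]...) idiom ListPolicies uses.
func removeFirst(keys []string, target string) []string {
	for i, k := range keys {
		if k == target {
			return append(keys[:i], keys[i+1:]...)
		}
	}
	return keys
}

func main() {
	keys := []string{"default", "response-wrapping", "control-group", "ops"}
	for _, hidden := range []string{"response-wrapping", "control-group"} {
		keys = removeFirst(keys, hidden)
	}
	fmt.Println(keys) // [default ops]
}
```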
+func (ps *PolicyStore) deletePolicyForce(ctx context.Context, name string, policyType PolicyType) error { + return ps.switchedDeletePolicy(ctx, name, policyType, true, true) +} + +func (ps *PolicyStore) switchedDeletePolicy(ctx context.Context, name string, policyType PolicyType, physicalDeletion, force bool) error { + defer metrics.MeasureSince([]string{"policy", "delete_policy"}, time.Now()) + + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + // If not set, the call comes from invalidation, where we'll already have + // grabbed the lock + if physicalDeletion { + ps.modifyLock.Lock() + defer ps.modifyLock.Unlock() + } + + // Policies are normalized to lower-case + name = ps.sanitizeName(name) + index := ps.cacheKey(ns, name) + + view := ps.getBarrierView(ns, policyType) + if view == nil { + return fmt.Errorf("unable to get the barrier subview for policy type %q", policyType) + } + + switch policyType { + case PolicyTypeACL: + if !force { + if strutil.StrListContains(immutablePolicies, name) { + return fmt.Errorf("cannot delete %q policy", name) + } + if name == "default" { + return fmt.Errorf("cannot delete default policy") + } + } + + if physicalDeletion { + err := view.Delete(ctx, name) + if err != nil { + return errwrap.Wrapf("failed to delete policy: {{err}}", err) + } + } + + if ps.tokenPoliciesLRU != nil { + // Clear the cache + ps.tokenPoliciesLRU.Remove(index) + } + + ps.policyTypeMap.Delete(index) + + case PolicyTypeRGP: + if physicalDeletion { + err := view.Delete(ctx, name) + if err != nil { + return errwrap.Wrapf("failed to delete policy: {{err}}", err) + } + } + + if ps.tokenPoliciesLRU != nil { + // Clear the cache + ps.tokenPoliciesLRU.Remove(index) + } + + ps.policyTypeMap.Delete(index) + + defer ps.core.invalidateSentinelPolicy(policyType, index) + + case PolicyTypeEGP: + if physicalDeletion { + err := view.Delete(ctx, name) + if err != nil { + return errwrap.Wrapf("failed to delete policy: {{err}}", err) + } + } + + if ps.egpLRU != nil { + // Clear the cache + ps.egpLRU.Remove(index) + } + + defer ps.core.invalidateSentinelPolicy(policyType, index) + + ps.invalidateEGPTreePath(index) + } + + return nil +} + +type TemplateError struct { + Err error +} + +func (t *TemplateError) WrappedErrors() []error { + return []error{t.Err} +} + +func (t *TemplateError) Error() string { + return t.Err.Error() +} + +// ACL is used to return an ACL which is built using the +// named policies. 
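ACL(), defined next, takes policy names grouped by namespace ID and resolves each batch under that namespace's context before assembling the ACL. A sketch of that input shape and walk; the namespace IDs and policy names are purely illustrative:

```go
package main

import "fmt"

func main() {
	// Input shape consumed by PolicyStore.ACL: namespace ID -> policy names.
	policyNames := map[string][]string{
		"root":  {"default", "team-eng"},
		"ns_1a": {"app-read"},
	}

	for nsID, names := range policyNames {
		for _, name := range names {
			// In the real code this is GetPolicy with a context carrying the
			// namespace; templated policies are then re-parsed per entity.
			fmt.Printf("fetch policy %q in namespace %q\n", name, nsID)
		}
	}
}
```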
+func (ps *PolicyStore) ACL(ctx context.Context, entity *identity.Entity, policyNames map[string][]string) (*ACL, error) { + var policies []*Policy + // Fetch the policies + for nsID, nsPolicyNames := range policyNames { + policyNS, err := NamespaceByID(ctx, nsID, ps.core) + if err != nil { + return nil, err + } + if policyNS == nil { + return nil, namespace.ErrNoNamespace + } + policyCtx := namespace.ContextWithNamespace(ctx, policyNS) + for _, nsPolicyName := range nsPolicyNames { + p, err := ps.GetPolicy(policyCtx, nsPolicyName, PolicyTypeToken) + if err != nil { + return nil, errwrap.Wrapf("failed to get policy: {{err}}", err) + } + if p != nil { + policies = append(policies, p) + } + } + } + + var fetchedGroups bool + var groups []*identity.Group + for i, policy := range policies { + if policy.Type == PolicyTypeACL && policy.Templated { + if !fetchedGroups { + fetchedGroups = true + if entity != nil { + directGroups, inheritedGroups, err := ps.core.identityStore.groupsByEntityID(entity.ID) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch group memberships: {{err}}", err) + } + groups = append(directGroups, inheritedGroups...) + } + } + p, err := parseACLPolicyWithTemplating(policy.namespace, policy.Raw, true, entity, groups) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error parsing templated policy %q: {{err}}", policy.Name), err) + } + p.Name = policy.Name + policies[i] = p + } + } + + // Construct the ACL + acl, err := NewACL(ctx, policies) + if err != nil { + return nil, errwrap.Wrapf("failed to construct ACL: {{err}}", err) + } + + return acl, nil +} + +// loadACLPolicy is used to load default ACL policies. The default policies will +// be loaded to all namespaces. +func (ps *PolicyStore) loadACLPolicy(ctx context.Context, policyName, policyText string) error { + return ps.loadACLPolicyNamespaces(ctx, policyName, policyText) +} + +// loadACLPolicyInternal is used to load default ACL policies in a specific +// namespace. 
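Every lookup in this file funnels policy names through the sanitizeName and cacheKey helpers defined near the end of it. An equivalent standalone sketch of that normalization:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// Mirrors PolicyStore.sanitizeName: policy names are trimmed and lower-cased.
func sanitizeName(name string) string {
	return strings.ToLower(strings.TrimSpace(name))
}

// Mirrors PolicyStore.cacheKey: indexes are "<namespace ID>/<name>".
func cacheKey(nsID, name string) string {
	return path.Join(nsID, name)
}

func main() {
	fmt.Println(cacheKey("root", sanitizeName("  Team-Admin "))) // root/team-admin
}
```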
+func (ps *PolicyStore) loadACLPolicyInternal(ctx context.Context, policyName, policyText string) error { + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + + // Check if the policy already exists + policy, err := ps.GetPolicy(ctx, policyName, PolicyTypeACL) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("error fetching %s policy from store: {{err}}", policyName), err) + } + if policy != nil { + if !strutil.StrListContains(immutablePolicies, policyName) || policyText == policy.Raw { + return nil + } + } + + policy, err = ParseACLPolicy(ns, policyText) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("error parsing %s policy: {{err}}", policyName), err) + } + + if policy == nil { + return fmt.Errorf("parsing %q policy resulted in nil policy", policyName) + } + + policy.Name = policyName + policy.Type = PolicyTypeACL + return ps.setPolicyInternal(ctx, policy) +} + +func (ps *PolicyStore) sanitizeName(name string) string { + return strings.ToLower(strings.TrimSpace(name)) +} + +func (ps *PolicyStore) cacheKey(ns *namespace.Namespace, name string) string { + return path.Join(ns.ID, name) +} diff --git a/vendor/github.com/hashicorp/vault/vault/policy_store_util.go b/vendor/github.com/hashicorp/vault/vault/policy_store_util.go new file mode 100644 index 00000000..6b829132 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/policy_store_util.go @@ -0,0 +1,47 @@ +// +build !enterprise + +package vault + +import ( + "context" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" +) + +type entPolicyStore struct{} + +func (ps *PolicyStore) extraInit() { +} + +func (ps *PolicyStore) loadNamespacePolicies(context.Context, *Core) error { return nil } + +func (ps *PolicyStore) getACLView(*namespace.Namespace) *BarrierView { + return ps.aclView +} + +func (ps *PolicyStore) getRGPView(ns *namespace.Namespace) *BarrierView { + return ps.rgpView +} + +func (ps *PolicyStore) getEGPView(ns *namespace.Namespace) *BarrierView { + return ps.egpView +} + +func (ps *PolicyStore) getBarrierView(ns *namespace.Namespace, _ PolicyType) *BarrierView { + return ps.getACLView(ns) +} + +func (ps *PolicyStore) handleSentinelPolicy(context.Context, *Policy, *BarrierView, *logical.StorageEntry) error { + return nil +} + +func (ps *PolicyStore) parseEGPPaths(*Policy) error { return nil } + +func (ps *PolicyStore) invalidateEGPTreePath(string) {} + +func (ps *PolicyStore) pathsToEGPPaths(*Policy) ([]*egpPath, error) { return nil, nil } + +func (ps *PolicyStore) loadACLPolicyNamespaces(ctx context.Context, policyName, policyText string) error { + return ps.loadACLPolicyInternal(namespace.RootContext(ctx), policyName, policyText) +} diff --git a/vendor/github.com/hashicorp/vault/vault/policy_util.go b/vendor/github.com/hashicorp/vault/vault/policy_util.go new file mode 100644 index 00000000..74b92639 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/policy_util.go @@ -0,0 +1,5 @@ +// +build !enterprise + +package vault + +type sentinelPolicy struct{} diff --git a/vendor/github.com/hashicorp/vault/vault/raft.go b/vendor/github.com/hashicorp/vault/vault/raft.go new file mode 100644 index 00000000..30f3901c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/raft.go @@ -0,0 +1,741 @@ +package vault + +import ( + "context" + "crypto/tls" + "encoding/base64" + "errors" + "fmt" + "math" + "net/http" + "net/url" + "strings" + "sync" + "sync/atomic" + "time" + + proto "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + 
cleanhttp "github.com/hashicorp/go-cleanhttp" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault/seal" + "github.com/mitchellh/mapstructure" + "golang.org/x/net/http2" +) + +var ( + raftTLSStoragePath = "core/raft/tls" + raftTLSRotationPeriod = 24 * time.Hour +) + +type raftFollowerStates struct { + l sync.RWMutex + followers map[string]uint64 +} + +func (s *raftFollowerStates) update(nodeID string, appliedIndex uint64) { + s.l.Lock() + s.followers[nodeID] = appliedIndex + s.l.Unlock() +} +func (s *raftFollowerStates) delete(nodeID string) { + s.l.RLock() + delete(s.followers, nodeID) + s.l.RUnlock() +} +func (s *raftFollowerStates) get(nodeID string) uint64 { + s.l.RLock() + index := s.followers[nodeID] + s.l.RUnlock() + return index +} +func (s *raftFollowerStates) minIndex() uint64 { + var min uint64 = math.MaxUint64 + minFunc := func(a, b uint64) uint64 { + if a > b { + return b + } + return a + } + + s.l.RLock() + for _, i := range s.followers { + min = minFunc(min, i) + } + s.l.RUnlock() + + if min == math.MaxUint64 { + return 0 + } + + return min +} + +// startRaftStorage will call SetupCluster in the raft backend which starts raft +// up and enables the cluster handler. +func (c *Core) startRaftStorage(ctx context.Context) (retErr error) { + raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return nil + } + if raftStorage.Initialized() { + return nil + } + + // Retrieve the raft TLS information + raftTLSEntry, err := c.barrier.Get(ctx, raftTLSStoragePath) + if err != nil { + return err + } + + var creating bool + var raftTLS *raft.TLSKeyring + switch raftTLSEntry { + case nil: + // If we did not find a TLS keyring we will attempt to create one here. + // This happens after a storage migration process. This node is also + // marked to start as leader so we can write the new TLS Key. This is an + // error condition if there are already multiple nodes in the cluster, + // and the below storage write will fail. If the cluster is somehow in + // this state the unseal will fail and a cluster recovery will need to + // be done. + creating = true + raftTLSKey, err := raft.GenerateTLSKey(c.secureRandomReader) + if err != nil { + return err + } + + raftTLS = &raft.TLSKeyring{ + Keys: []*raft.TLSKey{raftTLSKey}, + ActiveKeyID: raftTLSKey.ID, + } + default: + raftTLS = new(raft.TLSKeyring) + if err := raftTLSEntry.DecodeJSON(raftTLS); err != nil { + return err + } + } + + raftStorage.SetRestoreCallback(c.raftSnapshotRestoreCallback(true, true)) + if err := raftStorage.SetupCluster(ctx, raft.SetupOpts{ + TLSKeyring: raftTLS, + ClusterListener: c.getClusterListener(), + StartAsLeader: creating, + }); err != nil { + return err + } + + defer func() { + if retErr != nil { + c.logger.Info("stopping raft server") + if err := raftStorage.TeardownCluster(c.getClusterListener()); err != nil { + c.logger.Error("failed to stop raft server", "error", err) + } + } + }() + + // If we are in need of creating the TLS keyring then we should write it out + // to storage here. If we fail it may mean we couldn't become leader and we + // should error out. 
+ if creating { + c.logger.Info("writing raft TLS keyring to storage") + entry, err := logical.StorageEntryJSON(raftTLSStoragePath, raftTLS) + if err != nil { + c.logger.Error("error marshaling raft TLS keyring", "error", err) + return err + } + if err := c.barrier.Put(ctx, entry); err != nil { + c.logger.Error("error writing raft TLS keyring", "error", err) + return err + } + } + + return nil +} + +func (c *Core) setupRaftActiveNode(ctx context.Context) error { + c.pendingRaftPeers = make(map[string][]byte) + return c.startPeriodicRaftTLSRotate(ctx) +} + +func (c *Core) stopRaftActiveNode() { + c.pendingRaftPeers = nil + c.stopPeriodicRaftTLSRotate() +} + +// startPeriodicRaftTLSRotate will spawn a go routine in charge of periodically +// rotating the TLS certs and keys used for raft traffic. +// +// The logic for updating the TLS certificate uses a pseudo two phase commit +// using the known applied indexes from standby nodes. When writing a new Key +// it will be appended to the end of the keyring. Standbys can start accepting +// connections with this key as soon as they see the update. Then it will write +// the keyring a second time indicating the applied index for this key update. +// +// The active node will wait until it sees all standby nodes are at or past the +// applied index for this update. At that point it will delete the older key +// and make the new key active. The key isn't officially in use until this +// happens. The dual write ensures the standby at least gets the first update +// containing the key before the active node switches over to using it. +// +// If a standby is shut down then it cannot advance the key term until it +// receives the update. This ensures a standby node isn't left behind and unable +// to reconnect with the cluster. Additionally, only one outstanding key +// is allowed for this same reason (max keyring size of 2). +func (c *Core) startPeriodicRaftTLSRotate(ctx context.Context) error { + raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return nil + } + + stopCh := make(chan struct{}) + followerStates := &raftFollowerStates{ + followers: make(map[string]uint64), + } + + // Pre-populate the follower list with the set of peers. + raftConfig, err := raftStorage.GetConfiguration(ctx) + if err != nil { + return err + } + for _, server := range raftConfig.Servers { + if server.NodeID != raftStorage.NodeID() { + followerStates.update(server.NodeID, 0) + } + } + + logger := c.logger.Named("raft") + c.raftTLSRotationStopCh = stopCh + c.raftFollowerStates = followerStates + + readKeyring := func() (*raft.TLSKeyring, error) { + tlsKeyringEntry, err := c.barrier.Get(ctx, raftTLSStoragePath) + if err != nil { + return nil, err + } + if tlsKeyringEntry == nil { + return nil, errors.New("no keyring found") + } + var keyring raft.TLSKeyring + if err := tlsKeyringEntry.DecodeJSON(&keyring); err != nil { + return nil, err + } + + return &keyring, nil + } + + // rotateKeyring writes new key data to the keyring and adds an applied + // index that is used to verify it has been committed. The keys written in + // this function can be used on standbys but the active node doesn't start + // using it yet. 
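The dual-write protocol described above is easiest to see as data: the keyring is written once to append the candidate key, then a second time to stamp the applied index once standbys can have seen it. A hedged sketch with illustrative stand-ins for raft.TLSKey/raft.TLSKeyring (the field details here are assumptions, not the real types):

```go
package main

import "fmt"

// Illustrative stand-ins for raft.TLSKey and raft.TLSKeyring.
type tlsKey struct {
	ID           string
	AppliedIndex uint64
}

type tlsKeyring struct {
	Keys         []tlsKey
	ActiveKeyID  string
	Term         uint64
	AppliedIndex uint64
}

func main() {
	kr := tlsKeyring{Keys: []tlsKey{{ID: "key-1"}}, ActiveKeyID: "key-1", Term: 1}

	// Write 1: append the candidate key. Standbys can accept connections
	// made with it as soon as they replicate this entry.
	kr.Keys = append(kr.Keys, tlsKey{ID: "key-2"})
	kr.Term++

	// Write 2: stamp the applied index so the active node can tell when
	// every follower has seen write 1.
	applied := uint64(1234) // raftStorage.AppliedIndex() in the real code
	kr.Keys[1].AppliedIndex = applied
	kr.AppliedIndex = applied

	fmt.Printf("pending keyring: %+v\n", kr)
}
```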
+ rotateKeyring := func() (time.Time, error) { + // Read the existing keyring + keyring, err := readKeyring() + if err != nil { + return time.Time{}, errwrap.Wrapf("failed to read raft TLS keyring: {{err}}", err) + } + + switch { + case len(keyring.Keys) == 2 && keyring.Keys[1].AppliedIndex == 0: + // If this case is hit then the second write to add the applied + // index failed. Attempt to write it again. + keyring.Keys[1].AppliedIndex = raftStorage.AppliedIndex() + keyring.AppliedIndex = raftStorage.AppliedIndex() + entry, err := logical.StorageEntryJSON(raftTLSStoragePath, keyring) + if err != nil { + return time.Time{}, errwrap.Wrapf("failed to json encode keyring: {{err}}", err) + } + if err := c.barrier.Put(ctx, entry); err != nil { + return time.Time{}, errwrap.Wrapf("failed to write keyring: {{err}}", err) + } + + case len(keyring.Keys) > 1: + // If there already exists a pending key update then the update + // hasn't replicated down to all standby nodes yet. Don't allow any + // new keys to be created until all standbys have seen this previous + // rotation. As a backoff strategy another rotation attempt is + // scheduled for 5 minutes from now. + logger.Warn("skipping new raft TLS config creation, keys are pending") + return time.Now().Add(time.Minute * 5), nil + } + + logger.Info("creating new raft TLS config") + + // Create a new key + raftTLSKey, err := raft.GenerateTLSKey(c.secureRandomReader) + if err != nil { + return time.Time{}, errwrap.Wrapf("failed to generate new raft TLS key: {{err}}", err) + } + + // Advance the term and store the new key + keyring.Term += 1 + keyring.Keys = append(keyring.Keys, raftTLSKey) + entry, err := logical.StorageEntryJSON(raftTLSStoragePath, keyring) + if err != nil { + return time.Time{}, errwrap.Wrapf("failed to json encode keyring: {{err}}", err) + } + if err := c.barrier.Put(ctx, entry); err != nil { + return time.Time{}, errwrap.Wrapf("failed to write keyring: {{err}}", err) + } + + // Write the keyring again with the new applied index. This allows us to + // track if standby nodes receive the update. + keyring.Keys[1].AppliedIndex = raftStorage.AppliedIndex() + keyring.AppliedIndex = raftStorage.AppliedIndex() + entry, err = logical.StorageEntryJSON(raftTLSStoragePath, keyring) + if err != nil { + return time.Time{}, errwrap.Wrapf("failed to json encode keyring: {{err}}", err) + } + if err := c.barrier.Put(ctx, entry); err != nil { + return time.Time{}, errwrap.Wrapf("failed to write keyring: {{err}}", err) + } + + logger.Info("wrote new raft TLS config") + // Schedule the next rotation + return raftTLSKey.CreatedTime.Add(raftTLSRotationPeriod), nil + } + + // checkCommitted verifies key updates have been applied to all nodes and + // finalizes the rotation by deleting the old keys and updating the raft + // backend. 
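checkCommitted, defined next, only promotes the pending key once followerStates.minIndex() reaches the keyring's applied index. That bookkeeping is a mutex-guarded map plus a min scan; a compact self-contained version:

```go
package main

import (
	"fmt"
	"math"
	"sync"
)

type followerStates struct {
	mu        sync.RWMutex
	followers map[string]uint64
}

func (s *followerStates) update(nodeID string, applied uint64) {
	s.mu.Lock()
	s.followers[nodeID] = applied
	s.mu.Unlock()
}

// minIndex returns the smallest applied index across followers, or 0 when
// there are none -- the gate checkCommitted compares against AppliedIndex.
func (s *followerStates) minIndex() uint64 {
	min := uint64(math.MaxUint64)
	s.mu.RLock()
	for _, i := range s.followers {
		if i < min {
			min = i
		}
	}
	s.mu.RUnlock()
	if min == math.MaxUint64 {
		return 0
	}
	return min
}

func main() {
	s := &followerStates{followers: map[string]uint64{}}
	s.update("node-a", 1500)
	s.update("node-b", 1200)
	fmt.Println(s.minIndex()) // 1200
}
```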
+ checkCommitted := func() error { + keyring, err := readKeyring() + if err != nil { + return errwrap.Wrapf("failed to read raft TLS keyring: {{err}}", err) + } + + switch { + case len(keyring.Keys) == 1: + // No Keys to apply + return nil + case keyring.Keys[1].AppliedIndex != keyring.AppliedIndex: + // We haven't fully committed the new key, continue here + return nil + case followerStates.minIndex() < keyring.AppliedIndex: + // Not all the followers have applied the latest key + return nil + } + + // Upgrade to the new key + keyring.Keys = keyring.Keys[1:] + keyring.ActiveKeyID = keyring.Keys[0].ID + keyring.Term += 1 + entry, err := logical.StorageEntryJSON(raftTLSStoragePath, keyring) + if err != nil { + return errwrap.Wrapf("failed to json encode keyring: {{err}}", err) + } + if err := c.barrier.Put(ctx, entry); err != nil { + return errwrap.Wrapf("failed to write keyring: {{err}}", err) + } + + // Update the TLS Key in the backend + if err := raftStorage.SetTLSKeyring(keyring); err != nil { + return errwrap.Wrapf("failed to install keyring: {{err}}", err) + } + + logger.Info("installed new raft TLS key", "term", keyring.Term) + return nil + } + + // Read the keyring to calculate the time of next rotation. + keyring, err := readKeyring() + if err != nil { + return err + } + activeKey := keyring.GetActive() + if activeKey == nil { + return errors.New("no active raft TLS key found") + } + + // Start the process in a goroutine + go func() { + nextRotationTime := activeKey.CreatedTime.Add(raftTLSRotationPeriod) + + keyCheckInterval := time.NewTicker(1 * time.Minute) + defer keyCheckInterval.Stop() + + var backoff bool + for { + // If we encountered an error we should try to create the key + // again. + if backoff { + nextRotationTime = time.Now().Add(10 * time.Second) + backoff = false + } + + select { + case <-keyCheckInterval.C: + err := checkCommitted() + if err != nil { + logger.Error("failed to activate TLS key", "error", err) + } + case <-time.After(time.Until(nextRotationTime)): + // It's time to rotate the keys + next, err := rotateKeyring() + if err != nil { + logger.Error("failed to rotate TLS key", "error", err) + backoff = true + continue + } + + nextRotationTime = next + + case <-stopCh: + return + } + } + }() + + return nil +} + +func (c *Core) createRaftTLSKeyring(ctx context.Context) error { + if _, ok := c.underlyingPhysical.(*raft.RaftBackend); !ok { + return nil + } + + raftTLS, err := raft.GenerateTLSKey(c.secureRandomReader) + if err != nil { + return err + } + + keyring := &raft.TLSKeyring{ + Keys: []*raft.TLSKey{raftTLS}, + ActiveKeyID: raftTLS.ID, + } + + entry, err := logical.StorageEntryJSON(raftTLSStoragePath, keyring) + if err != nil { + return err + } + if err := c.barrier.Put(ctx, entry); err != nil { + return err + } + return nil +} + +func (c *Core) stopPeriodicRaftTLSRotate() { + if c.raftTLSRotationStopCh != nil { + close(c.raftTLSRotationStopCh) + } + c.raftTLSRotationStopCh = nil + c.raftFollowerStates = nil +} + +func (c *Core) checkRaftTLSKeyUpgrades(ctx context.Context) error { + raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return nil + } + + tlsKeyringEntry, err := c.barrier.Get(ctx, raftTLSStoragePath) + if err != nil { + return err + } + if tlsKeyringEntry == nil { + return nil + } + + var keyring raft.TLSKeyring + if err := tlsKeyringEntry.DecodeJSON(&keyring); err != nil { + return err + } + + if err := raftStorage.SetTLSKeyring(&keyring); err != nil { + return err + } + + return nil +} + +// raftSnapshotRestoreCallback
is for the raft backend to hook back into core after a +// snapshot is restored so we can clear the necessary caches and handle changing +// keyrings or master keys +func (c *Core) raftSnapshotRestoreCallback(grabLock bool, sealNode bool) func(context.Context) error { + return func(ctx context.Context) (retErr error) { + c.logger.Info("running post snapshot restore invalidations") + + if grabLock { + // Grab statelock + if stopped := grabLockOrStop(c.stateLock.Lock, c.stateLock.Unlock, c.standbyStopCh); stopped { + c.logger.Error("did not apply snapshot; vault is shutting down") + return errors.New("did not apply snapshot; vault is shutting down") + } + defer c.stateLock.Unlock() + } + + if sealNode { + // If we failed to restore the snapshot we should seal this node as + // it's in an unknown state + defer func() { + if retErr != nil { + if err := c.sealInternalWithOptions(false, false, true); err != nil { + c.logger.Error("failed to seal node", "error", err) + } + } + }() + } + + // Purge the cache so we make sure we are operating on fresh data + c.physicalCache.Purge(ctx) + + // Reload the keyring in case it changed. If this fails it's likely + // we've changed master keys. + err := c.performKeyUpgrades(ctx) + if err != nil { + // The snapshot contained a master key or keyring we couldn't + // recover + switch c.seal.BarrierType() { + case seal.Shamir: + // If we are a shamir seal we can't do anything. Just + // seal all nodes. + + // Seal ourselves + c.logger.Info("failed to perform key upgrades, sealing", "error", err) + return err + default: + // If we are using an auto-unseal we can try to use the seal to + // unseal again. If the auto-unseal mechanism has changed then + // there isn't anything we can do but seal. + c.logger.Info("failed to perform key upgrades, reloading using auto seal") + keys, err := c.seal.GetStoredKeys(ctx) + if err != nil { + c.logger.Error("raft snapshot restore failed to get stored keys", "error", err) + return err + } + if err := c.barrier.Seal(); err != nil { + c.logger.Error("raft snapshot restore failed to seal barrier", "error", err) + return err + } + if err := c.barrier.Unseal(ctx, keys[0]); err != nil { + c.logger.Error("raft snapshot restore failed to unseal barrier", "error", err) + return err + } + c.logger.Info("done reloading master key using auto seal") + } + } + + return nil + } +} + +func (c *Core) JoinRaftCluster(ctx context.Context, leaderAddr string, tlsConfig *tls.Config, retry, nonVoter bool) (bool, error) { + if len(leaderAddr) == 0 { + return false, errors.New("No leader address provided") + } + + raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return false, errors.New("raft storage not configured") + } + + if raftStorage.Initialized() { + return false, errors.New("raft is already initialized") + } + + init, err := c.Initialized(ctx) + if err != nil { + return false, errwrap.Wrapf("failed to check if core is initialized: {{err}}", err) + } + if init { + return false, errwrap.Wrapf("join can't be invoked on an initialized cluster: {{err}}", ErrAlreadyInit) + } + + transport := cleanhttp.DefaultPooledTransport() + if tlsConfig != nil { + transport.TLSClientConfig = tlsConfig.Clone() + if err := http2.ConfigureTransport(transport); err != nil { + return false, errwrap.Wrapf("failed to configure TLS: {{err}}", err) + } + } + client := &http.Client{ + Transport: transport, + } + config := api.DefaultConfig() + if config.Error != nil { + return false, errwrap.Wrapf("failed to create api client: {{err}}", 
config.Error) + } + config.Address = leaderAddr + config.HttpClient = client + config.MaxRetries = 0 + apiClient, err := api.NewClient(config) + if err != nil { + return false, errwrap.Wrapf("failed to create api client: {{err}}", err) + } + + join := func() error { + // Unwrap the token + secret, err := apiClient.Logical().Write("sys/storage/raft/bootstrap/challenge", map[string]interface{}{ + "server_id": raftStorage.NodeID(), + }) + if err != nil { + return errwrap.Wrapf("error during bootstrap init call: {{err}}", err) + } + if secret == nil { + return errors.New("could not retrieve bootstrap package") + } + + var sealConfig SealConfig + err = mapstructure.Decode(secret.Data["seal_config"], &sealConfig) + if err != nil { + return err + } + + if sealConfig.Type != c.seal.BarrierType() { + return fmt.Errorf("mismatching seal types between leader (%s) and follower (%s)", sealConfig.Type, c.seal.BarrierType()) + } + + challengeB64, ok := secret.Data["challenge"] + if !ok { + return errors.New("error during raft bootstrap call, no challenge given") + } + challengeRaw, err := base64.StdEncoding.DecodeString(challengeB64.(string)) + if err != nil { + return errwrap.Wrapf("error decoding challenge: {{err}}", err) + } + + eBlob := &physical.EncryptedBlobInfo{} + if err := proto.Unmarshal(challengeRaw, eBlob); err != nil { + return errwrap.Wrapf("error decoding challenge: {{err}}", err) + } + raftInfo := &raftInformation{ + challenge: eBlob, + leaderClient: apiClient, + leaderBarrierConfig: &sealConfig, + nonVoter: nonVoter, + } + if c.seal.BarrierType() == seal.Shamir { + c.raftInfo = raftInfo + c.seal.SetBarrierConfig(ctx, &sealConfig) + return nil + } + + if err := c.joinRaftSendAnswer(ctx, c.seal.GetAccess(), raftInfo); err != nil { + return errwrap.Wrapf("failed to send answer to leader node: {{err}}", err) + } + + return nil + } + + switch retry { + case true: + go func() { + for { + // TODO add a way to shut this down + err := join() + if err == nil { + return + } + c.logger.Error("failed to join raft cluster", "error", err) + time.Sleep(time.Second * 2) + } + }() + + // Backgrounded so return false + return false, nil + default: + if err := join(); err != nil { + c.logger.Error("failed to join raft cluster", "error", err) + return false, errwrap.Wrapf("failed to join raft cluster: {{err}}", err) + } + } + + return true, nil +} + +// This is used in tests to override the cluster address +var UpdateClusterAddrForTests uint32 + +func (c *Core) joinRaftSendAnswer(ctx context.Context, sealAccess seal.Access, raftInfo *raftInformation) error { + if raftInfo.challenge == nil { + return errors.New("raft challenge is nil") + } + + raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend) + if !ok { + return errors.New("raft storage not in use") + } + + if raftStorage.Initialized() { + return errors.New("raft is already initialized") + } + + plaintext, err := sealAccess.Decrypt(ctx, raftInfo.challenge) + if err != nil { + return errwrap.Wrapf("error decrypting challenge: {{err}}", err) + } + + parsedClusterAddr, err := url.Parse(c.ClusterAddr()) + if err != nil { + return errwrap.Wrapf("error parsing cluster address: {{err}}", err) + } + clusterAddr := parsedClusterAddr.Host + if atomic.LoadUint32(&UpdateClusterAddrForTests) == 1 && strings.HasSuffix(clusterAddr, ":0") { + // We are testing and have an address provider, so just create a random + // addr, it will be overwritten later. 
+ var err error + clusterAddr, err = uuid.GenerateUUID() + if err != nil { + return err + } + } + + answerReq := raftInfo.leaderClient.NewRequest("PUT", "/v1/sys/storage/raft/bootstrap/answer") + if err := answerReq.SetJSONBody(map[string]interface{}{ + "answer": base64.StdEncoding.EncodeToString(plaintext), + "cluster_addr": clusterAddr, + "server_id": raftStorage.NodeID(), + "non_voter": raftInfo.nonVoter, + }); err != nil { + return err + } + + answerRespJson, err := raftInfo.leaderClient.RawRequestWithContext(ctx, answerReq) + if answerRespJson != nil { + defer answerRespJson.Body.Close() + } + if err != nil { + return err + } + + var answerResp answerRespData + if err := jsonutil.DecodeJSONFromReader(answerRespJson.Body, &answerResp); err != nil { + return err + } + + raftStorage.Bootstrap(ctx, answerResp.Data.Peers) + + err = c.startClusterListener(ctx) + if err != nil { + return errwrap.Wrapf("error starting cluster: {{err}}", err) + } + + raftStorage.SetRestoreCallback(c.raftSnapshotRestoreCallback(true, true)) + err = raftStorage.SetupCluster(ctx, raft.SetupOpts{ + TLSKeyring: answerResp.Data.TLSKeyring, + ClusterListener: c.getClusterListener(), + }) + if err != nil { + return errwrap.Wrapf("failed to setup raft cluster: {{err}}", err) + } + + return nil +} + +func (c *Core) isRaftUnseal() bool { + return c.raftInfo != nil +} + +type answerRespData struct { + Data answerResp `json:"data"` +} + +type answerResp struct { + Peers []raft.Peer `json:"peers"` + TLSKeyring *raft.TLSKeyring `json:"tls_keyring"` +} diff --git a/vendor/github.com/hashicorp/vault/vault/rekey.go b/vendor/github.com/hashicorp/vault/vault/rekey.go new file mode 100644 index 00000000..f6505a47 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/rekey.go @@ -0,0 +1,1021 @@ +package vault + +import ( + "bytes" + "context" + "crypto/subtle" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/shamir" + "github.com/hashicorp/vault/vault/seal" + shamirseal "github.com/hashicorp/vault/vault/seal/shamir" +) + +const ( + // coreUnsealKeysBackupPath is the path used to backup encrypted unseal + // keys if specified during a rekey operation. This is outside of the + // barrier. + coreBarrierUnsealKeysBackupPath = "core/unseal-keys-backup" + + // coreRecoveryUnsealKeysBackupPath is the path used to backup encrypted + // recovery keys if specified during a rekey operation. This is outside of + // the barrier. + coreRecoveryUnsealKeysBackupPath = "core/recovery-keys-backup" +) + +// RekeyResult is used to provide the key parts back after +// they are generated as part of the rekey. +type RekeyResult struct { + SecretShares [][]byte + PGPFingerprints []string + Backup bool + RecoveryKey bool + VerificationRequired bool + VerificationNonce string +} + +type RekeyVerifyResult struct { + Complete bool + Nonce string +} + +// RekeyBackup stores the backup copy of PGP-encrypted keys +type RekeyBackup struct { + Nonce string + Keys map[string][]string +} + +// RekeyThreshold returns the secret threshold for the current seal +// config. 
This threshold can either be the barrier key threshold or +// the recovery key threshold, depending on whether rekey is being +// performed on the recovery key, or whether the seal supports +// recovery keys. +func (c *Core) RekeyThreshold(ctx context.Context, recovery bool) (int, logical.HTTPCodedError) { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() { + return 0, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return 0, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + c.rekeyLock.RLock() + defer c.rekeyLock.RUnlock() + + var config *SealConfig + var err error + // If we are rekeying the recovery key, or if the seal supports + // recovery keys and we are rekeying the barrier key, we use the + // recovery config as the threshold instead. + if recovery || c.seal.RecoveryKeySupported() { + config, err = c.seal.RecoveryConfig(ctx) + } else { + config, err = c.seal.BarrierConfig(ctx) + } + if err != nil { + return 0, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("unable to look up config: {{err}}", err).Error()) + } + if config == nil { + return 0, logical.CodedError(http.StatusBadRequest, ErrNotInit.Error()) + } + + return config.SecretThreshold, nil +} + +// RekeyProgress is used to return the rekey progress (num shares). +func (c *Core) RekeyProgress(recovery, verification bool) (bool, int, logical.HTTPCodedError) { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() { + return false, 0, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return false, 0, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + c.rekeyLock.RLock() + defer c.rekeyLock.RUnlock() + + var conf *SealConfig + if recovery { + conf = c.recoveryRekeyConfig + } else { + conf = c.barrierRekeyConfig + } + + if conf == nil { + return false, 0, logical.CodedError(http.StatusBadRequest, "rekey operation not in progress") + } + + if verification { + return len(conf.VerificationKey) > 0, len(conf.VerificationProgress), nil + } + return true, len(conf.RekeyProgress), nil +} + +// RekeyConfig is used to read the rekey configuration +func (c *Core) RekeyConfig(recovery bool) (*SealConfig, logical.HTTPCodedError) { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() { + return nil, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return nil, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + c.rekeyLock.Lock() + defer c.rekeyLock.Unlock() + + // Copy the seal config if any + var conf *SealConfig + if recovery { + if c.recoveryRekeyConfig != nil { + conf = c.recoveryRekeyConfig.Clone() + } + } else { + if c.barrierRekeyConfig != nil { + conf = c.barrierRekeyConfig.Clone() + } + } + + return conf, nil +} + +// RekeyInit will either initialize the rekey of barrier or recovery key. +// recovery determines whether this is a rekey on the barrier or recovery key. 
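RekeyInit's first gate is pure arithmetic on the seal config: the threshold may never exceed the share count, and config.Validate() enforces the remaining bounds. A small sketch of that sanity check; the exact limits shown (1..255 shares) are assumptions in the spirit of the usual Shamir bounds, not a copy of the real Validate method:

```go
package main

import (
	"errors"
	"fmt"
)

// validateShares mirrors the spirit of the rekey config checks; the bounds
// here are illustrative assumptions, not the vendored implementation.
func validateShares(shares, threshold int) error {
	switch {
	case shares < 1:
		return errors.New("shares must be at least one")
	case shares > 255:
		return errors.New("shares must be at most 255")
	case threshold < 1:
		return errors.New("threshold must be at least one")
	case threshold > shares:
		return errors.New("provided threshold greater than the total shares")
	}
	return nil
}

func main() {
	fmt.Println(validateShares(5, 3)) // <nil>
	fmt.Println(validateShares(3, 5)) // provided threshold greater than the total shares
}
```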
+func (c *Core) RekeyInit(config *SealConfig, recovery bool) logical.HTTPCodedError { + if config.SecretThreshold > config.SecretShares { + return logical.CodedError(http.StatusBadRequest, "provided threshold greater than the total shares") + } + + if recovery { + return c.RecoveryRekeyInit(config) + } + return c.BarrierRekeyInit(config) +} + +// BarrierRekeyInit is used to initialize the rekey settings for the barrier key +func (c *Core) BarrierRekeyInit(config *SealConfig) logical.HTTPCodedError { + switch c.seal.BarrierType() { + case seal.Shamir: + // As of Vault 1.3 all seals use StoredShares==1. The one exception is + // legacy shamir seals, which we can read but not write (by design). + // So if someone does a rekey, regardless of their intention, we're going + // to migrate them to a non-legacy Shamir seal. + if config.StoredShares != 1 { + c.logger.Warn("shamir stored keys supported, forcing rekey shares/threshold to 1") + config.StoredShares = 1 + } + default: + if config.StoredShares != 1 { + c.logger.Warn("stored keys supported, forcing rekey shares/threshold to 1") + config.StoredShares = 1 + } + config.SecretShares = 1 + config.SecretThreshold = 1 + + if len(config.PGPKeys) > 0 { + return logical.CodedError(http.StatusBadRequest, "PGP key encryption not supported when using stored keys") + } + if config.Backup { + return logical.CodedError(http.StatusBadRequest, "key backup not supported when using stored keys") + } + } + + if c.seal.RecoveryKeySupported() { + if config.VerificationRequired { + return logical.CodedError(http.StatusBadRequest, "requiring verification not supported when rekeying the barrier key with recovery keys") + } + c.logger.Debug("using recovery seal configuration to rekey barrier key") + } + + // Check if the seal configuration is valid + if err := config.Validate(); err != nil { + c.logger.Error("invalid rekey seal configuration", "error", err) + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("invalid rekey seal configuration: {{err}}", err).Error()) + } + + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() { + return logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + c.rekeyLock.Lock() + defer c.rekeyLock.Unlock() + + // Prevent multiple concurrent re-keys + if c.barrierRekeyConfig != nil { + return logical.CodedError(http.StatusBadRequest, "rekey already in progress") + } + + // Copy the configuration + c.barrierRekeyConfig = config.Clone() + + // Initialize the nonce + nonce, err := uuid.GenerateUUID() + if err != nil { + c.barrierRekeyConfig = nil + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error generating nonce for procedure: {{err}}", err).Error()) + } + c.barrierRekeyConfig.Nonce = nonce + + if c.logger.IsInfo() { + c.logger.Info("rekey initialized", "nonce", c.barrierRekeyConfig.Nonce, "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold, "validation_required", c.barrierRekeyConfig.VerificationRequired) + } + return nil +} + +// RecoveryRekeyInit is used to initialize the rekey settings for the recovery key +func (c *Core) RecoveryRekeyInit(config *SealConfig) logical.HTTPCodedError { + if config.StoredShares > 0 { + return logical.CodedError(http.StatusBadRequest, "stored shares not supported by recovery key") + } + + // Check if the seal configuration is valid + if err := config.Validate(); err != nil 
{ + c.logger.Error("invalid recovery configuration", "error", err) + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("invalid recovery configuration: {{err}}", err).Error()) + } + + if !c.seal.RecoveryKeySupported() { + return logical.CodedError(http.StatusBadRequest, "recovery keys not supported") + } + + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() { + return logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + c.rekeyLock.Lock() + defer c.rekeyLock.Unlock() + + // Prevent multiple concurrent re-keys + if c.recoveryRekeyConfig != nil { + return logical.CodedError(http.StatusBadRequest, "rekey already in progress") + } + + // Copy the configuration + c.recoveryRekeyConfig = config.Clone() + + // Initialize the nonce + nonce, err := uuid.GenerateUUID() + if err != nil { + c.recoveryRekeyConfig = nil + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error generating nonce for procedure: {{err}}", err).Error()) + } + c.recoveryRekeyConfig.Nonce = nonce + + if c.logger.IsInfo() { + c.logger.Info("rekey initialized", "nonce", c.recoveryRekeyConfig.Nonce, "shares", c.recoveryRekeyConfig.SecretShares, "threshold", c.recoveryRekeyConfig.SecretThreshold, "validation_required", c.recoveryRekeyConfig.VerificationRequired) + } + return nil +} + +// RekeyUpdate is used to provide a new key part for the barrier or recovery key. +func (c *Core) RekeyUpdate(ctx context.Context, key []byte, nonce string, recovery bool) (*RekeyResult, logical.HTTPCodedError) { + if recovery { + return c.RecoveryRekeyUpdate(ctx, key, nonce) + } + return c.BarrierRekeyUpdate(ctx, key, nonce) +} + +// BarrierRekeyUpdate is used to provide a new key part. Barrier rekey can be done +// with unseal keys, or recovery keys if that's supported and we are storing the barrier +// key. +// +// N.B.: If recovery keys are used to rekey, the new barrier key shares are not returned. 
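BarrierRekeyUpdate below reconstructs the existing key from the accumulated parts with shamir.Combine, then splits the newly generated key with shamir.Split. The round trip in isolation, using the same vendored package:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/vault/shamir"
)

func main() {
	secret := []byte("example unseal key material")

	// Split into 5 shares with a threshold of 3, as a rekey to
	// secret_shares=5 / secret_threshold=3 would.
	shares, err := shamir.Split(secret, 5, 3)
	if err != nil {
		panic(err)
	}

	// Any 3 of the shares reconstruct the secret.
	recovered, err := shamir.Combine(shares[:3])
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(secret, recovered)) // true
}
```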
+func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) (*RekeyResult, logical.HTTPCodedError) { + // Ensure we are already unsealed + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() { + return nil, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return nil, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + // Verify the key length + min, max := c.barrier.KeyLength() + max += shamir.ShareOverhead + if len(key) < min { + return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is shorter than minimum %d bytes", min)) + } + if len(key) > max { + return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is longer than maximum %d bytes", max)) + } + + c.rekeyLock.Lock() + defer c.rekeyLock.Unlock() + + // Get the seal configuration + var existingConfig *SealConfig + var err error + var useRecovery bool // Determines whether recovery key is being used to rekey the master key + if c.seal.StoredKeysSupported() == StoredKeysSupportedGeneric && c.seal.RecoveryKeySupported() { + existingConfig, err = c.seal.RecoveryConfig(ctx) + useRecovery = true + } else { + existingConfig, err = c.seal.BarrierConfig(ctx) + } + if err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to fetch existing config: {{err}}", err).Error()) + } + // Ensure the barrier is initialized + if existingConfig == nil { + return nil, logical.CodedError(http.StatusBadRequest, ErrNotInit.Error()) + } + + // Ensure a rekey is in progress + if c.barrierRekeyConfig == nil { + return nil, logical.CodedError(http.StatusBadRequest, "no barrier rekey in progress") + } + + if len(c.barrierRekeyConfig.VerificationKey) > 0 { + return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("rekey operation already finished; verification must be performed; nonce for the verification operation is %q", c.barrierRekeyConfig.VerificationNonce)) + } + + if nonce != c.barrierRekeyConfig.Nonce { + return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("incorrect nonce supplied; nonce for this rekey operation is %q", c.barrierRekeyConfig.Nonce)) + } + + // Check if we already have this piece + for _, existing := range c.barrierRekeyConfig.RekeyProgress { + if subtle.ConstantTimeCompare(existing, key) == 1 { + return nil, logical.CodedError(http.StatusBadRequest, "given key has already been provided during this generation operation") + } + } + + // Store this key + c.barrierRekeyConfig.RekeyProgress = append(c.barrierRekeyConfig.RekeyProgress, key) + + // Check if we don't have enough keys to unlock + if len(c.barrierRekeyConfig.RekeyProgress) < existingConfig.SecretThreshold { + if c.logger.IsDebug() { + c.logger.Debug("cannot rekey yet, not enough keys", "keys", len(c.barrierRekeyConfig.RekeyProgress), "threshold", existingConfig.SecretThreshold) + } + return nil, nil + } + + // Recover the master key or recovery key + var recoveredKey []byte + if existingConfig.SecretThreshold == 1 { + recoveredKey = c.barrierRekeyConfig.RekeyProgress[0] + c.barrierRekeyConfig.RekeyProgress = nil + } else { + recoveredKey, err = shamir.Combine(c.barrierRekeyConfig.RekeyProgress) + c.barrierRekeyConfig.RekeyProgress = nil + if err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to compute master key: {{err}}", err).Error()) + } + } + + switch { + case useRecovery: + if err := c.seal.VerifyRecoveryKey(ctx, 
recoveredKey); err != nil { + c.logger.Error("rekey recovery key verification failed", "error", err) + return nil, logical.CodedError(http.StatusBadRequest, errwrap.Wrapf("recovery key verification failed: {{err}}", err).Error()) + } + case c.seal.BarrierType() == seal.Shamir: + if c.seal.StoredKeysSupported() == StoredKeysSupportedShamirMaster { + testseal := NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("testseal"))) + testseal.SetCore(c) + err = testseal.GetAccess().(*shamirseal.ShamirSeal).SetKey(recoveredKey) + if err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to setup unseal key: {{err}}", err).Error()) + } + cfg, err := c.seal.BarrierConfig(ctx) + if err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to setup test barrier config: {{err}}", err).Error()) + } + testseal.SetCachedBarrierConfig(cfg) + stored, err := testseal.GetStoredKeys(ctx) + if err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to read master key: {{err}}", err).Error()) + } + recoveredKey = stored[0] + } + if err := c.barrier.VerifyMaster(recoveredKey); err != nil { + c.logger.Error("master key verification failed", "error", err) + return nil, logical.CodedError(http.StatusBadRequest, errwrap.Wrapf("master key verification failed: {{err}}", err).Error()) + } + } + + // Generate a new key: for AutoUnseal, this is a new master key; for Shamir, + // this is a new unseal key, and performBarrierRekey will also generate a + // new master key. + newKey, err := c.barrier.GenerateKey(c.secureRandomReader) + if err != nil { + c.logger.Error("failed to generate master key", "error", err) + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("master key generation failed: {{err}}", err).Error()) + } + + results := &RekeyResult{ + Backup: c.barrierRekeyConfig.Backup, + } + if c.seal.StoredKeysSupported() != StoredKeysSupportedGeneric { + // Set result.SecretShares to the new key itself if only a single key + // part is used -- no Shamir split required. + if c.barrierRekeyConfig.SecretShares == 1 { + results.SecretShares = append(results.SecretShares, newKey) + } else { + // Split the new key using the Shamir algorithm + shares, err := shamir.Split(newKey, c.barrierRekeyConfig.SecretShares, c.barrierRekeyConfig.SecretThreshold) + if err != nil { + c.logger.Error("failed to generate shares", "error", err) + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate shares: {{err}}", err).Error()) + } + results.SecretShares = shares + } + } + + // If PGP keys are passed in, encrypt shares with corresponding PGP keys. 
+	if len(c.barrierRekeyConfig.PGPKeys) > 0 {
+		hexEncodedShares := make([][]byte, len(results.SecretShares))
+		for i := range results.SecretShares {
+			hexEncodedShares[i] = []byte(hex.EncodeToString(results.SecretShares[i]))
+		}
+		results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(hexEncodedShares, c.barrierRekeyConfig.PGPKeys)
+		if err != nil {
+			return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to encrypt shares: {{err}}", err).Error())
+		}
+
+		// If backup is enabled, store backup info in vault.coreBarrierUnsealKeysBackupPath
+		if c.barrierRekeyConfig.Backup {
+			backupInfo := map[string][]string{}
+			for i := 0; i < len(results.PGPFingerprints); i++ {
+				encShare := bytes.NewBuffer(results.SecretShares[i])
+				if backupInfo[results.PGPFingerprints[i]] == nil {
+					backupInfo[results.PGPFingerprints[i]] = []string{hex.EncodeToString(encShare.Bytes())}
+				} else {
+					backupInfo[results.PGPFingerprints[i]] = append(backupInfo[results.PGPFingerprints[i]], hex.EncodeToString(encShare.Bytes()))
+				}
+			}
+
+			backupVals := &RekeyBackup{
+				Nonce: c.barrierRekeyConfig.Nonce,
+				Keys:  backupInfo,
+			}
+			buf, err := json.Marshal(backupVals)
+			if err != nil {
+				c.logger.Error("failed to marshal unseal key backup", "error", err)
+				return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to marshal unseal key backup: {{err}}", err).Error())
+			}
+			pe := &physical.Entry{
+				Key:   coreBarrierUnsealKeysBackupPath,
+				Value: buf,
+			}
+			if err = c.physical.Put(ctx, pe); err != nil {
+				c.logger.Error("failed to save unseal key backup", "error", err)
+				return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save unseal key backup: {{err}}", err).Error())
+			}
+		}
+	}
+
+	// If we are requiring validation, return now; otherwise rekey the barrier
+	if c.barrierRekeyConfig.VerificationRequired {
+		nonce, err := uuid.GenerateUUID()
+		if err != nil {
+			c.barrierRekeyConfig = nil
+			return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate verification nonce: {{err}}", err).Error())
+		}
+		c.barrierRekeyConfig.VerificationNonce = nonce
+		c.barrierRekeyConfig.VerificationKey = newKey
+
+		results.VerificationRequired = true
+		results.VerificationNonce = nonce
+		return results, nil
+	}
+
+	if err := c.performBarrierRekey(ctx, newKey); err != nil {
+		return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform barrier rekey: {{err}}", err).Error())
+	}
+
+	c.barrierRekeyConfig = nil
+	return results, nil
+}
+
+func (c *Core) performBarrierRekey(ctx context.Context, newSealKey []byte) logical.HTTPCodedError {
+	legacyUpgrade := c.seal.StoredKeysSupported() == StoredKeysNotSupported
+	if legacyUpgrade {
+		// We won't be able to call SetStoredKeys without setting StoredShares=1.
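+		// NOTE (hedged sketch, not upstream commentary): StoredShares=1 marks
+		// the config as having exactly one master key in storage, which is the
+		// precondition for the SetStoredKeys call later in this function. The
+		// shape of the legacy-upgrade fixup is:
+		//
+		//	cfg, _ := seal.BarrierConfig(ctx) // fetch the live config
+		//	cfg.StoredShares = 1              // mark a single stored key
+		//	seal.SetCachedBarrierConfig(cfg)  // cache it for later reads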
+ existingConfig, err := c.seal.BarrierConfig(ctx) + if err != nil { + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to fetch existing config: {{err}}", err).Error()) + } + existingConfig.StoredShares = 1 + c.seal.SetCachedBarrierConfig(existingConfig) + } + + if c.seal.StoredKeysSupported() != StoredKeysSupportedGeneric { + err := c.seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(newSealKey) + if err != nil { + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to update barrier seal key: {{err}}", err).Error()) + } + } + + newMasterKey, err := c.barrier.GenerateKey(c.secureRandomReader) + if err != nil { + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform rekey: {{err}}", err).Error()) + } + if err := c.seal.SetStoredKeys(ctx, [][]byte{newMasterKey}); err != nil { + c.logger.Error("failed to store keys", "error", err) + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to store keys: {{err}}", err).Error()) + } + + // Rekey the barrier + if err := c.barrier.Rekey(ctx, newMasterKey); err != nil { + c.logger.Error("failed to rekey barrier", "error", err) + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to rekey barrier: {{err}}", err).Error()) + } + if c.logger.IsInfo() { + c.logger.Info("security barrier rekeyed", "stored", c.barrierRekeyConfig.StoredShares, "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold) + } + + if len(newSealKey) > 0 { + err := c.barrier.Put(ctx, &logical.StorageEntry{ + Key: shamirKekPath, + Value: newSealKey, + }) + if err != nil { + c.logger.Error("failed to store new seal key", "error", err) + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to store new seal key: {{err}}", err).Error()) + } + } + + c.barrierRekeyConfig.VerificationKey = nil + + if err := c.seal.SetBarrierConfig(ctx, c.barrierRekeyConfig); err != nil { + c.logger.Error("error saving rekey seal configuration", "error", err) + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save rekey seal configuration: {{err}}", err).Error()) + } + + // Write to the canary path, which will force a synchronous truing during + // replication + if err := c.barrier.Put(ctx, &logical.StorageEntry{ + Key: coreKeyringCanaryPath, + Value: []byte(c.barrierRekeyConfig.Nonce), + }); err != nil { + c.logger.Error("error saving keyring canary", "error", err) + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save keyring canary: {{err}}", err).Error()) + } + + c.barrierRekeyConfig.RekeyProgress = nil + + return nil +} + +// RecoveryRekeyUpdate is used to provide a new key part +func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string) (*RekeyResult, logical.HTTPCodedError) { + // Ensure we are already unsealed + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() { + return nil, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return nil, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + // Verify the key length + min, max := c.barrier.KeyLength() + max += shamir.ShareOverhead + if len(key) < min { + return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is shorter than minimum %d bytes", min)) + } + if len(key) > max { + return nil, logical.CodedError(http.StatusBadRequest, 
fmt.Sprintf("key is longer than maximum %d bytes", max)) + } + + c.rekeyLock.Lock() + defer c.rekeyLock.Unlock() + + // Get the seal configuration + existingConfig, err := c.seal.RecoveryConfig(ctx) + if err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to fetch existing recovery config: {{err}}", err).Error()) + } + // Ensure the seal is initialized + if existingConfig == nil { + return nil, logical.CodedError(http.StatusBadRequest, ErrNotInit.Error()) + } + + // Ensure a rekey is in progress + if c.recoveryRekeyConfig == nil { + return nil, logical.CodedError(http.StatusBadRequest, "no recovery rekey in progress") + } + + if len(c.recoveryRekeyConfig.VerificationKey) > 0 { + return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("rekey operation already finished; verification must be performed; nonce for the verification operation is %q", c.recoveryRekeyConfig.VerificationNonce)) + } + + if nonce != c.recoveryRekeyConfig.Nonce { + return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("incorrect nonce supplied; nonce for this rekey operation is %q", c.recoveryRekeyConfig.Nonce)) + } + + // Check if we already have this piece + for _, existing := range c.recoveryRekeyConfig.RekeyProgress { + if subtle.ConstantTimeCompare(existing, key) == 1 { + return nil, logical.CodedError(http.StatusBadRequest, "given key has already been provided during this rekey operation") + } + } + + // Store this key + c.recoveryRekeyConfig.RekeyProgress = append(c.recoveryRekeyConfig.RekeyProgress, key) + + // Check if we don't have enough keys to unlock + if len(c.recoveryRekeyConfig.RekeyProgress) < existingConfig.SecretThreshold { + if c.logger.IsDebug() { + c.logger.Debug("cannot rekey yet, not enough keys", "keys", len(c.recoveryRekeyConfig.RekeyProgress), "threshold", existingConfig.SecretThreshold) + } + return nil, nil + } + + // Recover the master key + var recoveryKey []byte + if existingConfig.SecretThreshold == 1 { + recoveryKey = c.recoveryRekeyConfig.RekeyProgress[0] + c.recoveryRekeyConfig.RekeyProgress = nil + } else { + recoveryKey, err = shamir.Combine(c.recoveryRekeyConfig.RekeyProgress) + c.recoveryRekeyConfig.RekeyProgress = nil + if err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to compute recovery key: {{err}}", err).Error()) + } + } + + // Verify the recovery key + if err := c.seal.VerifyRecoveryKey(ctx, recoveryKey); err != nil { + c.logger.Error("recovery key verification failed", "error", err) + return nil, logical.CodedError(http.StatusBadRequest, errwrap.Wrapf("recovery key verification failed: {{err}}", err).Error()) + } + + // Generate a new master key + newMasterKey, err := c.barrier.GenerateKey(c.secureRandomReader) + if err != nil { + c.logger.Error("failed to generate recovery key", "error", err) + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("recovery key generation failed: {{err}}", err).Error()) + } + + // Return the master key if only a single key part is used + results := &RekeyResult{ + Backup: c.recoveryRekeyConfig.Backup, + } + + if c.recoveryRekeyConfig.SecretShares == 1 { + results.SecretShares = append(results.SecretShares, newMasterKey) + } else { + // Split the master key using the Shamir algorithm + shares, err := shamir.Split(newMasterKey, c.recoveryRekeyConfig.SecretShares, c.recoveryRekeyConfig.SecretThreshold) + if err != nil { + c.logger.Error("failed to generate shares", "error", err) + return nil, 
+		}
+		results.SecretShares = shares
+	}
+
+	if len(c.recoveryRekeyConfig.PGPKeys) > 0 {
+		hexEncodedShares := make([][]byte, len(results.SecretShares))
+		for i := range results.SecretShares {
+			hexEncodedShares[i] = []byte(hex.EncodeToString(results.SecretShares[i]))
+		}
+		results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(hexEncodedShares, c.recoveryRekeyConfig.PGPKeys)
+		if err != nil {
+			return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to encrypt shares: {{err}}", err).Error())
+		}
+
+		if c.recoveryRekeyConfig.Backup {
+			backupInfo := map[string][]string{}
+			for i := 0; i < len(results.PGPFingerprints); i++ {
+				encShare := bytes.NewBuffer(results.SecretShares[i])
+				if backupInfo[results.PGPFingerprints[i]] == nil {
+					backupInfo[results.PGPFingerprints[i]] = []string{hex.EncodeToString(encShare.Bytes())}
+				} else {
+					backupInfo[results.PGPFingerprints[i]] = append(backupInfo[results.PGPFingerprints[i]], hex.EncodeToString(encShare.Bytes()))
+				}
+			}
+
+			backupVals := &RekeyBackup{
+				Nonce: c.recoveryRekeyConfig.Nonce,
+				Keys:  backupInfo,
+			}
+			buf, err := json.Marshal(backupVals)
+			if err != nil {
+				c.logger.Error("failed to marshal recovery key backup", "error", err)
+				return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to marshal recovery key backup: {{err}}", err).Error())
+			}
+			pe := &physical.Entry{
+				Key:   coreRecoveryUnsealKeysBackupPath,
+				Value: buf,
+			}
+			if err = c.physical.Put(ctx, pe); err != nil {
+				c.logger.Error("failed to save unseal key backup", "error", err)
+				return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save unseal key backup: {{err}}", err).Error())
+			}
+		}
+	}
+
+	// If we are requiring validation, return now; otherwise save the recovery
+	// key
+	if c.recoveryRekeyConfig.VerificationRequired {
+		nonce, err := uuid.GenerateUUID()
+		if err != nil {
+			c.recoveryRekeyConfig = nil
+			return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate verification nonce: {{err}}", err).Error())
+		}
+		c.recoveryRekeyConfig.VerificationNonce = nonce
+		c.recoveryRekeyConfig.VerificationKey = newMasterKey
+
+		results.VerificationRequired = true
+		results.VerificationNonce = nonce
+		return results, nil
+	}
+
+	if err := c.performRecoveryRekey(ctx, newMasterKey); err != nil {
+		return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform recovery rekey: {{err}}", err).Error())
+	}
+
+	c.recoveryRekeyConfig = nil
+	return results, nil
+}
+
+func (c *Core) performRecoveryRekey(ctx context.Context, newMasterKey []byte) logical.HTTPCodedError {
+	if err := c.seal.SetRecoveryKey(ctx, newMasterKey); err != nil {
+		c.logger.Error("failed to set recovery key", "error", err)
+		return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to set recovery key: {{err}}", err).Error())
+	}
+
+	c.recoveryRekeyConfig.VerificationKey = nil
+
+	if err := c.seal.SetRecoveryConfig(ctx, c.recoveryRekeyConfig); err != nil {
+		c.logger.Error("error saving rekey seal configuration", "error", err)
+		return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save rekey seal configuration: {{err}}", err).Error())
+	}
+
+	// Write to the canary path, which will force a synchronous truing during
+	// replication
+	if err := c.barrier.Put(ctx,
&logical.StorageEntry{ + Key: coreKeyringCanaryPath, + Value: []byte(c.recoveryRekeyConfig.Nonce), + }); err != nil { + c.logger.Error("error saving keyring canary", "error", err) + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save keyring canary: {{err}}", err).Error()) + } + + c.recoveryRekeyConfig.RekeyProgress = nil + + return nil +} + +func (c *Core) RekeyVerify(ctx context.Context, key []byte, nonce string, recovery bool) (ret *RekeyVerifyResult, retErr logical.HTTPCodedError) { + // Ensure we are already unsealed + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() { + return nil, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return nil, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + // Verify the key length + min, max := c.barrier.KeyLength() + max += shamir.ShareOverhead + if len(key) < min { + return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is shorter than minimum %d bytes", min)) + } + if len(key) > max { + return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is longer than maximum %d bytes", max)) + } + + c.rekeyLock.Lock() + defer c.rekeyLock.Unlock() + + config := c.barrierRekeyConfig + if recovery { + config = c.recoveryRekeyConfig + } + + // Ensure a rekey is in progress + if config == nil { + return nil, logical.CodedError(http.StatusBadRequest, "no rekey in progress") + } + + if len(config.VerificationKey) == 0 { + return nil, logical.CodedError(http.StatusBadRequest, "no rekey verification in progress") + } + + if nonce != config.VerificationNonce { + return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("incorrect nonce supplied; nonce for this verify operation is %q", config.VerificationNonce)) + } + + // Check if we already have this piece + for _, existing := range config.VerificationProgress { + if subtle.ConstantTimeCompare(existing, key) == 1 { + return nil, logical.CodedError(http.StatusBadRequest, "given key has already been provided during this verify operation") + } + } + + // Store this key + config.VerificationProgress = append(config.VerificationProgress, key) + + // Check if we don't have enough keys to unlock + if len(config.VerificationProgress) < config.SecretThreshold { + if c.logger.IsDebug() { + c.logger.Debug("cannot verify yet, not enough keys", "keys", len(config.VerificationProgress), "threshold", config.SecretThreshold) + } + return nil, nil + } + + // Schedule the progress for forgetting and rotate the nonce if possible + defer func() { + config.VerificationProgress = nil + if ret != nil && ret.Complete { + return + } + // Not complete, so rotate nonce + nonce, err := uuid.GenerateUUID() + if err == nil { + config.VerificationNonce = nonce + if ret != nil { + ret.Nonce = nonce + } + } + }() + + // Recover the master key or recovery key + var recoveredKey []byte + if config.SecretThreshold == 1 { + recoveredKey = config.VerificationProgress[0] + } else { + var err error + recoveredKey, err = shamir.Combine(config.VerificationProgress) + if err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to compute key for verification: {{err}}", err).Error()) + } + } + + if subtle.ConstantTimeCompare(recoveredKey, config.VerificationKey) != 1 { + c.logger.Error("rekey verification failed") + return nil, logical.CodedError(http.StatusBadRequest, "rekey verification failed; incorrect key shares supplied") + } + + switch 
recovery { + case false: + if err := c.performBarrierRekey(ctx, recoveredKey); err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform rekey: {{err}}", err).Error()) + } + c.barrierRekeyConfig = nil + default: + if err := c.performRecoveryRekey(ctx, recoveredKey); err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform recovery key rekey: {{err}}", err).Error()) + } + c.recoveryRekeyConfig = nil + } + + res := &RekeyVerifyResult{ + Nonce: config.VerificationNonce, + Complete: true, + } + + return res, nil +} + +// RekeyCancel is used to cancel an in-progress rekey +func (c *Core) RekeyCancel(recovery bool) logical.HTTPCodedError { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() { + return logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + c.rekeyLock.Lock() + defer c.rekeyLock.Unlock() + + // Clear any progress or config + if recovery { + c.recoveryRekeyConfig = nil + } else { + c.barrierRekeyConfig = nil + } + return nil +} + +// RekeyVerifyRestart is used to start the verification process over +func (c *Core) RekeyVerifyRestart(recovery bool) logical.HTTPCodedError { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + if c.Sealed() { + return logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + c.rekeyLock.Lock() + defer c.rekeyLock.Unlock() + + // Attempt to generate a new nonce, but don't bail if it doesn't succeed + // (which is extraordinarily unlikely) + nonce, nonceErr := uuid.GenerateUUID() + + // Clear any progress or config + if recovery { + c.recoveryRekeyConfig.VerificationProgress = nil + if nonceErr == nil { + c.recoveryRekeyConfig.VerificationNonce = nonce + } + } else { + c.barrierRekeyConfig.VerificationProgress = nil + if nonceErr == nil { + c.barrierRekeyConfig.VerificationNonce = nonce + } + } + + return nil +} + +// RekeyRetrieveBackup is used to retrieve any backed-up PGP-encrypted unseal +// keys +func (c *Core) RekeyRetrieveBackup(ctx context.Context, recovery bool) (*RekeyBackup, logical.HTTPCodedError) { + if c.Sealed() { + return nil, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error()) + } + if c.standby { + return nil, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + c.rekeyLock.RLock() + defer c.rekeyLock.RUnlock() + + var entry *physical.Entry + var err error + if recovery { + entry, err = c.physical.Get(ctx, coreRecoveryUnsealKeysBackupPath) + } else { + entry, err = c.physical.Get(ctx, coreBarrierUnsealKeysBackupPath) + } + if err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error getting keys from backup: {{err}}", err).Error()) + } + if entry == nil { + return nil, nil + } + + ret := &RekeyBackup{} + err = jsonutil.DecodeJSON(entry.Value, ret) + if err != nil { + return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error decoding backup keys: {{err}}", err).Error()) + } + + return ret, nil +} + +// RekeyDeleteBackup is used to delete any backed-up PGP-encrypted unseal keys +func (c *Core) RekeyDeleteBackup(ctx context.Context, recovery bool) logical.HTTPCodedError { + if c.Sealed() { + return logical.CodedError(http.StatusServiceUnavailable, 
consts.ErrSealed.Error()) + } + if c.standby { + return logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error()) + } + + c.rekeyLock.Lock() + defer c.rekeyLock.Unlock() + + if recovery { + err := c.physical.Delete(ctx, coreRecoveryUnsealKeysBackupPath) + if err != nil { + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error deleting backup keys: {{err}}", err).Error()) + } + return nil + } + err := c.physical.Delete(ctx, coreBarrierUnsealKeysBackupPath) + if err != nil { + return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error deleting backup keys: {{err}}", err).Error()) + } + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/replication/cluster.go b/vendor/github.com/hashicorp/vault/vault/replication/cluster.go new file mode 100644 index 00000000..4030cd06 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/replication/cluster.go @@ -0,0 +1,16 @@ +// +build !enterprise + +package replication + +import "github.com/hashicorp/vault/sdk/helper/consts" + +type Cluster struct { + State consts.ReplicationState + ClusterID string + PrimaryClusterAddr string +} + +type Clusters struct { + DR *Cluster + Performance *Cluster +} diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding.go new file mode 100644 index 00000000..b97263e6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding.go @@ -0,0 +1,359 @@ +package vault + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + math "math" + "net/http" + "net/url" + "sync" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/forwarding" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault/cluster" + "github.com/hashicorp/vault/vault/replication" + "golang.org/x/net/http2" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +type requestForwardingHandler struct { + fws *http2.Server + fwRPCServer *grpc.Server + logger log.Logger + ha bool + core *Core + stopCh chan struct{} +} + +type requestForwardingClusterClient struct { + core *Core +} + +// NewRequestForwardingHandler creates a cluster handler for use with request +// forwarding. +func NewRequestForwardingHandler(c *Core, fws *http2.Server, perfStandbySlots chan struct{}, perfStandbyRepCluster *replication.Cluster) (*requestForwardingHandler, error) { + // Resolve locally to avoid races + ha := c.ha != nil + + fwRPCServer := grpc.NewServer( + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 2 * cluster.HeartbeatInterval, + }), + grpc.MaxRecvMsgSize(math.MaxInt32), + grpc.MaxSendMsgSize(math.MaxInt32), + ) + + if ha && c.clusterHandler != nil { + RegisterRequestForwardingServer(fwRPCServer, &forwardedRequestRPCServer{ + core: c, + handler: c.clusterHandler, + perfStandbySlots: perfStandbySlots, + perfStandbyRepCluster: perfStandbyRepCluster, + raftFollowerStates: c.raftFollowerStates, + }) + } + + return &requestForwardingHandler{ + fws: fws, + fwRPCServer: fwRPCServer, + ha: ha, + logger: c.logger.Named("request-forward"), + core: c, + stopCh: make(chan struct{}), + }, nil +} + +// ClientLookup satisfies the ClusterClient interface and returns the ha tls +// client certs. 
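+// NOTE (illustrative sketch, not upstream code): the certificate material
+// consulted below is held in atomic.Values, so each read is a Load plus a
+// type assertion:
+//
+//	var v atomic.Value
+//	v.Store(&x509.Certificate{})
+//	cert := v.Load().(*x509.Certificate) // still check cert for nil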
+func (c *requestForwardingClusterClient) ClientLookup(ctx context.Context, requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) { + parsedCert := c.core.localClusterParsedCert.Load().(*x509.Certificate) + if parsedCert == nil { + return nil, nil + } + currCert := c.core.localClusterCert.Load().([]byte) + if len(currCert) == 0 { + return nil, nil + } + localCert := make([]byte, len(currCert)) + copy(localCert, currCert) + + for _, subj := range requestInfo.AcceptableCAs { + if bytes.Equal(subj, parsedCert.RawIssuer) { + return &tls.Certificate{ + Certificate: [][]byte{localCert}, + PrivateKey: c.core.localClusterPrivateKey.Load().(*ecdsa.PrivateKey), + Leaf: c.core.localClusterParsedCert.Load().(*x509.Certificate), + }, nil + } + } + + return nil, nil +} + +// ServerLookup satisfies the ClusterHandler interface and returns the server's +// tls certs. +func (rf *requestForwardingHandler) ServerLookup(ctx context.Context, clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + currCert := rf.core.localClusterCert.Load().([]byte) + if len(currCert) == 0 { + return nil, fmt.Errorf("got forwarding connection but no local cert") + } + + localCert := make([]byte, len(currCert)) + copy(localCert, currCert) + + return &tls.Certificate{ + Certificate: [][]byte{localCert}, + PrivateKey: rf.core.localClusterPrivateKey.Load().(*ecdsa.PrivateKey), + Leaf: rf.core.localClusterParsedCert.Load().(*x509.Certificate), + }, nil +} + +// CALookup satisfies the ClusterHandler interface and returns the ha ca cert. +func (rf *requestForwardingHandler) CALookup(ctx context.Context) ([]*x509.Certificate, error) { + parsedCert := rf.core.localClusterParsedCert.Load().(*x509.Certificate) + + if parsedCert == nil { + return nil, fmt.Errorf("forwarding connection client but no local cert") + } + + return []*x509.Certificate{parsedCert}, nil +} + +// Handoff serves a request forwarding connection. +func (rf *requestForwardingHandler) Handoff(ctx context.Context, shutdownWg *sync.WaitGroup, closeCh chan struct{}, tlsConn *tls.Conn) error { + if !rf.ha { + tlsConn.Close() + return nil + } + + rf.logger.Debug("got request forwarding connection") + + shutdownWg.Add(2) + // quitCh is used to close the connection and the second + // goroutine if the server closes before closeCh. + quitCh := make(chan struct{}) + go func() { + select { + case <-quitCh: + case <-closeCh: + case <-rf.stopCh: + } + tlsConn.Close() + shutdownWg.Done() + }() + + go func() { + rf.fws.ServeConn(tlsConn, &http2.ServeConnOpts{ + Handler: rf.fwRPCServer, + BaseConfig: &http.Server{ + ErrorLog: rf.logger.StandardLogger(nil), + }, + }) + + // close the quitCh which will close the connection and + // the other goroutine. + close(quitCh) + shutdownWg.Done() + }() + + return nil +} + +// Stop stops the request forwarding server and closes connections. +func (rf *requestForwardingHandler) Stop() error { + // Give some time for existing RPCs to drain. 
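+	// NOTE (illustrative, mirrors the select in Handoff above): closing
+	// stopCh releases every per-connection goroutine, each of which waits on
+	// a one-of-N channel fan-in before closing its TLS connection:
+	//
+	//	select {
+	//	case <-quitCh:  // RPC server finished with this connection
+	//	case <-closeCh: // caller requested shutdown
+	//	case <-stopCh:  // the whole handler is stopping
+	//	}
+	//	tlsConn.Close()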
+ time.Sleep(cluster.ListenerAcceptDeadline) + close(rf.stopCh) + rf.fwRPCServer.Stop() + return nil +} + +// Starts the listeners and servers necessary to handle forwarded requests +func (c *Core) startForwarding(ctx context.Context) error { + c.logger.Debug("request forwarding setup function") + defer c.logger.Debug("leaving request forwarding setup function") + + // Clean up in case we have transitioned from a client to a server + c.requestForwardingConnectionLock.Lock() + c.clearForwardingClients() + c.requestForwardingConnectionLock.Unlock() + + if c.ha == nil || c.getClusterListener() == nil { + c.logger.Debug("request forwarding not setup") + return nil + } + + perfStandbyRepCluster, perfStandbySlots, err := c.perfStandbyClusterHandler() + if err != nil { + return err + } + + handler, err := NewRequestForwardingHandler(c, c.getClusterListener().Server(), perfStandbySlots, perfStandbyRepCluster) + if err != nil { + return err + } + + c.getClusterListener().AddHandler(consts.RequestForwardingALPN, handler) + + return nil +} + +func (c *Core) stopForwarding() { + if c.getClusterListener() != nil { + c.getClusterListener().StopHandler(consts.RequestForwardingALPN) + c.getClusterListener().StopHandler(consts.PerfStandbyALPN) + } + c.removeAllPerfStandbySecondaries() +} + +// refreshRequestForwardingConnection ensures that the client/transport are +// alive and that the current active address value matches the most +// recently-known address. +func (c *Core) refreshRequestForwardingConnection(ctx context.Context, clusterAddr string) error { + c.logger.Debug("refreshing forwarding connection") + defer c.logger.Debug("done refreshing forwarding connection") + + c.requestForwardingConnectionLock.Lock() + defer c.requestForwardingConnectionLock.Unlock() + + // Clean things up first + c.clearForwardingClients() + + // If we don't have anything to connect to, just return + if clusterAddr == "" { + return nil + } + + clusterURL, err := url.Parse(clusterAddr) + if err != nil { + c.logger.Error("error parsing cluster address attempting to refresh forwarding connection", "error", err) + return err + } + + parsedCert := c.localClusterParsedCert.Load().(*x509.Certificate) + if parsedCert == nil { + c.logger.Error("no request forwarding cluster certificate found") + return errors.New("no request forwarding cluster certificate found") + } + + clusterListener := c.getClusterListener() + if clusterListener != nil { + clusterListener.AddClient(consts.RequestForwardingALPN, &requestForwardingClusterClient{ + core: c, + }) + } + + // Set up grpc forwarding handling + // It's not really insecure, but we have to dial manually to get the + // ALPN header right. It's just "insecure" because GRPC isn't managing + // the TLS state. 
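+	// NOTE (hedged sketch; getGRPCDialer is defined elsewhere in this package
+	// and its internals are assumed, alpn is a placeholder): the dialer shape
+	// grpc.WithDialer expects, with TLS and ALPN negotiated by hand, is
+	// roughly:
+	//
+	//	func(addr string, timeout time.Duration) (net.Conn, error) {
+	//		tlsCfg := &tls.Config{NextProtos: []string{alpn}} // plus client certs
+	//		return tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", addr, tlsCfg)
+	//	}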
+ dctx, cancelFunc := context.WithCancel(ctx) + c.rpcClientConn, err = grpc.DialContext(dctx, clusterURL.Host, + grpc.WithDialer(c.getGRPCDialer(ctx, consts.RequestForwardingALPN, parsedCert.Subject.CommonName, parsedCert)), + grpc.WithInsecure(), // it's not, we handle it in the dialer + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 2 * cluster.HeartbeatInterval, + }), + grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32), + grpc.MaxCallSendMsgSize(math.MaxInt32), + )) + if err != nil { + cancelFunc() + c.logger.Error("err setting up forwarding rpc client", "error", err) + return err + } + c.rpcClientConnContext = dctx + c.rpcClientConnCancelFunc = cancelFunc + c.rpcForwardingClient = &forwardingClient{ + RequestForwardingClient: NewRequestForwardingClient(c.rpcClientConn), + core: c, + echoTicker: time.NewTicker(cluster.HeartbeatInterval), + echoContext: dctx, + } + c.rpcForwardingClient.startHeartbeat() + + return nil +} + +func (c *Core) clearForwardingClients() { + c.logger.Debug("clearing forwarding clients") + defer c.logger.Debug("done clearing forwarding clients") + + if c.rpcClientConnCancelFunc != nil { + c.rpcClientConnCancelFunc() + c.rpcClientConnCancelFunc = nil + } + if c.rpcClientConn != nil { + c.rpcClientConn.Close() + c.rpcClientConn = nil + } + + c.rpcClientConnContext = nil + c.rpcForwardingClient = nil + + clusterListener := c.getClusterListener() + if clusterListener != nil { + clusterListener.RemoveClient(consts.RequestForwardingALPN) + } + c.clusterLeaderParams.Store((*ClusterLeaderParams)(nil)) +} + +// ForwardRequest forwards a given request to the active node and returns the +// response. +func (c *Core) ForwardRequest(req *http.Request) (int, http.Header, []byte, error) { + c.requestForwardingConnectionLock.RLock() + defer c.requestForwardingConnectionLock.RUnlock() + + if c.rpcForwardingClient == nil { + return 0, nil, nil, ErrCannotForward + } + + origPath := req.URL.Path + defer func() { + req.URL.Path = origPath + }() + + req.URL.Path = req.Context().Value("original_request_path").(string) + + freq, err := forwarding.GenerateForwardedRequest(req) + if err != nil { + c.logger.Error("error creating forwarding RPC request", "error", err) + return 0, nil, nil, fmt.Errorf("error creating forwarding RPC request") + } + if freq == nil { + c.logger.Error("got nil forwarding RPC request") + return 0, nil, nil, fmt.Errorf("got nil forwarding RPC request") + } + resp, err := c.rpcForwardingClient.ForwardRequest(c.rpcClientConnContext, freq) + if err != nil { + c.logger.Error("error during forwarded RPC request", "error", err) + return 0, nil, nil, fmt.Errorf("error during forwarding RPC request") + } + + var header http.Header + if resp.HeaderEntries != nil { + header = make(http.Header) + for k, v := range resp.HeaderEntries { + header[k] = v.Values + } + } + + // If we are a perf standby and the request was forwarded to the active node + // we should attempt to wait for the WAL to ship to offer best effort read after + // write guarantees + if c.perfStandby && resp.LastRemoteWal > 0 { + WaitUntilWALShipped(req.Context(), c, resp.LastRemoteWal) + } + + return int(resp.StatusCode), header, resp.Body, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc.go new file mode 100644 index 00000000..4812fb43 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc.go @@ -0,0 +1,155 @@ +package vault + +import ( + 
"context" + "net/http" + "runtime" + "sync/atomic" + "time" + + "github.com/hashicorp/vault/helper/forwarding" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault/replication" +) + +type forwardedRequestRPCServer struct { + core *Core + handler http.Handler + perfStandbySlots chan struct{} + perfStandbyRepCluster *replication.Cluster + raftFollowerStates *raftFollowerStates +} + +func (s *forwardedRequestRPCServer) ForwardRequest(ctx context.Context, freq *forwarding.Request) (*forwarding.Response, error) { + // Parse an http.Request out of it + req, err := forwarding.ParseForwardedRequest(freq) + if err != nil { + return nil, err + } + + // A very dummy response writer that doesn't follow normal semantics, just + // lets you write a status code (last written wins) and a body. But it + // meets the interface requirements. + w := forwarding.NewRPCResponseWriter() + + resp := &forwarding.Response{} + + runRequest := func() { + defer func() { + // Logic here comes mostly from the Go source code + if err := recover(); err != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + s.core.logger.Error("panic serving forwarded request", "path", req.URL.Path, "error", err, "stacktrace", string(buf)) + } + }() + s.handler.ServeHTTP(w, req) + } + runRequest() + resp.StatusCode = uint32(w.StatusCode()) + resp.Body = w.Body().Bytes() + + header := w.Header() + if header != nil { + resp.HeaderEntries = make(map[string]*forwarding.HeaderEntry, len(header)) + for k, v := range header { + resp.HeaderEntries[k] = &forwarding.HeaderEntry{ + Values: v, + } + } + } + + // Performance standby nodes will use this value to do wait for WALs to ship + // in order to do a best-effort read after write guarantee + resp.LastRemoteWal = LastWAL(s.core) + + return resp, nil +} + +func (s *forwardedRequestRPCServer) Echo(ctx context.Context, in *EchoRequest) (*EchoReply, error) { + if in.ClusterAddr != "" { + s.core.clusterPeerClusterAddrsCache.Set(in.ClusterAddr, nil, 0) + } + + if in.RaftAppliedIndex > 0 && len(in.RaftNodeID) > 0 && s.raftFollowerStates != nil { + s.raftFollowerStates.update(in.RaftNodeID, in.RaftAppliedIndex) + } + + reply := &EchoReply{ + Message: "pong", + ReplicationState: uint32(s.core.ReplicationState()), + } + + if raftStorage, ok := s.core.underlyingPhysical.(*raft.RaftBackend); ok { + reply.RaftAppliedIndex = raftStorage.AppliedIndex() + reply.RaftNodeID = raftStorage.NodeID() + } + + return reply, nil +} + +type forwardingClient struct { + RequestForwardingClient + + core *Core + + echoTicker *time.Ticker + echoContext context.Context +} + +// NOTE: we also take advantage of gRPC's keepalive bits, but as we send data +// with these requests it's useful to keep this as well +func (c *forwardingClient) startHeartbeat() { + go func() { + tick := func() { + c.core.stateLock.RLock() + clusterAddr := c.core.ClusterAddr() + c.core.stateLock.RUnlock() + + req := &EchoRequest{ + Message: "ping", + ClusterAddr: clusterAddr, + } + + if raftStorage, ok := c.core.underlyingPhysical.(*raft.RaftBackend); ok { + req.RaftAppliedIndex = raftStorage.AppliedIndex() + req.RaftNodeID = raftStorage.NodeID() + } + + ctx, cancel := context.WithTimeout(c.echoContext, 2*time.Second) + resp, err := c.RequestForwardingClient.Echo(ctx, req) + cancel() + if err != nil { + c.core.logger.Debug("forwarding: error sending echo request to active node", "error", err) + return + } + if resp == nil { + 
c.core.logger.Debug("forwarding: empty echo response from active node") + return + } + if resp.Message != "pong" { + c.core.logger.Debug("forwarding: unexpected echo response from active node", "message", resp.Message) + return + } + // Store the active node's replication state to display in + // sys/health calls + atomic.StoreUint32(c.core.activeNodeReplicationState, resp.ReplicationState) + } + + tick() + + for { + select { + case <-c.echoContext.Done(): + c.echoTicker.Stop() + c.core.logger.Debug("forwarding: stopping heartbeating") + atomic.StoreUint32(c.core.activeNodeReplicationState, uint32(consts.ReplicationUnknown)) + return + case <-c.echoTicker.C: + tick() + } + } + }() +} diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc_util.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc_util.go new file mode 100644 index 00000000..f4cd607d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc_util.go @@ -0,0 +1,17 @@ +// +build !enterprise + +package vault + +import ( + "context" +) + +func (s *forwardedRequestRPCServer) PerformanceStandbyElectionRequest(in *PerfStandbyElectionInput, reqServ RequestForwarding_PerformanceStandbyElectionRequestServer) error { + return nil +} + +type ReplicationTokenInfo struct{} + +func (c *forwardingClient) PerformanceStandbyElection(ctx context.Context) (*ReplicationTokenInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go new file mode 100644 index 00000000..5077c50e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go @@ -0,0 +1,576 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vault/request_forwarding_service.proto + +package vault + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + forwarding "github.com/hashicorp/vault/helper/forwarding" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type EchoRequest struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + // ClusterAddr is used to send up a standby node's address to the active + // node upon heartbeat + ClusterAddr string `protobuf:"bytes,2,opt,name=cluster_addr,json=clusterAddr,proto3" json:"cluster_addr,omitempty"` + // ClusterAddrs is used to send up a list of cluster addresses to a dr + // primary from a dr secondary + ClusterAddrs []string `protobuf:"bytes,3,rep,name=cluster_addrs,json=clusterAddrs,proto3" json:"cluster_addrs,omitempty"` + RaftAppliedIndex uint64 `protobuf:"varint,4,opt,name=raft_applied_index,json=raftAppliedIndex,proto3" json:"raft_applied_index,omitempty"` + RaftNodeID string `protobuf:"bytes,5,opt,name=raft_node_id,json=raftNodeId,proto3" json:"raft_node_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EchoRequest) Reset() { *m = EchoRequest{} } +func (m *EchoRequest) String() string { return proto.CompactTextString(m) } +func (*EchoRequest) ProtoMessage() {} +func (*EchoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f5f7512e4ab7b58a, []int{0} +} + +func (m *EchoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EchoRequest.Unmarshal(m, b) +} +func (m *EchoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EchoRequest.Marshal(b, m, deterministic) +} +func (m *EchoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EchoRequest.Merge(m, src) +} +func (m *EchoRequest) XXX_Size() int { + return xxx_messageInfo_EchoRequest.Size(m) +} +func (m *EchoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EchoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EchoRequest proto.InternalMessageInfo + +func (m *EchoRequest) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *EchoRequest) GetClusterAddr() string { + if m != nil { + return m.ClusterAddr + } + return "" +} + +func (m *EchoRequest) GetClusterAddrs() []string { + if m != nil { + return m.ClusterAddrs + } + return nil +} + +func (m *EchoRequest) GetRaftAppliedIndex() uint64 { + if m != nil { + return m.RaftAppliedIndex + } + return 0 +} + +func (m *EchoRequest) GetRaftNodeID() string { + if m != nil { + return m.RaftNodeID + } + return "" +} + +type EchoReply struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + ClusterAddrs []string `protobuf:"bytes,2,rep,name=cluster_addrs,json=clusterAddrs,proto3" json:"cluster_addrs,omitempty"` + ReplicationState uint32 `protobuf:"varint,3,opt,name=replication_state,json=replicationState,proto3" json:"replication_state,omitempty"` + RaftAppliedIndex uint64 `protobuf:"varint,4,opt,name=raft_applied_index,json=raftAppliedIndex,proto3" json:"raft_applied_index,omitempty"` + RaftNodeID string `protobuf:"bytes,5,opt,name=raft_node_id,json=raftNodeId,proto3" json:"raft_node_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EchoReply) Reset() { *m = EchoReply{} } +func (m *EchoReply) String() string { return proto.CompactTextString(m) } +func (*EchoReply) ProtoMessage() {} +func (*EchoReply) Descriptor() ([]byte, []int) { + return fileDescriptor_f5f7512e4ab7b58a, []int{1} +} + +func (m *EchoReply) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_EchoReply.Unmarshal(m, b) +} +func (m *EchoReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EchoReply.Marshal(b, m, deterministic) +} +func (m *EchoReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_EchoReply.Merge(m, src) +} +func (m *EchoReply) XXX_Size() int { + return xxx_messageInfo_EchoReply.Size(m) +} +func (m *EchoReply) XXX_DiscardUnknown() { + xxx_messageInfo_EchoReply.DiscardUnknown(m) +} + +var xxx_messageInfo_EchoReply proto.InternalMessageInfo + +func (m *EchoReply) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *EchoReply) GetClusterAddrs() []string { + if m != nil { + return m.ClusterAddrs + } + return nil +} + +func (m *EchoReply) GetReplicationState() uint32 { + if m != nil { + return m.ReplicationState + } + return 0 +} + +func (m *EchoReply) GetRaftAppliedIndex() uint64 { + if m != nil { + return m.RaftAppliedIndex + } + return 0 +} + +func (m *EchoReply) GetRaftNodeID() string { + if m != nil { + return m.RaftNodeID + } + return "" +} + +type ClientKey struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + X []byte `protobuf:"bytes,2,opt,name=x,proto3" json:"x,omitempty"` + Y []byte `protobuf:"bytes,3,opt,name=y,proto3" json:"y,omitempty"` + D []byte `protobuf:"bytes,4,opt,name=d,proto3" json:"d,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientKey) Reset() { *m = ClientKey{} } +func (m *ClientKey) String() string { return proto.CompactTextString(m) } +func (*ClientKey) ProtoMessage() {} +func (*ClientKey) Descriptor() ([]byte, []int) { + return fileDescriptor_f5f7512e4ab7b58a, []int{2} +} + +func (m *ClientKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientKey.Unmarshal(m, b) +} +func (m *ClientKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientKey.Marshal(b, m, deterministic) +} +func (m *ClientKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientKey.Merge(m, src) +} +func (m *ClientKey) XXX_Size() int { + return xxx_messageInfo_ClientKey.Size(m) +} +func (m *ClientKey) XXX_DiscardUnknown() { + xxx_messageInfo_ClientKey.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientKey proto.InternalMessageInfo + +func (m *ClientKey) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ClientKey) GetX() []byte { + if m != nil { + return m.X + } + return nil +} + +func (m *ClientKey) GetY() []byte { + if m != nil { + return m.Y + } + return nil +} + +func (m *ClientKey) GetD() []byte { + if m != nil { + return m.D + } + return nil +} + +type PerfStandbyElectionInput struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PerfStandbyElectionInput) Reset() { *m = PerfStandbyElectionInput{} } +func (m *PerfStandbyElectionInput) String() string { return proto.CompactTextString(m) } +func (*PerfStandbyElectionInput) ProtoMessage() {} +func (*PerfStandbyElectionInput) Descriptor() ([]byte, []int) { + return fileDescriptor_f5f7512e4ab7b58a, []int{3} +} + +func (m *PerfStandbyElectionInput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PerfStandbyElectionInput.Unmarshal(m, b) +} +func (m *PerfStandbyElectionInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PerfStandbyElectionInput.Marshal(b, m, deterministic) +} +func (m 
*PerfStandbyElectionInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_PerfStandbyElectionInput.Merge(m, src) +} +func (m *PerfStandbyElectionInput) XXX_Size() int { + return xxx_messageInfo_PerfStandbyElectionInput.Size(m) +} +func (m *PerfStandbyElectionInput) XXX_DiscardUnknown() { + xxx_messageInfo_PerfStandbyElectionInput.DiscardUnknown(m) +} + +var xxx_messageInfo_PerfStandbyElectionInput proto.InternalMessageInfo + +type PerfStandbyElectionResponse struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ClusterID string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + PrimaryClusterAddr string `protobuf:"bytes,3,opt,name=primary_cluster_addr,json=primaryClusterAddr,proto3" json:"primary_cluster_addr,omitempty"` + CaCert []byte `protobuf:"bytes,4,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"` + ClientCert []byte `protobuf:"bytes,5,opt,name=client_cert,json=clientCert,proto3" json:"client_cert,omitempty"` + ClientKey *ClientKey `protobuf:"bytes,6,opt,name=client_key,json=clientKey,proto3" json:"client_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PerfStandbyElectionResponse) Reset() { *m = PerfStandbyElectionResponse{} } +func (m *PerfStandbyElectionResponse) String() string { return proto.CompactTextString(m) } +func (*PerfStandbyElectionResponse) ProtoMessage() {} +func (*PerfStandbyElectionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f5f7512e4ab7b58a, []int{4} +} + +func (m *PerfStandbyElectionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PerfStandbyElectionResponse.Unmarshal(m, b) +} +func (m *PerfStandbyElectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PerfStandbyElectionResponse.Marshal(b, m, deterministic) +} +func (m *PerfStandbyElectionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PerfStandbyElectionResponse.Merge(m, src) +} +func (m *PerfStandbyElectionResponse) XXX_Size() int { + return xxx_messageInfo_PerfStandbyElectionResponse.Size(m) +} +func (m *PerfStandbyElectionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PerfStandbyElectionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PerfStandbyElectionResponse proto.InternalMessageInfo + +func (m *PerfStandbyElectionResponse) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *PerfStandbyElectionResponse) GetClusterID() string { + if m != nil { + return m.ClusterID + } + return "" +} + +func (m *PerfStandbyElectionResponse) GetPrimaryClusterAddr() string { + if m != nil { + return m.PrimaryClusterAddr + } + return "" +} + +func (m *PerfStandbyElectionResponse) GetCaCert() []byte { + if m != nil { + return m.CaCert + } + return nil +} + +func (m *PerfStandbyElectionResponse) GetClientCert() []byte { + if m != nil { + return m.ClientCert + } + return nil +} + +func (m *PerfStandbyElectionResponse) GetClientKey() *ClientKey { + if m != nil { + return m.ClientKey + } + return nil +} + +func init() { + proto.RegisterType((*EchoRequest)(nil), "vault.EchoRequest") + proto.RegisterType((*EchoReply)(nil), "vault.EchoReply") + proto.RegisterType((*ClientKey)(nil), "vault.ClientKey") + proto.RegisterType((*PerfStandbyElectionInput)(nil), "vault.PerfStandbyElectionInput") + proto.RegisterType((*PerfStandbyElectionResponse)(nil), "vault.PerfStandbyElectionResponse") +} + +func init() { + 
proto.RegisterFile("vault/request_forwarding_service.proto", fileDescriptor_f5f7512e4ab7b58a) +} + +var fileDescriptor_f5f7512e4ab7b58a = []byte{ + // 552 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x51, 0x6b, 0xdb, 0x3c, + 0x14, 0xad, 0x92, 0xb4, 0x25, 0x37, 0x6e, 0x49, 0xf5, 0x15, 0x3e, 0x93, 0x51, 0xea, 0x7a, 0x30, + 0x02, 0x1b, 0x76, 0xe9, 0x9e, 0xf7, 0xd0, 0x95, 0x0e, 0xc2, 0x60, 0x0c, 0xf7, 0x6d, 0x2f, 0x46, + 0x95, 0x6e, 0x13, 0x31, 0xc7, 0xd6, 0x24, 0xa5, 0x8b, 0x7f, 0xdd, 0x1e, 0xf7, 0x5b, 0xf6, 0xb6, + 0x9f, 0x30, 0x2c, 0x2b, 0x4d, 0x42, 0xdb, 0x3d, 0xed, 0x25, 0xe8, 0x9e, 0x73, 0x92, 0x7b, 0x74, + 0x74, 0x6f, 0xe0, 0xd5, 0x3d, 0x5b, 0x14, 0x36, 0xd5, 0xf8, 0x6d, 0x81, 0xc6, 0xe6, 0x77, 0x95, + 0xfe, 0xce, 0xb4, 0x90, 0xe5, 0x34, 0x37, 0xa8, 0xef, 0x25, 0xc7, 0x44, 0xe9, 0xca, 0x56, 0x74, + 0xd7, 0xe9, 0x46, 0x27, 0x33, 0x2c, 0x14, 0xea, 0x74, 0xad, 0x4b, 0x6d, 0xad, 0xd0, 0xb4, 0xaa, + 0xf8, 0x07, 0x81, 0xc1, 0x35, 0x9f, 0x55, 0x59, 0xfb, 0x73, 0x34, 0x84, 0xfd, 0x39, 0x1a, 0xc3, + 0xa6, 0x18, 0x92, 0x88, 0x8c, 0xfb, 0xd9, 0xaa, 0xa4, 0x67, 0x10, 0xf0, 0x62, 0x61, 0x2c, 0xea, + 0x9c, 0x09, 0xa1, 0xc3, 0x8e, 0xa3, 0x07, 0x1e, 0xbb, 0x14, 0x42, 0xd3, 0x97, 0x70, 0xb0, 0x29, + 0x31, 0x61, 0x37, 0xea, 0x8e, 0xfb, 0x59, 0xb0, 0xa1, 0x31, 0xf4, 0x0d, 0x50, 0xcd, 0xee, 0x6c, + 0xce, 0x94, 0x2a, 0x24, 0x8a, 0x5c, 0x96, 0x02, 0x97, 0x61, 0x2f, 0x22, 0xe3, 0x5e, 0x36, 0x6c, + 0x98, 0xcb, 0x96, 0x98, 0x34, 0x38, 0x8d, 0x20, 0x70, 0xea, 0xb2, 0x12, 0x98, 0x4b, 0x11, 0xee, + 0xba, 0xae, 0xd0, 0x60, 0x9f, 0x2a, 0x81, 0x13, 0x11, 0xff, 0x24, 0xd0, 0x6f, 0x6f, 0xa0, 0x8a, + 0xfa, 0x2f, 0xfe, 0x1f, 0x99, 0xeb, 0x3c, 0x61, 0xee, 0x35, 0x1c, 0x69, 0x54, 0x85, 0xe4, 0xcc, + 0xca, 0xaa, 0xcc, 0x8d, 0x65, 0x16, 0xc3, 0x6e, 0x44, 0xc6, 0x07, 0xd9, 0x70, 0x83, 0xb8, 0x69, + 0xf0, 0x7f, 0x7e, 0x93, 0x09, 0xf4, 0xaf, 0x0a, 0x89, 0xa5, 0xfd, 0x88, 0x35, 0xa5, 0xd0, 0x6b, + 0xde, 0xc9, 0xdf, 0xc2, 0x9d, 0x69, 0x00, 0x64, 0xe9, 0x72, 0x0f, 0x32, 0xb2, 0x6c, 0xaa, 0xda, + 0x79, 0x0b, 0x32, 0x52, 0x37, 0x95, 0x70, 0xbd, 0x83, 0x8c, 0x88, 0x78, 0x04, 0xe1, 0x67, 0xd4, + 0x77, 0x37, 0x96, 0x95, 0xe2, 0xb6, 0xbe, 0x2e, 0x90, 0x37, 0xb6, 0x27, 0xa5, 0x5a, 0xd8, 0xf8, + 0x17, 0x81, 0x17, 0x4f, 0x90, 0x19, 0x1a, 0x55, 0x95, 0x06, 0xe9, 0x21, 0x74, 0xa4, 0xf0, 0x7d, + 0x3b, 0x52, 0xd0, 0x13, 0x80, 0x55, 0x70, 0x52, 0xf8, 0x67, 0xef, 0x7b, 0x64, 0x22, 0xe8, 0x39, + 0x1c, 0x2b, 0x2d, 0xe7, 0x4c, 0xd7, 0xf9, 0xd6, 0x7c, 0x74, 0x9d, 0x90, 0x7a, 0xee, 0x6a, 0x63, + 0x4c, 0xfe, 0x87, 0x7d, 0xce, 0x72, 0x8e, 0xda, 0x7a, 0xc3, 0x7b, 0x9c, 0x5d, 0xa1, 0xb6, 0xf4, + 0x14, 0x06, 0xdc, 0x05, 0xd0, 0x92, 0xbb, 0x8e, 0x84, 0x16, 0x72, 0x82, 0x14, 0x7c, 0x95, 0x7f, + 0xc5, 0x3a, 0xdc, 0x8b, 0xc8, 0x78, 0x70, 0x31, 0x4c, 0xdc, 0xa0, 0x27, 0x0f, 0xd1, 0x35, 0xe6, + 0xfc, 0xf1, 0xe2, 0x37, 0x81, 0x23, 0x3f, 0xda, 0x1f, 0x1e, 0x16, 0x80, 0xbe, 0x83, 0x43, 0x5f, + 0xad, 0xc6, 0xfe, 0xbf, 0x64, 0xbd, 0x1f, 0x89, 0x07, 0x47, 0xc7, 0xdb, 0x60, 0x1b, 0x4f, 0xbc, + 0x43, 0x13, 0xe8, 0x35, 0x03, 0x47, 0xa9, 0xef, 0xbc, 0xb1, 0x3f, 0xa3, 0xe1, 0x16, 0xa6, 0x8a, + 0x3a, 0xde, 0xa1, 0x05, 0x9c, 0x35, 0x79, 0x57, 0x7a, 0xce, 0x4a, 0x8e, 0x8f, 0x62, 0x6f, 0x1d, + 0x9c, 0xfa, 0x2f, 0x3e, 0xf7, 0x6c, 0xa3, 0xf8, 0x79, 0xc1, 0xda, 0xdb, 0x39, 0x79, 0x1f, 0x7f, + 0x89, 0xa6, 0xd2, 0xce, 0x16, 0xb7, 0x09, 0xaf, 0xe6, 0xe9, 0x8c, 0x99, 0x99, 0xe4, 0x95, 0x56, + 0x69, 0xfb, 0xb7, 0xe1, 0x3e, 0x6f, 0xf7, 0xdc, 0xf2, 0xbf, 0xfd, 0x13, 0x00, 0x00, 0xff, 0xff, + 0x67, 0xc6, 0xa7, 0xe1, 
0x4c, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// RequestForwardingClient is the client API for RequestForwarding service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RequestForwardingClient interface { + ForwardRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error) + Echo(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoReply, error) + PerformanceStandbyElectionRequest(ctx context.Context, in *PerfStandbyElectionInput, opts ...grpc.CallOption) (RequestForwarding_PerformanceStandbyElectionRequestClient, error) +} + +type requestForwardingClient struct { + cc *grpc.ClientConn +} + +func NewRequestForwardingClient(cc *grpc.ClientConn) RequestForwardingClient { + return &requestForwardingClient{cc} +} + +func (c *requestForwardingClient) ForwardRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error) { + out := new(forwarding.Response) + err := c.cc.Invoke(ctx, "/vault.RequestForwarding/ForwardRequest", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *requestForwardingClient) Echo(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoReply, error) { + out := new(EchoReply) + err := c.cc.Invoke(ctx, "/vault.RequestForwarding/Echo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *requestForwardingClient) PerformanceStandbyElectionRequest(ctx context.Context, in *PerfStandbyElectionInput, opts ...grpc.CallOption) (RequestForwarding_PerformanceStandbyElectionRequestClient, error) { + stream, err := c.cc.NewStream(ctx, &_RequestForwarding_serviceDesc.Streams[0], "/vault.RequestForwarding/PerformanceStandbyElectionRequest", opts...) + if err != nil { + return nil, err + } + x := &requestForwardingPerformanceStandbyElectionRequestClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type RequestForwarding_PerformanceStandbyElectionRequestClient interface { + Recv() (*PerfStandbyElectionResponse, error) + grpc.ClientStream +} + +type requestForwardingPerformanceStandbyElectionRequestClient struct { + grpc.ClientStream +} + +func (x *requestForwardingPerformanceStandbyElectionRequestClient) Recv() (*PerfStandbyElectionResponse, error) { + m := new(PerfStandbyElectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// RequestForwardingServer is the server API for RequestForwarding service. +type RequestForwardingServer interface { + ForwardRequest(context.Context, *forwarding.Request) (*forwarding.Response, error) + Echo(context.Context, *EchoRequest) (*EchoReply, error) + PerformanceStandbyElectionRequest(*PerfStandbyElectionInput, RequestForwarding_PerformanceStandbyElectionRequestServer) error +} + +// UnimplementedRequestForwardingServer can be embedded to have forward compatible implementations. 
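+// NOTE (usage sketch; the server type name is invented): embedding the
+// struct keeps a custom implementation compiling when new RPCs are added to
+// the service definition:
+//
+//	type myForwardingServer struct {
+//		UnimplementedRequestForwardingServer // stubs for any missing methods
+//	}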
+type UnimplementedRequestForwardingServer struct { +} + +func (*UnimplementedRequestForwardingServer) ForwardRequest(ctx context.Context, req *forwarding.Request) (*forwarding.Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ForwardRequest not implemented") +} +func (*UnimplementedRequestForwardingServer) Echo(ctx context.Context, req *EchoRequest) (*EchoReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Echo not implemented") +} +func (*UnimplementedRequestForwardingServer) PerformanceStandbyElectionRequest(req *PerfStandbyElectionInput, srv RequestForwarding_PerformanceStandbyElectionRequestServer) error { + return status.Errorf(codes.Unimplemented, "method PerformanceStandbyElectionRequest not implemented") +} + +func RegisterRequestForwardingServer(s *grpc.Server, srv RequestForwardingServer) { + s.RegisterService(&_RequestForwarding_serviceDesc, srv) +} + +func _RequestForwarding_ForwardRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(forwarding.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RequestForwardingServer).ForwardRequest(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vault.RequestForwarding/ForwardRequest", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RequestForwardingServer).ForwardRequest(ctx, req.(*forwarding.Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _RequestForwarding_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EchoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RequestForwardingServer).Echo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vault.RequestForwarding/Echo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RequestForwardingServer).Echo(ctx, req.(*EchoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RequestForwarding_PerformanceStandbyElectionRequest_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(PerfStandbyElectionInput) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RequestForwardingServer).PerformanceStandbyElectionRequest(m, &requestForwardingPerformanceStandbyElectionRequestServer{stream}) +} + +type RequestForwarding_PerformanceStandbyElectionRequestServer interface { + Send(*PerfStandbyElectionResponse) error + grpc.ServerStream +} + +type requestForwardingPerformanceStandbyElectionRequestServer struct { + grpc.ServerStream +} + +func (x *requestForwardingPerformanceStandbyElectionRequestServer) Send(m *PerfStandbyElectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _RequestForwarding_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vault.RequestForwarding", + HandlerType: (*RequestForwardingServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ForwardRequest", + Handler: _RequestForwarding_ForwardRequest_Handler, + }, + { + MethodName: "Echo", + Handler: _RequestForwarding_Echo_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "PerformanceStandbyElectionRequest", + Handler: _RequestForwarding_PerformanceStandbyElectionRequest_Handler, + ServerStreams: true, + }, + }, + 
Metadata: "vault/request_forwarding_service.proto", +} diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto new file mode 100644 index 00000000..90401e23 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/vault"; + +import "helper/forwarding/types.proto"; + +package vault; + +message EchoRequest { + string message = 1; + // ClusterAddr is used to send up a standby node's address to the active + // node upon heartbeat + string cluster_addr = 2; + // ClusterAddrs is used to send up a list of cluster addresses to a dr + // primary from a dr secondary + repeated string cluster_addrs = 3; + + uint64 raft_applied_index = 4; + string raft_node_id = 5; +} + +message EchoReply { + string message = 1; + repeated string cluster_addrs = 2; + uint32 replication_state = 3; + uint64 raft_applied_index = 4; + string raft_node_id = 5; +} + +message ClientKey { + string type = 1; + bytes x = 2; + bytes y = 3; + bytes d = 4; +} + +message PerfStandbyElectionInput {} +message PerfStandbyElectionResponse { + string id = 1; + string cluster_id = 2; + string primary_cluster_addr = 3; + bytes ca_cert = 4; + bytes client_cert = 5; + ClientKey client_key = 6; +} + +service RequestForwarding { + rpc ForwardRequest(forwarding.Request) returns (forwarding.Response) {} + rpc Echo(EchoRequest) returns (EchoReply) {} + rpc PerformanceStandbyElectionRequest(PerfStandbyElectionInput) returns (stream PerfStandbyElectionResponse) {} +} diff --git a/vendor/github.com/hashicorp/vault/vault/request_handling.go b/vendor/github.com/hashicorp/vault/vault/request_handling.go new file mode 100644 index 00000000..aeedd18b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/request_handling.go @@ -0,0 +1,1231 @@ +package vault + +import ( + "context" + "errors" + "fmt" + "strings" + "sync/atomic" + "time" + + metrics "github.com/armon/go-metrics" + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" + uberAtomic "go.uber.org/atomic" +) + +const ( + replTimeout = 10 * time.Second +) + +var ( + // DefaultMaxRequestDuration is the amount of time we'll wait for a request + // to complete, unless overridden on a per-handler basis + DefaultMaxRequestDuration = 90 * time.Second + + egpDebugLogging bool +) + +// HandlerProperties is used to seed configuration into a vaulthttp.Handler. +// It's in this package to avoid a circular dependency +type HandlerProperties struct { + Core *Core + MaxRequestSize int64 + MaxRequestDuration time.Duration + DisablePrintableCheck bool + RecoveryMode bool + RecoveryToken *uberAtomic.String + UnauthenticatedMetricsAccess bool +} + +// fetchEntityAndDerivedPolicies returns the entity object for the given entity +// ID. 
If the entity is merged into a different entity object, the entity into
+// which the given entity ID was merged will be returned. This function
+// also returns the cumulative list of policies that the entity is entitled to.
+// This list includes the policies from the entity itself and from all the
+// groups of which the given entity ID is a member.
+func (c *Core) fetchEntityAndDerivedPolicies(ctx context.Context, tokenNS *namespace.Namespace, entityID string) (*identity.Entity, map[string][]string, error) {
+	if entityID == "" || c.identityStore == nil {
+		return nil, nil, nil
+	}
+
+	//c.logger.Debug("entity set on the token", "entity_id", te.EntityID)
+
+	// Fetch the entity
+	entity, err := c.identityStore.MemDBEntityByID(entityID, false)
+	if err != nil {
+		c.logger.Error("failed to lookup entity using its ID", "error", err)
+		return nil, nil, err
+	}
+
+	if entity == nil {
+		// If there was no corresponding entity object found, it is
+		// possible that the entity got merged into another entity. Try
+		// finding the entity based on the merged entity index.
+		entity, err = c.identityStore.MemDBEntityByMergedEntityID(entityID, false)
+		if err != nil {
+			c.logger.Error("failed to lookup entity in merged entity ID index", "error", err)
+			return nil, nil, err
+		}
+	}
+
+	policies := make(map[string][]string)
+	if entity != nil {
+		//c.logger.Debug("entity successfully fetched; adding entity policies to token's policies to create ACL")
+
+		// Attach the policies on the entity
+		if len(entity.Policies) != 0 {
+			policies[entity.NamespaceID] = append(policies[entity.NamespaceID], entity.Policies...)
+		}
+
+		groupPolicies, err := c.identityStore.groupPoliciesByEntityID(entity.ID)
+		if err != nil {
+			c.logger.Error("failed to fetch group policies", "error", err)
+			return nil, nil, err
+		}
+
+		// Filter and add the policies to the resultant set
+		for nsID, nsPolicies := range groupPolicies {
+			ns, err := NamespaceByID(ctx, nsID, c)
+			if err != nil {
+				return nil, nil, err
+			}
+			if ns == nil {
+				return nil, nil, namespace.ErrNoNamespace
+			}
+			if tokenNS.Path != ns.Path && !ns.HasParent(tokenNS) {
+				continue
+			}
+			nsPolicies = strutil.RemoveDuplicates(nsPolicies, false)
+			if len(nsPolicies) != 0 {
+				policies[nsID] = append(policies[nsID], nsPolicies...)
+			}
+		}
+	}
+
+	return entity, policies, err
+}
+
+func (c *Core) fetchACLTokenEntryAndEntity(ctx context.Context, req *logical.Request) (*ACL, *logical.TokenEntry, *identity.Entity, map[string][]string, error) {
+	defer metrics.MeasureSince([]string{"core", "fetch_acl_and_token"}, time.Now())
+
+	// Ensure there is a client token
+	if req.ClientToken == "" {
+		return nil, nil, nil, nil, fmt.Errorf("missing client token")
+	}
+
+	if c.tokenStore == nil {
+		c.logger.Error("token store is unavailable")
+		return nil, nil, nil, nil, ErrInternalError
+	}
+
+	// Resolve the token policy
+	var te *logical.TokenEntry
+	switch req.TokenEntry() {
+	case nil:
+		var err error
+		te, err = c.tokenStore.Lookup(ctx, req.ClientToken)
+		if err != nil {
+			c.logger.Error("failed to lookup token", "error", err)
+			return nil, nil, nil, nil, ErrInternalError
+		}
+		// Set the token entry here since it has not been cached yet
+		req.SetTokenEntry(te)
+	default:
+		te = req.TokenEntry()
+	}
+
+	// Ensure the token is valid
+	if te == nil {
+		return nil, nil, nil, nil, logical.ErrPermissionDenied
+	}
+
+	// CIDR checks bind all tokens except non-expiring root tokens
+	if te.TTL != 0 && len(te.BoundCIDRs) > 0 {
+		var valid bool
+		remoteSockAddr, err := sockaddr.NewSockAddr(req.Connection.RemoteAddr)
+		if err != nil {
+			if c.Logger().IsDebug() {
+				c.Logger().Debug("could not parse remote addr into sockaddr", "error", err, "remote_addr", req.Connection.RemoteAddr)
+			}
+			return nil, nil, nil, nil, logical.ErrPermissionDenied
+		}
+		for _, cidr := range te.BoundCIDRs {
+			if cidr.Contains(remoteSockAddr) {
+				valid = true
+				break
+			}
+		}
+		if !valid {
+			return nil, nil, nil, nil, logical.ErrPermissionDenied
+		}
+	}
+
+	policies := make(map[string][]string)
+	// Add the token's policies
+	policies[te.NamespaceID] = append(policies[te.NamespaceID], te.Policies...)
+
+	tokenNS, err := NamespaceByID(ctx, te.NamespaceID, c)
+	if err != nil {
+		c.logger.Error("failed to fetch token namespace", "error", err)
+		return nil, nil, nil, nil, ErrInternalError
+	}
+	if tokenNS == nil {
+		c.logger.Error("failed to fetch token namespace", "error", namespace.ErrNoNamespace)
+		return nil, nil, nil, nil, ErrInternalError
+	}
+
+	// Add identity policies from all the namespaces
+	entity, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, tokenNS, te.EntityID)
+	if err != nil {
+		return nil, nil, nil, nil, ErrInternalError
+	}
+	for nsID, nsPolicies := range identityPolicies {
+		policies[nsID] = append(policies[nsID], nsPolicies...)
+	}
+
+	// Attach the token's namespace information to the context. Wrapping tokens
+	// should be able to be used anywhere, so we also special-case behavior.
+	var tokenCtx context.Context
+	if len(policies) == 1 &&
+		len(policies[te.NamespaceID]) == 1 &&
+		(policies[te.NamespaceID][0] == responseWrappingPolicyName ||
+			policies[te.NamespaceID][0] == controlGroupPolicyName) &&
+		(strings.HasSuffix(req.Path, "sys/wrapping/unwrap") ||
+			strings.HasSuffix(req.Path, "sys/wrapping/lookup") ||
+			strings.HasSuffix(req.Path, "sys/wrapping/rewrap")) {
+		// Use the request namespace; will find the copy of the policy for the
+		// local namespace
+		tokenCtx = ctx
+	} else {
+		// Use the token's namespace for looking up policy
+		tokenCtx = namespace.ContextWithNamespace(ctx, tokenNS)
+	}
+
+	// Construct the corresponding ACL object. ACL construction should be
+	// performed on the token's namespace.
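+	// The policies argument maps namespace ID to policy names; an illustrative
+	// shape (values assumed) is map[string][]string{"root": {"default", "team-read"}}.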
+ acl, err := c.policyStore.ACL(tokenCtx, entity, policies) + if err != nil { + if errwrap.ContainsType(err, new(TemplateError)) { + c.logger.Warn("permission denied due to a templated policy being invalid or containing directives not satisfied by the requestor", "error", err) + return nil, nil, nil, nil, logical.ErrPermissionDenied + } + c.logger.Error("failed to construct ACL", "error", err) + return nil, nil, nil, nil, ErrInternalError + } + + return acl, te, entity, identityPolicies, nil +} + +func (c *Core) checkToken(ctx context.Context, req *logical.Request, unauth bool) (*logical.Auth, *logical.TokenEntry, error) { + defer metrics.MeasureSince([]string{"core", "check_token"}, time.Now()) + + var acl *ACL + var te *logical.TokenEntry + var entity *identity.Entity + var identityPolicies map[string][]string + var err error + + // Even if unauth, if a token is provided, there's little reason not to + // gather as much info as possible for the audit log and to e.g. control + // trace mode for EGPs. + if !unauth || (unauth && req.ClientToken != "") { + acl, te, entity, identityPolicies, err = c.fetchACLTokenEntryAndEntity(ctx, req) + // In the unauth case we don't want to fail the command, since it's + // unauth, we just have no information to attach to the request, so + // ignore errors...this was best-effort anyways + if err != nil && !unauth { + return nil, te, err + } + } + + if entity != nil && entity.Disabled { + c.logger.Warn("permission denied as the entity on the token is disabled") + return nil, te, logical.ErrPermissionDenied + } + if te != nil && te.EntityID != "" && entity == nil { + if c.perfStandby { + return nil, nil, logical.ErrPerfStandbyPleaseForward + } + c.logger.Warn("permission denied as the entity on the token is invalid") + return nil, te, logical.ErrPermissionDenied + } + + // Check if this is a root protected path + rootPath := c.router.RootPath(ctx, req.Path) + + if rootPath && unauth { + return nil, nil, errors.New("cannot access root path in unauthenticated request") + } + + // At this point we won't be forwarding a raw request; we should delete + // authorization headers as appropriate + switch req.ClientTokenSource { + case logical.ClientTokenFromVaultHeader: + delete(req.Headers, consts.AuthHeaderName) + case logical.ClientTokenFromAuthzHeader: + if headers, ok := req.Headers["Authorization"]; ok { + retHeaders := make([]string, 0, len(headers)) + for _, v := range headers { + if strings.HasPrefix(v, "Bearer ") { + continue + } + retHeaders = append(retHeaders, v) + } + req.Headers["Authorization"] = retHeaders + } + } + + // When we receive a write of either type, rather than require clients to + // PUT/POST and trust the operation, we ask the backend to give us the real + // skinny -- if the backend implements an existence check, it can tell us + // whether a particular resource exists. Then we can mark it as an update + // or creation as appropriate. 
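+	// Concretely, the switch below maps the three outcomes to operations:
+	// no existence check implemented -> treat as an update; resource exists
+	// -> force an update; resource missing -> force a create.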
+ if req.Operation == logical.CreateOperation || req.Operation == logical.UpdateOperation { + existsResp, checkExists, resourceExists, err := c.router.RouteExistenceCheck(ctx, req) + switch err { + case logical.ErrUnsupportedPath: + // fail later via bad path to avoid confusing items in the log + checkExists = false + case nil: + if existsResp != nil && existsResp.IsError() { + return nil, te, existsResp.Error() + } + // Otherwise, continue on + default: + c.logger.Error("failed to run existence check", "error", err) + if _, ok := err.(errutil.UserError); ok { + return nil, te, err + } else { + return nil, te, ErrInternalError + } + } + + switch { + case checkExists == false: + // No existence check, so always treat it as an update operation, which is how it is pre 0.5 + req.Operation = logical.UpdateOperation + case resourceExists == true: + // It exists, so force an update operation + req.Operation = logical.UpdateOperation + case resourceExists == false: + // It doesn't exist, force a create operation + req.Operation = logical.CreateOperation + default: + panic("unreachable code") + } + } + // Create the auth response + auth := &logical.Auth{ + ClientToken: req.ClientToken, + Accessor: req.ClientTokenAccessor, + } + + if te != nil { + auth.IdentityPolicies = identityPolicies[te.NamespaceID] + auth.TokenPolicies = te.Policies + auth.Policies = append(te.Policies, identityPolicies[te.NamespaceID]...) + auth.Metadata = te.Meta + auth.DisplayName = te.DisplayName + auth.EntityID = te.EntityID + delete(identityPolicies, te.NamespaceID) + auth.ExternalNamespacePolicies = identityPolicies + // Store the entity ID in the request object + req.EntityID = te.EntityID + auth.TokenType = te.Type + } + + // Check the standard non-root ACLs. Return the token entry if it's not + // allowed so we can decrement the use count. + authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{ + Unauth: unauth, + RootPrivsRequired: rootPath, + }) + + if !authResults.Allowed { + retErr := authResults.Error + + // If we get a control group error and we are a performance standby, + // restore the client token information to the request so that we can + // forward this request properly to the active node. 
+ if retErr.ErrorOrNil() != nil && checkErrControlGroupTokenNeedsCreated(retErr) && + c.perfStandby && len(req.ClientToken) != 0 { + switch req.ClientTokenSource { + case logical.ClientTokenFromVaultHeader: + req.Headers[consts.AuthHeaderName] = []string{req.ClientToken} + case logical.ClientTokenFromAuthzHeader: + req.Headers["Authorization"] = append(req.Headers["Authorization"], fmt.Sprintf("Bearer %s", req.ClientToken)) + } + // We also return the appropriate error so that the caller can forward the + // request to the active node + return auth, te, logical.ErrPerfStandbyPleaseForward + } + + if authResults.Error.ErrorOrNil() == nil || authResults.DeniedError { + retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + } + return auth, te, retErr + } + + return auth, te, nil +} + +// HandleRequest is used to handle a new incoming request +func (c *Core) HandleRequest(httpCtx context.Context, req *logical.Request) (resp *logical.Response, err error) { + return c.switchedLockHandleRequest(httpCtx, req, true) +} + +func (c *Core) switchedLockHandleRequest(httpCtx context.Context, req *logical.Request, doLocking bool) (resp *logical.Response, err error) { + if doLocking { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + } + if c.Sealed() { + return nil, consts.ErrSealed + } + if c.standby && !c.perfStandby { + return nil, consts.ErrStandby + } + + if c.activeContext == nil || c.activeContext.Err() != nil { + return nil, errors.New("active context canceled after getting state lock") + } + + ctx, cancel := context.WithCancel(c.activeContext) + go func(ctx context.Context, httpCtx context.Context) { + select { + case <-ctx.Done(): + case <-httpCtx.Done(): + cancel() + } + }(ctx, httpCtx) + + ns, err := namespace.FromContext(httpCtx) + if err != nil { + cancel() + return nil, errwrap.Wrapf("could not parse namespace from http context: {{err}}", err) + } + ctx = namespace.ContextWithNamespace(ctx, ns) + + resp, err = c.handleCancelableRequest(ctx, ns, req) + + req.SetTokenEntry(nil) + cancel() + return resp, err +} + +func (c *Core) handleCancelableRequest(ctx context.Context, ns *namespace.Namespace, req *logical.Request) (resp *logical.Response, err error) { + // Allowing writing to a path ending in / makes it extremely difficult to + // understand user intent for the filesystem-like backends (kv, + // cubbyhole) -- did they want a key named foo/ or did they want to write + // to a directory foo/ with no (or forgotten) key, or...? It also affects + // lookup, because paths ending in / are considered prefixes by some + // backends. Basically, it's all just terrible, so don't allow it. + if strings.HasSuffix(req.Path, "/") && + (req.Operation == logical.UpdateOperation || + req.Operation == logical.CreateOperation) { + return logical.ErrorResponse("cannot write to a path ending in '/'"), nil + } + + err = waitForReplicationState(ctx, c, req) + if err != nil { + return nil, err + } + + if !hasNamespaces(c) && ns.Path != "" { + return nil, logical.CodedError(403, "namespaces feature not enabled") + } + + var auth *logical.Auth + if c.router.LoginPath(ctx, req.Path) { + resp, auth, err = c.handleLoginRequest(ctx, req) + } else { + resp, auth, err = c.handleRequest(ctx, req) + } + + // Ensure we don't leak internal data + if resp != nil { + if resp.Secret != nil { + resp.Secret.InternalData = nil + } + if resp.Auth != nil { + resp.Auth.InternalData = nil + } + } + + // We are wrapping if there is anything to wrap (not a nil response) and a + // TTL was specified for the token. 
Errors on a call should be returned to + // the caller, so wrapping is turned off if an error is hit and the error + // is logged to the audit log. + wrapping := resp != nil && + err == nil && + !resp.IsError() && + resp.WrapInfo != nil && + resp.WrapInfo.TTL != 0 && + resp.WrapInfo.Token == "" + + if wrapping { + cubbyResp, cubbyErr := c.wrapInCubbyhole(ctx, req, resp, auth) + // If not successful, returns either an error response from the + // cubbyhole backend or an error; if either is set, set resp and err to + // those and continue so that that's what we audit log. Otherwise + // finish the wrapping and audit log that. + if cubbyResp != nil || cubbyErr != nil { + resp = cubbyResp + err = cubbyErr + } else { + wrappingResp := &logical.Response{ + WrapInfo: resp.WrapInfo, + Warnings: resp.Warnings, + } + resp = wrappingResp + } + } + + auditResp := resp + // When unwrapping we want to log the actual response that will be written + // out. We still want to return the raw value to avoid automatic updating + // to any of it. + if req.Path == "sys/wrapping/unwrap" && + resp != nil && + resp.Data != nil && + resp.Data[logical.HTTPRawBody] != nil { + + // Decode the JSON + if resp.Data[logical.HTTPRawBodyAlreadyJSONDecoded] != nil { + delete(resp.Data, logical.HTTPRawBodyAlreadyJSONDecoded) + } else { + httpResp := &logical.HTTPResponse{} + err := jsonutil.DecodeJSON(resp.Data[logical.HTTPRawBody].([]byte), httpResp) + if err != nil { + c.logger.Error("failed to unmarshal wrapped HTTP response for audit logging", "error", err) + return nil, ErrInternalError + } + + auditResp = logical.HTTPResponseToLogicalResponse(httpResp) + } + } + + var nonHMACReqDataKeys []string + var nonHMACRespDataKeys []string + entry := c.router.MatchingMountEntry(ctx, req.Path) + if entry != nil { + // Get and set ignored HMAC'd value. Reset those back to empty afterwards. + if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok { + nonHMACReqDataKeys = rawVals.([]string) + } + + // Get and set ignored HMAC'd value. Reset those back to empty afterwards. 
+ if auditResp != nil { + if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_response_keys"); ok { + nonHMACRespDataKeys = rawVals.([]string) + } + } + } + + // Create an audit trail of the response + if !isControlGroupRun(req) { + logInput := &logical.LogInput{ + Auth: auth, + Request: req, + Response: auditResp, + OuterErr: err, + NonHMACReqDataKeys: nonHMACReqDataKeys, + NonHMACRespDataKeys: nonHMACRespDataKeys, + } + if auditErr := c.auditBroker.LogResponse(ctx, logInput, c.auditedHeaders); auditErr != nil { + c.logger.Error("failed to audit response", "request_path", req.Path, "error", auditErr) + return nil, ErrInternalError + } + } + + return +} + +func isControlGroupRun(req *logical.Request) bool { + return req.ControlGroup != nil +} + +func (c *Core) doRouting(ctx context.Context, req *logical.Request) (*logical.Response, error) { + // If we're replicating and we get a read-only error from a backend, need to forward to primary + resp, err := c.router.Route(ctx, req) + if shouldForward(c, resp, err) { + return forward(ctx, c, req) + } + atomic.AddUint64(c.counters.requests, 1) + return resp, err +} + +func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp *logical.Response, retAuth *logical.Auth, retErr error) { + defer metrics.MeasureSince([]string{"core", "handle_request"}, time.Now()) + + var nonHMACReqDataKeys []string + entry := c.router.MatchingMountEntry(ctx, req.Path) + if entry != nil { + // Get and set ignored HMAC'd value. + if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok { + nonHMACReqDataKeys = rawVals.([]string) + } + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + c.logger.Error("failed to get namespace from context", "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + return + } + + // Validate the token + auth, te, ctErr := c.checkToken(ctx, req, false) + if ctErr == logical.ErrPerfStandbyPleaseForward { + return nil, nil, ctErr + } + + // We run this logic first because we want to decrement the use count even + // in the case of an error (assuming we can successfully look up; if we + // need to forward, we exit before now) + if te != nil && !isControlGroupRun(req) { + // Attempt to use the token (decrement NumUses) + var err error + te, err = c.tokenStore.UseToken(ctx, te) + if err != nil { + c.logger.Error("failed to use token", "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + return nil, nil, retErr + } + if te == nil { + // Token has been revoked by this point + retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + return nil, nil, retErr + } + if te.NumUses == tokenRevocationPending { + // We defer a revocation until after logic has run, since this is a + // valid request (this is the token's final use). We pass the ID in + // directly just to be safe in case something else modifies te later. 
+ defer func(id string) { + nsActiveCtx := namespace.ContextWithNamespace(c.activeContext, ns) + leaseID, err := c.expiration.CreateOrFetchRevocationLeaseByToken(nsActiveCtx, te) + if err == nil { + err = c.expiration.LazyRevoke(ctx, leaseID) + } + if err != nil { + c.logger.Error("failed to revoke token", "error", err) + retResp = nil + retAuth = nil + retErr = multierror.Append(retErr, ErrInternalError) + } + if retResp != nil && retResp.Secret != nil && + // Some backends return a TTL even without a Lease ID + retResp.Secret.LeaseID != "" { + retResp = logical.ErrorResponse("Secret cannot be returned; token had one use left, so leased credentials were immediately revoked.") + return + } + }(te.ID) + } + } + + if ctErr != nil { + newCtErr, cgResp, cgAuth, cgRetErr := checkNeedsCG(ctx, c, req, auth, ctErr, nonHMACReqDataKeys) + switch { + case newCtErr != nil: + ctErr = newCtErr + case cgResp != nil || cgAuth != nil: + if cgRetErr != nil { + retErr = multierror.Append(retErr, cgRetErr) + } + return cgResp, cgAuth, retErr + } + + // If it is an internal error we return that, otherwise we + // return invalid request so that the status codes can be correct + switch { + case ctErr == ErrInternalError, + errwrap.Contains(ctErr, ErrInternalError.Error()), + ctErr == logical.ErrPermissionDenied, + errwrap.Contains(ctErr, logical.ErrPermissionDenied.Error()): + switch ctErr.(type) { + case *multierror.Error: + retErr = ctErr + default: + retErr = multierror.Append(retErr, ctErr) + } + default: + retErr = multierror.Append(retErr, logical.ErrInvalidRequest) + } + + if !isControlGroupRun(req) { + logInput := &logical.LogInput{ + Auth: auth, + Request: req, + OuterErr: ctErr, + NonHMACReqDataKeys: nonHMACReqDataKeys, + } + if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { + c.logger.Error("failed to audit request", "path", req.Path, "error", err) + } + } + + if errwrap.Contains(retErr, ErrInternalError.Error()) { + return nil, auth, retErr + } + return logical.ErrorResponse(ctErr.Error()), auth, retErr + } + + // Attach the display name + req.DisplayName = auth.DisplayName + + // Create an audit trail of the request + if !isControlGroupRun(req) { + logInput := &logical.LogInput{ + Auth: auth, + Request: req, + NonHMACReqDataKeys: nonHMACReqDataKeys, + } + if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { + c.logger.Error("failed to audit request", "path", req.Path, "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + return nil, auth, retErr + } + } + + // Route the request + resp, routeErr := c.doRouting(ctx, req) + if resp != nil { + + // If wrapping is used, use the shortest between the request and response + var wrapTTL time.Duration + var wrapFormat, creationPath string + var sealWrap bool + + // Ensure no wrap info information is set other than, possibly, the TTL + if resp.WrapInfo != nil { + if resp.WrapInfo.TTL > 0 { + wrapTTL = resp.WrapInfo.TTL + } + wrapFormat = resp.WrapInfo.Format + creationPath = resp.WrapInfo.CreationPath + sealWrap = resp.WrapInfo.SealWrap + resp.WrapInfo = nil + } + + if req.WrapInfo != nil { + if req.WrapInfo.TTL > 0 { + switch { + case wrapTTL == 0: + wrapTTL = req.WrapInfo.TTL + case req.WrapInfo.TTL < wrapTTL: + wrapTTL = req.WrapInfo.TTL + } + } + // If the wrap format hasn't been set by the response, set it to + // the request format + if req.WrapInfo.Format != "" && wrapFormat == "" { + wrapFormat = req.WrapInfo.Format + } + } + + if wrapTTL > 0 { + resp.WrapInfo = 
&wrapping.ResponseWrapInfo{ + TTL: wrapTTL, + Format: wrapFormat, + CreationPath: creationPath, + SealWrap: sealWrap, + } + } + } + + // If there is a secret, we must register it with the expiration manager. + // We exclude renewal of a lease, since it does not need to be re-registered + if resp != nil && resp.Secret != nil && !strings.HasPrefix(req.Path, "sys/renew") && + !strings.HasPrefix(req.Path, "sys/leases/renew") { + // KV mounts should return the TTL but not register + // for a lease as this provides a massive slowdown + registerLease := true + + matchingMountEntry := c.router.MatchingMountEntry(ctx, req.Path) + if matchingMountEntry == nil { + c.logger.Error("unable to retrieve kv mount entry from router") + retErr = multierror.Append(retErr, ErrInternalError) + return nil, auth, retErr + } + + switch matchingMountEntry.Type { + case "kv", "generic": + // If we are kv type, first see if we are an older passthrough + // backend, and otherwise check the mount entry options. + matchingBackend := c.router.MatchingBackend(ctx, req.Path) + if matchingBackend == nil { + c.logger.Error("unable to retrieve kv backend from router") + retErr = multierror.Append(retErr, ErrInternalError) + return nil, auth, retErr + } + + if ptbe, ok := matchingBackend.(*PassthroughBackend); ok { + if !ptbe.GeneratesLeases() { + registerLease = false + resp.Secret.Renewable = false + } + } else if matchingMountEntry.Options == nil || matchingMountEntry.Options["leased_passthrough"] != "true" { + registerLease = false + resp.Secret.Renewable = false + } + + case "plugin": + // If we are a plugin type and the plugin name is "kv" check the + // mount entry options. + if matchingMountEntry.Config.PluginName == "kv" && (matchingMountEntry.Options == nil || matchingMountEntry.Options["leased_passthrough"] != "true") { + registerLease = false + resp.Secret.Renewable = false + } + } + + if registerLease { + sysView := c.router.MatchingSystemView(ctx, req.Path) + if sysView == nil { + c.logger.Error("unable to look up sys view for login path", "request_path", req.Path) + return nil, nil, ErrInternalError + } + + ttl, warnings, err := framework.CalculateTTL(sysView, 0, resp.Secret.TTL, 0, resp.Secret.MaxTTL, 0, time.Time{}) + if err != nil { + return nil, nil, err + } + for _, warning := range warnings { + resp.AddWarning(warning) + } + resp.Secret.TTL = ttl + + registerFunc, funcGetErr := getLeaseRegisterFunc(c) + if funcGetErr != nil { + retErr = multierror.Append(retErr, funcGetErr) + return nil, auth, retErr + } + + leaseID, err := registerFunc(ctx, req, resp) + if err != nil { + c.logger.Error("failed to register lease", "request_path", req.Path, "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + return nil, auth, retErr + } + resp.Secret.LeaseID = leaseID + + // Get the actual time of the lease + le, err := c.expiration.FetchLeaseTimes(ctx, leaseID) + if err != nil { + c.logger.Error("failed to fetch updated lease time", "request_path", req.Path, "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + return nil, auth, retErr + } + // We round here because the clock will have already started + // ticking, so we'll end up always returning 299 instead of 300 or + // 26399 instead of 26400, say, even if it's just a few + // microseconds. This provides a nicer UX. + resp.Secret.TTL = le.ExpireTime.Sub(time.Now()).Round(time.Second) + } + } + + // Only the token store is allowed to return an auth block, for any + // other request this is an internal error. 
We exclude renewal of a token, + // since it does not need to be re-registered + if resp != nil && resp.Auth != nil && !strings.HasPrefix(req.Path, "auth/token/renew") { + if !strings.HasPrefix(req.Path, "auth/token/") { + c.logger.Error("unexpected Auth response for non-token backend", "request_path", req.Path) + retErr = multierror.Append(retErr, ErrInternalError) + return nil, auth, retErr + } + + // Fetch the namespace to which the token belongs + tokenNS, err := NamespaceByID(ctx, te.NamespaceID, c) + if err != nil { + c.logger.Error("failed to fetch token's namespace", "error", err) + retErr = multierror.Append(retErr, err) + return nil, auth, retErr + } + if tokenNS == nil { + c.logger.Error(namespace.ErrNoNamespace.Error()) + retErr = multierror.Append(retErr, namespace.ErrNoNamespace) + return nil, auth, retErr + } + + _, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, tokenNS, resp.Auth.EntityID) + if err != nil { + // Best-effort clean up on error, so we log the cleanup error as a + // warning but still return as internal error. + if err := c.tokenStore.revokeOrphan(ctx, resp.Auth.ClientToken); err != nil { + c.logger.Warn("failed to clean up token lease from entity and policy lookup failure", "request_path", req.Path, "error", err) + } + return nil, nil, ErrInternalError + } + + resp.Auth.TokenPolicies = policyutil.SanitizePolicies(resp.Auth.Policies, policyutil.DoNotAddDefaultPolicy) + switch resp.Auth.TokenType { + case logical.TokenTypeBatch: + case logical.TokenTypeService: + if err := c.expiration.RegisterAuth(ctx, &logical.TokenEntry{ + TTL: auth.TTL, + Policies: auth.TokenPolicies, + Path: resp.Auth.CreationPath, + NamespaceID: ns.ID, + }, resp.Auth); err != nil { + // Best-effort clean up on error, so we log the cleanup error as + // a warning but still return as internal error. + if err := c.tokenStore.revokeOrphan(ctx, resp.Auth.ClientToken); err != nil { + c.logger.Warn("failed to clean up token lease during auth/token/ request", "request_path", req.Path, "error", err) + } + c.logger.Error("failed to register token lease during auth/token/ request", "request_path", req.Path, "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + return nil, auth, retErr + } + } + + // We do these later since it's not meaningful for backends/expmgr to + // have what is purely a snapshot of current identity policies, and + // plugins can be confused if they are checking contents of + // Auth.Policies instead of Auth.TokenPolicies + resp.Auth.Policies = policyutil.SanitizePolicies(append(resp.Auth.Policies, identityPolicies[te.NamespaceID]...), policyutil.DoNotAddDefaultPolicy) + resp.Auth.IdentityPolicies = policyutil.SanitizePolicies(identityPolicies[te.NamespaceID], policyutil.DoNotAddDefaultPolicy) + delete(identityPolicies, te.NamespaceID) + resp.Auth.ExternalNamespacePolicies = identityPolicies + } + + if resp != nil && + req.Path == "cubbyhole/response" && + len(te.Policies) == 1 && + te.Policies[0] == responseWrappingPolicyName { + resp.AddWarning("Reading from 'cubbyhole/response' is deprecated. Please use sys/wrapping/unwrap to unwrap responses, as it provides additional security checks and other benefits.") + } + + // Return the response and error + if routeErr != nil { + retErr = multierror.Append(retErr, routeErr) + } + + return resp, auth, retErr +} + +// handleLoginRequest is used to handle a login request, which is an +// unauthenticated request to the backend. 
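+//
+// A minimal sketch (an assumption, for illustration only) of the kind of
+// request that reaches this handler; the mount path and data fields are
+// example values:
+//
+//	req := &logical.Request{
+//		Operation: logical.UpdateOperation,
+//		Path:      "auth/userpass/login/alice",
+//		Data:      map[string]interface{}{"password": "..."},
+//	}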
+func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (retResp *logical.Response, retAuth *logical.Auth, retErr error) { + defer metrics.MeasureSince([]string{"core", "handle_login_request"}, time.Now()) + + req.Unauthenticated = true + + var nonHMACReqDataKeys []string + entry := c.router.MatchingMountEntry(ctx, req.Path) + if entry != nil { + // Get and set ignored HMAC'd value. + if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok { + nonHMACReqDataKeys = rawVals.([]string) + } + } + + // Do an unauth check. This will cause EGP policies to be checked + var auth *logical.Auth + var ctErr error + auth, _, ctErr = c.checkToken(ctx, req, true) + if ctErr == logical.ErrPerfStandbyPleaseForward { + return nil, nil, ctErr + } + if ctErr != nil { + // If it is an internal error we return that, otherwise we + // return invalid request so that the status codes can be correct + var errType error + switch ctErr { + case ErrInternalError, logical.ErrPermissionDenied: + errType = ctErr + default: + errType = logical.ErrInvalidRequest + } + + logInput := &logical.LogInput{ + Auth: auth, + Request: req, + OuterErr: ctErr, + NonHMACReqDataKeys: nonHMACReqDataKeys, + } + if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { + c.logger.Error("failed to audit request", "path", req.Path, "error", err) + return nil, nil, ErrInternalError + } + + if errType != nil { + retErr = multierror.Append(retErr, errType) + } + if ctErr == ErrInternalError { + return nil, auth, retErr + } + return logical.ErrorResponse(ctErr.Error()), auth, retErr + } + + // Create an audit trail of the request. Attach auth if it was returned, + // e.g. if a token was provided. + logInput := &logical.LogInput{ + Auth: auth, + Request: req, + NonHMACReqDataKeys: nonHMACReqDataKeys, + } + if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { + c.logger.Error("failed to audit request", "path", req.Path, "error", err) + return nil, nil, ErrInternalError + } + + // The token store uses authentication even when creating a new token, + // so it's handled in handleRequest. It should not be reached here. + if strings.HasPrefix(req.Path, "auth/token/") { + c.logger.Error("unexpected login request for token backend", "request_path", req.Path) + return nil, nil, ErrInternalError + } + + // Route the request + resp, routeErr := c.doRouting(ctx, req) + if resp != nil { + // If wrapping is used, use the shortest between the request and response + var wrapTTL time.Duration + var wrapFormat, creationPath string + var sealWrap bool + + // Ensure no wrap info information is set other than, possibly, the TTL + if resp.WrapInfo != nil { + if resp.WrapInfo.TTL > 0 { + wrapTTL = resp.WrapInfo.TTL + } + wrapFormat = resp.WrapInfo.Format + creationPath = resp.WrapInfo.CreationPath + sealWrap = resp.WrapInfo.SealWrap + resp.WrapInfo = nil + } + + if req.WrapInfo != nil { + if req.WrapInfo.TTL > 0 { + switch { + case wrapTTL == 0: + wrapTTL = req.WrapInfo.TTL + case req.WrapInfo.TTL < wrapTTL: + wrapTTL = req.WrapInfo.TTL + } + } + if req.WrapInfo.Format != "" && wrapFormat == "" { + wrapFormat = req.WrapInfo.Format + } + } + + if wrapTTL > 0 { + resp.WrapInfo = &wrapping.ResponseWrapInfo{ + TTL: wrapTTL, + Format: wrapFormat, + CreationPath: creationPath, + SealWrap: sealWrap, + } + } + } + + // A login request should never return a secret! 
+	if resp != nil && resp.Secret != nil {
+		c.logger.Error("unexpected Secret response for login path", "request_path", req.Path)
+		return nil, nil, ErrInternalError
+	}
+
+	// If the response generated an authentication, then generate the token
+	if resp != nil && resp.Auth != nil {
+
+		var entity *identity.Entity
+		auth = resp.Auth
+
+		mEntry := c.router.MatchingMountEntry(ctx, req.Path)
+
+		if auth.Alias != nil &&
+			mEntry != nil &&
+			!mEntry.Local &&
+			c.identityStore != nil {
+			// Overwrite the mount type and mount accessor in the alias
+			// information
+			auth.Alias.MountType = req.MountType
+			auth.Alias.MountAccessor = req.MountAccessor
+
+			if auth.Alias.Name == "" {
+				return nil, nil, fmt.Errorf("missing name in alias")
+			}
+
+			var err error
+
+			// Fetch the entity for the alias, or create an entity if one
+			// doesn't exist.
+			entity, err = c.identityStore.CreateOrFetchEntity(ctx, auth.Alias)
+			if err != nil {
+				entity, err = possiblyForwardAliasCreation(ctx, c, err, auth, entity)
+			}
+			if err != nil {
+				return nil, nil, err
+			}
+			if entity == nil {
+				return nil, nil, fmt.Errorf("failed to create an entity for the authenticated alias")
+			}
+
+			if entity.Disabled {
+				return nil, nil, logical.ErrPermissionDenied
+			}
+
+			auth.EntityID = entity.ID
+			if auth.GroupAliases != nil {
+				validAliases, err := c.identityStore.refreshExternalGroupMembershipsByEntityID(ctx, auth.EntityID, auth.GroupAliases)
+				if err != nil {
+					return nil, nil, err
+				}
+				auth.GroupAliases = validAliases
+			}
+		}
+
+		// Determine the source of the login
+		source := c.router.MatchingMount(ctx, req.Path)
+		source = strings.TrimPrefix(source, credentialRoutePrefix)
+		source = strings.Replace(source, "/", "-", -1)
+
+		// Prepend the source to the display name
+		auth.DisplayName = strings.TrimSuffix(source+auth.DisplayName, "-")
+
+		sysView := c.router.MatchingSystemView(ctx, req.Path)
+		if sysView == nil {
+			c.logger.Error("unable to look up sys view for login path", "request_path", req.Path)
+			return nil, nil, ErrInternalError
+		}
+
+		tokenTTL, warnings, err := framework.CalculateTTL(sysView, 0, auth.TTL, auth.Period, auth.MaxTTL, auth.ExplicitMaxTTL, time.Time{})
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, warning := range warnings {
+			resp.AddWarning(warning)
+		}
+
+		ns, err := namespace.FromContext(ctx)
+		if err != nil {
+			return nil, nil, err
+		}
+		_, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, ns, auth.EntityID)
+		if err != nil {
+			return nil, nil, ErrInternalError
+		}
+
+		auth.TokenPolicies = policyutil.SanitizePolicies(auth.Policies, !auth.NoDefaultPolicy)
+		allPolicies := policyutil.SanitizePolicies(append(auth.TokenPolicies, identityPolicies[ns.ID]...), policyutil.DoNotAddDefaultPolicy)
+
+		// Prevent internal policies from being assigned to tokens. We check
+		// this on auth.Policies including derived ones from Identity before
+		// actually making the token.
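+		// For example, an auth method that returned Policies: []string{"root"}
+		// is rejected below with logical.ErrInvalidRequest.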
+ for _, policy := range allPolicies { + if policy == "root" { + return logical.ErrorResponse("auth methods cannot create root tokens"), nil, logical.ErrInvalidRequest + } + if strutil.StrListContains(nonAssignablePolicies, policy) { + return logical.ErrorResponse(fmt.Sprintf("cannot assign policy %q", policy)), nil, logical.ErrInvalidRequest + } + } + + var registerFunc RegisterAuthFunc + var funcGetErr error + // Batch tokens should not be forwarded to perf standby + if auth.TokenType == logical.TokenTypeBatch { + registerFunc = c.RegisterAuth + } else { + registerFunc, funcGetErr = getAuthRegisterFunc(c) + } + if funcGetErr != nil { + retErr = multierror.Append(retErr, funcGetErr) + return nil, auth, retErr + } + + err = registerFunc(ctx, tokenTTL, req.Path, auth) + switch { + case err == nil: + case err == ErrInternalError: + return nil, auth, err + default: + return logical.ErrorResponse(err.Error()), auth, logical.ErrInvalidRequest + } + + auth.IdentityPolicies = policyutil.SanitizePolicies(identityPolicies[ns.ID], policyutil.DoNotAddDefaultPolicy) + delete(identityPolicies, ns.ID) + auth.ExternalNamespacePolicies = identityPolicies + auth.Policies = allPolicies + + // Attach the display name, might be used by audit backends + req.DisplayName = auth.DisplayName + + } + + return resp, auth, routeErr +} + +// RegisterAuth uses a logical.Auth object to create a token entry in the token +// store, and registers a corresponding token lease to the expiration manager. +func (c *Core) RegisterAuth(ctx context.Context, tokenTTL time.Duration, path string, auth *logical.Auth) error { + // We first assign token policies to what was returned from the backend + // via auth.Policies. Then, we get the full set of policies into + // auth.Policies from the backend + entity information -- this is not + // stored in the token, but we perform sanity checks on it and return + // that information to the user. 
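+	//
+	// An illustrative call (the TTL, path, and auth values here are assumed)
+	// from a login handler:
+	//
+	//	err := c.RegisterAuth(ctx, 30*time.Minute, "auth/userpass/login/alice", auth)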
+ + // Generate a token + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + te := logical.TokenEntry{ + Path: path, + Meta: auth.Metadata, + DisplayName: auth.DisplayName, + CreationTime: time.Now().Unix(), + TTL: tokenTTL, + NumUses: auth.NumUses, + EntityID: auth.EntityID, + BoundCIDRs: auth.BoundCIDRs, + Policies: auth.TokenPolicies, + NamespaceID: ns.ID, + ExplicitMaxTTL: auth.ExplicitMaxTTL, + Type: auth.TokenType, + } + + if te.TTL == 0 && (len(te.Policies) != 1 || te.Policies[0] != "root") { + c.logger.Error("refusing to create a non-root zero TTL token") + return ErrInternalError + } + + if err := c.tokenStore.create(ctx, &te); err != nil { + c.logger.Error("failed to create token", "error", err) + return ErrInternalError + } + + // Populate the client token, accessor, and TTL + auth.ClientToken = te.ID + auth.Accessor = te.Accessor + auth.TTL = te.TTL + auth.Orphan = te.Parent == "" + + switch auth.TokenType { + case logical.TokenTypeBatch: + // Ensure it's not marked renewable since it isn't + auth.Renewable = false + case logical.TokenTypeService: + // Register with the expiration manager + if err := c.expiration.RegisterAuth(ctx, &te, auth); err != nil { + if err := c.tokenStore.revokeOrphan(ctx, te.ID); err != nil { + c.logger.Warn("failed to clean up token lease during login request", "request_path", path, "error", err) + } + c.logger.Error("failed to register token lease during login request", "request_path", path, "error", err) + return ErrInternalError + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/request_handling_util.go b/vendor/github.com/hashicorp/vault/vault/request_handling_util.go new file mode 100644 index 00000000..652d08b2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/request_handling_util.go @@ -0,0 +1,47 @@ +// +build !enterprise + +package vault + +import ( + "context" + + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/sdk/logical" +) + +func waitForReplicationState(context.Context, *Core, *logical.Request) error { return nil } + +func checkNeedsCG(context.Context, *Core, *logical.Request, *logical.Auth, error, []string) (error, *logical.Response, *logical.Auth, error) { + return nil, nil, nil, nil +} + +func checkErrControlGroupTokenNeedsCreated(err error) bool { + return false +} + +func shouldForward(c *Core, resp *logical.Response, err error) bool { + return false +} + +func syncCounter(c *Core) { +} + +func couldForward(c *Core) bool { + return false +} + +func forward(ctx context.Context, c *Core, req *logical.Request) (*logical.Response, error) { + panic("forward called in OSS Vault") +} + +func getLeaseRegisterFunc(c *Core) (func(context.Context, *logical.Request, *logical.Response) (string, error), error) { + return c.expiration.Register, nil +} + +func getAuthRegisterFunc(c *Core) (RegisterAuthFunc, error) { + return c.RegisterAuth, nil +} + +func possiblyForwardAliasCreation(ctx context.Context, c *Core, inErr error, auth *logical.Auth, entity *identity.Entity) (*identity.Entity, error) { + return entity, inErr +} diff --git a/vendor/github.com/hashicorp/vault/vault/rollback.go b/vendor/github.com/hashicorp/vault/vault/rollback.go new file mode 100644 index 00000000..cd85f0b0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/rollback.go @@ -0,0 +1,317 @@ +package vault + +import ( + "context" + "errors" + "strings" + "sync" + "time" + + metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + + 
"github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // rollbackPeriod is how often we attempt rollbacks for all the backends + rollbackPeriod = time.Minute +) + +// RollbackManager is responsible for performing rollbacks of partial +// secrets within logical backends. +// +// During normal operations, it is possible for logical backends to +// error partially through an operation. These are called "partial secrets": +// they are never sent back to a user, but they do need to be cleaned up. +// This manager handles that by periodically (on a timer) requesting that the +// backends clean up. +// +// The RollbackManager periodically initiates a logical.RollbackOperation +// on every mounted logical backend. It ensures that only one rollback operation +// is in-flight at any given time within a single seal/unseal phase. +type RollbackManager struct { + logger log.Logger + + // This gives the current mount table of both logical and credential backends, + // plus a RWMutex that is locked for reading. It is up to the caller to RUnlock + // it when done with the mount table. + backends func() []*MountEntry + + router *Router + period time.Duration + + inflightAll sync.WaitGroup + inflight map[string]*rollbackState + inflightLock sync.RWMutex + + doneCh chan struct{} + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + quitContext context.Context + + core *Core +} + +// rollbackState is used to track the state of a single rollback attempt +type rollbackState struct { + lastError error + sync.WaitGroup + cancelLockGrabCtx context.Context + cancelLockGrabCtxCancel context.CancelFunc +} + +// NewRollbackManager is used to create a new rollback manager +func NewRollbackManager(ctx context.Context, logger log.Logger, backendsFunc func() []*MountEntry, router *Router, core *Core) *RollbackManager { + r := &RollbackManager{ + logger: logger, + backends: backendsFunc, + router: router, + period: rollbackPeriod, + inflight: make(map[string]*rollbackState), + doneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + quitContext: ctx, + core: core, + } + return r +} + +// Start starts the rollback manager +func (m *RollbackManager) Start() { + go m.run() +} + +// Stop stops the running manager. This will wait for any in-flight +// rollbacks to complete. 
+func (m *RollbackManager) Stop() {
+	m.shutdownLock.Lock()
+	defer m.shutdownLock.Unlock()
+	if !m.shutdown {
+		m.shutdown = true
+		close(m.shutdownCh)
+		<-m.doneCh
+	}
+	m.inflightAll.Wait()
+}
+
+// run is a long running routine to periodically invoke rollback
+func (m *RollbackManager) run() {
+	m.logger.Info("starting rollback manager")
+	tick := time.NewTicker(m.period)
+	defer tick.Stop()
+	defer close(m.doneCh)
+	for {
+		select {
+		case <-tick.C:
+			m.triggerRollbacks()
+
+		case <-m.shutdownCh:
+			m.logger.Info("stopping rollback manager")
+			return
+		}
+	}
+}
+
+// triggerRollbacks is used to trigger the rollbacks across all the backends
+func (m *RollbackManager) triggerRollbacks() {
+
+	backends := m.backends()
+
+	for _, e := range backends {
+		path := e.Path
+		if e.Table == credentialTableType {
+			path = credentialRoutePrefix + path
+		}
+
+		// When the mount is filtered, the backend will be nil
+		ctx := namespace.ContextWithNamespace(m.quitContext, e.namespace)
+		backend := m.router.MatchingBackend(ctx, path)
+		if backend == nil {
+			continue
+		}
+		fullPath := e.namespace.Path + path
+
+		// Start a rollback if necessary
+		m.startOrLookupRollback(ctx, fullPath, true)
+	}
+}
+
+// startOrLookupRollback starts an async rollback attempt for the given path,
+// or returns the attempt already in flight. It acquires the inflightLock
+// itself, so callers must not hold it.
+func (m *RollbackManager) startOrLookupRollback(ctx context.Context, fullPath string, grabStatelock bool) *rollbackState {
+	m.inflightLock.Lock()
+	defer m.inflightLock.Unlock()
+	rsInflight, ok := m.inflight[fullPath]
+	if ok {
+		return rsInflight
+	}
+
+	cancelCtx, cancelFunc := context.WithCancel(context.Background())
+	rs := &rollbackState{
+		cancelLockGrabCtx:       cancelCtx,
+		cancelLockGrabCtxCancel: cancelFunc,
+	}
+
+	// If no inflight rollback is already running, kick one off
+	m.inflight[fullPath] = rs
+	rs.Add(1)
+	m.inflightAll.Add(1)
+	go m.attemptRollback(ctx, fullPath, rs, grabStatelock)
+	return rs
+}
+
+// attemptRollback invokes a RollbackOperation for the given path
+func (m *RollbackManager) attemptRollback(ctx context.Context, fullPath string, rs *rollbackState, grabStatelock bool) (err error) {
+	defer metrics.MeasureSince([]string{"rollback", "attempt", strings.Replace(fullPath, "/", "-", -1)}, time.Now())
+
+	defer func() {
+		rs.lastError = err
+		rs.Done()
+		m.inflightAll.Done()
+		m.inflightLock.Lock()
+		delete(m.inflight, fullPath)
+		m.inflightLock.Unlock()
+	}()
+
+	ns, err := namespace.FromContext(ctx)
+	if err != nil {
+		m.logger.Error("rollback failed to derive namespace from context", "path", fullPath)
+		return err
+	}
+	if ns == nil {
+		m.logger.Error("rollback found no namespace", "path", fullPath)
+		return namespace.ErrNoNamespace
+	}
+
+	// Invoke a RollbackOperation
+	req := &logical.Request{
+		Operation: logical.RollbackOperation,
+		Path:      ns.TrimmedPath(fullPath),
+	}
+
+	releaseLock := true
+	if grabStatelock {
+		doneCh := make(chan struct{})
+		defer close(doneCh)
+
+		stopCh := make(chan struct{})
+		go func() {
+			defer close(stopCh)
+
+			select {
+			case <-m.shutdownCh:
+			case <-rs.cancelLockGrabCtx.Done():
+			case <-doneCh:
+			}
+		}()
+
+		// Grab the statelock or stop
+		if stopped := grabLockOrStop(m.core.stateLock.RLock, m.core.stateLock.RUnlock, stopCh); stopped {
+			// If we stopped due to shutdown, return. Otherwise another thread
+			// is holding the lock for us, continue on.
+ select { + case <-m.shutdownCh: + return errors.New("rollback shutting down") + default: + releaseLock = false + } + } + } + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithTimeout(ctx, DefaultMaxRequestDuration) + resp, err := m.router.Route(ctx, req) + if grabStatelock && releaseLock { + m.core.stateLock.RUnlock() + } + cancelFunc() + + // If the error is an unsupported operation, then it doesn't + // matter, the backend doesn't support it. + if err == logical.ErrUnsupportedOperation { + err = nil + } + // If we failed due to read-only storage, we can't do anything; ignore + if (err != nil && strings.Contains(err.Error(), logical.ErrReadOnly.Error())) || + (resp.IsError() && strings.Contains(resp.Error().Error(), logical.ErrReadOnly.Error())) { + err = nil + } + if err != nil { + m.logger.Error("error rolling back", "path", fullPath, "error", err) + } + return +} + +// Rollback is used to trigger an immediate rollback of the path, +// or to join an existing rollback operation if in flight. Caller should have +// core's statelock held (write OR read). If an already inflight rollback is +// happening this function will simply wait for it to complete +func (m *RollbackManager) Rollback(ctx context.Context, path string) error { + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + fullPath := ns.Path + path + + // Check for an existing attempt or start one if none + rs := m.startOrLookupRollback(ctx, fullPath, false) + + // Since we have the statelock held, tell any inflight rollback to give up + // trying to acquire it. This will prevent deadlocks in the case where we + // have the write lock. In the case where it was waiting to grab + // a read lock it will then simply continue with the rollback + // operation under the protection of our write lock. + rs.cancelLockGrabCtxCancel() + + // It's safe to do this, since the other thread either already has the lock + // held, or we just canceled it above. + rs.Wait() + + // Return the last error + return rs.lastError +} + +// The methods below are the hooks from core that are called pre/post seal. 
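+//
+// startRollback wires the manager to the live mount and auth tables and starts
+// its ticker; stopRollback tears it down again before sealing.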
+ +// startRollback is used to start the rollback manager after unsealing +func (c *Core) startRollback() error { + backendsFunc := func() []*MountEntry { + ret := []*MountEntry{} + c.mountsLock.RLock() + defer c.mountsLock.RUnlock() + // During teardown/setup after a leader change or unseal there could be + // something racy here so make sure the table isn't nil + if c.mounts != nil { + for _, entry := range c.mounts.Entries { + ret = append(ret, entry) + } + } + c.authLock.RLock() + defer c.authLock.RUnlock() + // During teardown/setup after a leader change or unseal there could be + // something racy here so make sure the table isn't nil + if c.auth != nil { + for _, entry := range c.auth.Entries { + ret = append(ret, entry) + } + } + return ret + } + rollbackLogger := c.baseLogger.Named("rollback") + c.AddLogger(rollbackLogger) + c.rollback = NewRollbackManager(c.activeContext, rollbackLogger, backendsFunc, c.router, c) + c.rollback.Start() + return nil +} + +// stopRollback is used to stop running the rollback manager before sealing +func (c *Core) stopRollback() error { + if c.rollback != nil { + c.rollback.Stop() + c.rollback = nil + } + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/router.go b/vendor/github.com/hashicorp/vault/vault/router.go new file mode 100644 index 00000000..2666c007 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/router.go @@ -0,0 +1,860 @@ +package vault + +import ( + "context" + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + metrics "github.com/armon/go-metrics" + radix "github.com/armon/go-radix" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +var ( + deniedPassthroughRequestHeaders = []string{ + consts.AuthHeaderName, + } +) + +// Router is used to do prefix based routing of a request to a logical backend +type Router struct { + l sync.RWMutex + root *radix.Tree + mountUUIDCache *radix.Tree + mountAccessorCache *radix.Tree + tokenStoreSaltFunc func(context.Context) (*salt.Salt, error) + // storagePrefix maps the prefix used for storage (ala the BarrierView) + // to the backend. This is used to map a key back into the backend that owns it. 
+ // For example, logical/uuid1/foobar -> secrets/ (kv backend) + foobar
+ storagePrefix *radix.Tree
+ logger hclog.Logger
+}
+
+// NewRouter returns a new router
+func NewRouter() *Router {
+ r := &Router{
+ root: radix.New(),
+ storagePrefix: radix.New(),
+ mountUUIDCache: radix.New(),
+ mountAccessorCache: radix.New(),
+ }
+ return r
+}
+
+// routeEntry is used to represent a mount point in the router
+type routeEntry struct {
+ tainted bool
+ backend logical.Backend
+ mountEntry *MountEntry
+ storageView logical.Storage
+ storagePrefix string
+ rootPaths atomic.Value
+ loginPaths atomic.Value
+ l sync.RWMutex
+}
+
+type validateMountResponse struct {
+ MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type"`
+ MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor"`
+ MountPath string `json:"mount_path" structs:"mount_path" mapstructure:"mount_path"`
+ MountLocal bool `json:"mount_local" structs:"mount_local" mapstructure:"mount_local"`
+}
+
+func (r *Router) reset() {
+ r.l.Lock()
+ defer r.l.Unlock()
+ r.root = radix.New()
+ r.storagePrefix = radix.New()
+ r.mountUUIDCache = radix.New()
+ r.mountAccessorCache = radix.New()
+}
+
+// validateMountByAccessor returns the mount type and ID for a given mount
+// accessor
+func (r *Router) validateMountByAccessor(accessor string) *validateMountResponse {
+ if accessor == "" {
+ return nil
+ }
+
+ mountEntry := r.MatchingMountByAccessor(accessor)
+ if mountEntry == nil {
+ return nil
+ }
+
+ mountPath := mountEntry.Path
+ if mountEntry.Table == credentialTableType {
+ mountPath = credentialRoutePrefix + mountPath
+ }
+
+ return &validateMountResponse{
+ MountAccessor: mountEntry.Accessor,
+ MountType: mountEntry.Type,
+ MountPath: mountPath,
+ MountLocal: mountEntry.Local,
+ }
+}
+
+// SaltID is used to apply a salt and hash to an ID to make sure it's not reversible
+func (re *routeEntry) SaltID(id string) string {
+ return salt.SaltID(re.mountEntry.UUID, id, salt.SHA1Hash)
+}
+
+// Mount is used to expose a logical backend at a given prefix, using a unique salt,
+// and the barrier view for that path.
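+// Mounting fails if the new prefix would nest under an existing mount, or if
+// the entry is missing its storage prefix, UUID, or accessor.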
+func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *MountEntry, storageView *BarrierView) error {
+ r.l.Lock()
+ defer r.l.Unlock()
+
+ // prepend namespace
+ prefix = mountEntry.Namespace().Path + prefix
+
+ // Check if this is a nested mount
+ if existing, _, ok := r.root.LongestPrefix(prefix); ok && existing != "" {
+ return fmt.Errorf("cannot mount under existing mount %q", existing)
+ }
+
+ // Build the paths
+ paths := new(logical.Paths)
+ if backend != nil {
+ specialPaths := backend.SpecialPaths()
+ if specialPaths != nil {
+ paths = specialPaths
+ }
+ }
+
+ // Create a mount entry
+ re := &routeEntry{
+ tainted: mountEntry.Tainted,
+ backend: backend,
+ mountEntry: mountEntry,
+ storagePrefix: storageView.Prefix(),
+ storageView: storageView,
+ }
+ re.rootPaths.Store(pathsToRadix(paths.Root))
+ re.loginPaths.Store(pathsToRadix(paths.Unauthenticated))
+
+ switch {
+ case prefix == "":
+ return fmt.Errorf("missing prefix to be used for router entry; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
+ case re.storagePrefix == "":
+ return fmt.Errorf("missing storage view prefix; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
+ case re.mountEntry.UUID == "":
+ return fmt.Errorf("missing mount identifier; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
+ case re.mountEntry.Accessor == "":
+ return fmt.Errorf("missing mount accessor; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
+ }
+
+ r.root.Insert(prefix, re)
+ r.storagePrefix.Insert(re.storagePrefix, re)
+ r.mountUUIDCache.Insert(re.mountEntry.UUID, re.mountEntry)
+ r.mountAccessorCache.Insert(re.mountEntry.Accessor, re.mountEntry)
+
+ return nil
+}
+
+// Unmount is used to remove a logical backend from a given prefix
+func (r *Router) Unmount(ctx context.Context, prefix string) error {
+ ns, err := namespace.FromContext(ctx)
+ if err != nil {
+ return err
+ }
+ prefix = ns.Path + prefix
+
+ r.l.Lock()
+ defer r.l.Unlock()
+
+ // Fast-path out if the backend doesn't exist
+ raw, ok := r.root.Get(prefix)
+ if !ok {
+ return nil
+ }
+
+ // Call backend's Cleanup routine
+ re := raw.(*routeEntry)
+ if re.backend != nil {
+ re.backend.Cleanup(ctx)
+ }
+
+ // Purge from the radix trees
+ r.root.Delete(prefix)
+ r.storagePrefix.Delete(re.storagePrefix)
+ r.mountUUIDCache.Delete(re.mountEntry.UUID)
+ r.mountAccessorCache.Delete(re.mountEntry.Accessor)
+
+ return nil
+}
+
+// Remount is used to change the mount location of a logical backend
+func (r *Router) Remount(ctx context.Context, src, dst string) error {
+ ns, err := namespace.FromContext(ctx)
+ if err != nil {
+ return err
+ }
+ src = ns.Path + src
+ dst = ns.Path + dst
+
+ r.l.Lock()
+ defer r.l.Unlock()
+
+ // Check for existing mount
+ raw, ok := r.root.Get(src)
+ if !ok {
+ return fmt.Errorf("no mount at %q", src)
+ }
+
+ // Update the mount point
+ r.root.Delete(src)
+ r.root.Insert(dst, raw)
+ return nil
+}
+
+// Taint is used to mark a path as tainted. This means only RollbackOperation or
+// RevokeOperation requests are allowed to proceed
+func (r *Router) Taint(ctx context.Context, path string) error {
+ ns, err := namespace.FromContext(ctx)
+ if err != nil {
+ return err
+ }
+ path = ns.Path + path
+
+ r.l.Lock()
+ defer r.l.Unlock()
+ _, raw, ok := r.root.LongestPrefix(path)
+ if ok {
+ raw.(*routeEntry).tainted = true
+ }
+ return nil
+}
+
+// Untaint is used to unmark a path as tainted.
+func (r *Router) Untaint(ctx context.Context, path string) error { + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + path = ns.Path + path + + r.l.Lock() + defer r.l.Unlock() + _, raw, ok := r.root.LongestPrefix(path) + if ok { + raw.(*routeEntry).tainted = false + } + return nil +} + +func (r *Router) MatchingMountByUUID(mountID string) *MountEntry { + if mountID == "" { + return nil + } + + r.l.RLock() + + _, raw, ok := r.mountUUIDCache.LongestPrefix(mountID) + if !ok { + r.l.RUnlock() + return nil + } + + r.l.RUnlock() + return raw.(*MountEntry) +} + +// MatchingMountByAccessor returns the MountEntry by accessor lookup +func (r *Router) MatchingMountByAccessor(mountAccessor string) *MountEntry { + if mountAccessor == "" { + return nil + } + + r.l.RLock() + + _, raw, ok := r.mountAccessorCache.LongestPrefix(mountAccessor) + if !ok { + r.l.RUnlock() + return nil + } + + r.l.RUnlock() + return raw.(*MountEntry) +} + +// MatchingMount returns the mount prefix that would be used for a path +func (r *Router) MatchingMount(ctx context.Context, path string) string { + r.l.RLock() + mount := r.matchingMountInternal(ctx, path) + r.l.RUnlock() + return mount +} + +func (r *Router) matchingMountInternal(ctx context.Context, path string) string { + ns, err := namespace.FromContext(ctx) + if err != nil { + return "" + } + path = ns.Path + path + + mount, _, ok := r.root.LongestPrefix(path) + if !ok { + return "" + } + return mount +} + +// matchingPrefixInternal returns a mount prefix that a path may be a part of +func (r *Router) matchingPrefixInternal(ctx context.Context, path string) string { + ns, err := namespace.FromContext(ctx) + if err != nil { + return "" + } + path = ns.Path + path + + var existing string + fn := func(existingPath string, v interface{}) bool { + if strings.HasPrefix(existingPath, path) { + existing = existingPath + return true + } + return false + } + r.root.WalkPrefix(path, fn) + return existing +} + +// MountConflict determines if there are potential path conflicts +func (r *Router) MountConflict(ctx context.Context, path string) string { + r.l.RLock() + defer r.l.RUnlock() + if exactMatch := r.matchingMountInternal(ctx, path); exactMatch != "" { + return exactMatch + } + if prefixMatch := r.matchingPrefixInternal(ctx, path); prefixMatch != "" { + return prefixMatch + } + return "" +} + +// MatchingStorageByAPIPath/StoragePath returns the storage used for +// API/Storage paths respectively +func (r *Router) MatchingStorageByAPIPath(ctx context.Context, path string) logical.Storage { + return r.matchingStorage(ctx, path, true) +} +func (r *Router) MatchingStorageByStoragePath(ctx context.Context, path string) logical.Storage { + return r.matchingStorage(ctx, path, false) +} +func (r *Router) matchingStorage(ctx context.Context, path string, apiPath bool) logical.Storage { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil + } + path = ns.Path + path + + var raw interface{} + var ok bool + r.l.RLock() + if apiPath { + _, raw, ok = r.root.LongestPrefix(path) + } else { + _, raw, ok = r.storagePrefix.LongestPrefix(path) + } + r.l.RUnlock() + if !ok { + return nil + } + return raw.(*routeEntry).storageView +} + +// MatchingMountEntry returns the MountEntry used for a path +func (r *Router) MatchingMountEntry(ctx context.Context, path string) *MountEntry { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil + } + path = ns.Path + path + + r.l.RLock() + _, raw, ok := r.root.LongestPrefix(path) + r.l.RUnlock() + 
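+ // No route entry matched this path prefix; report no mount entry.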
if !ok {
+ return nil
+ }
+ return raw.(*routeEntry).mountEntry
+}
+
+// MatchingBackend returns the backend used for a path
+func (r *Router) MatchingBackend(ctx context.Context, path string) logical.Backend {
+ ns, err := namespace.FromContext(ctx)
+ if err != nil {
+ return nil
+ }
+ path = ns.Path + path
+
+ r.l.RLock()
+ _, raw, ok := r.root.LongestPrefix(path)
+ r.l.RUnlock()
+ if !ok {
+ return nil
+ }
+ return raw.(*routeEntry).backend
+}
+
+// MatchingSystemView returns the SystemView used for a path
+func (r *Router) MatchingSystemView(ctx context.Context, path string) logical.SystemView {
+ ns, err := namespace.FromContext(ctx)
+ if err != nil {
+ return nil
+ }
+ path = ns.Path + path
+
+ r.l.RLock()
+ _, raw, ok := r.root.LongestPrefix(path)
+ r.l.RUnlock()
+ if !ok || raw.(*routeEntry).backend == nil {
+ return nil
+ }
+ return raw.(*routeEntry).backend.System()
+}
+
+// MatchingStoragePrefixByAPIPath returns the storage prefix for the given api path
+func (r *Router) MatchingStoragePrefixByAPIPath(ctx context.Context, path string) (string, bool) {
+ ns, err := namespace.FromContext(ctx)
+ if err != nil {
+ return "", false
+ }
+ path = ns.Path + path
+
+ _, prefix, found := r.matchingMountEntryByPath(ctx, path, true)
+ return prefix, found
+}
+
+// MatchingAPIPrefixByStoragePath returns the api path information for the given storage path
+func (r *Router) MatchingAPIPrefixByStoragePath(ctx context.Context, path string) (*namespace.Namespace, string, string, bool) {
+ me, prefix, found := r.matchingMountEntryByPath(ctx, path, false)
+ if !found {
+ return nil, "", "", found
+ }
+
+ mountPath := me.Path
+ // Add back the prefix for credential backends
+ if strings.HasPrefix(path, credentialBarrierPrefix) {
+ mountPath = credentialRoutePrefix + mountPath
+ }
+
+ return me.Namespace(), mountPath, prefix, found
+}
+
+func (r *Router) matchingMountEntryByPath(ctx context.Context, path string, apiPath bool) (*MountEntry, string, bool) {
+ var raw interface{}
+ var ok bool
+ r.l.RLock()
+ if apiPath {
+ _, raw, ok = r.root.LongestPrefix(path)
+ } else {
+ _, raw, ok = r.storagePrefix.LongestPrefix(path)
+ }
+ r.l.RUnlock()
+ if !ok {
+ return nil, "", false
+ }
+
+ // Extract the mount path and storage prefix
+ re := raw.(*routeEntry)
+ prefix := re.storagePrefix
+
+ return re.mountEntry, prefix, true
+}
+
+// Route is used to route a given request
+func (r *Router) Route(ctx context.Context, req *logical.Request) (*logical.Response, error) {
+ resp, _, _, err := r.routeCommon(ctx, req, false)
+ return resp, err
+}
+
+// RouteExistenceCheck is used to route a given existence check request
+func (r *Router) RouteExistenceCheck(ctx context.Context, req *logical.Request) (*logical.Response, bool, bool, error) {
+ resp, ok, exists, err := r.routeCommon(ctx, req, true)
+ return resp, ok, exists, err
+}
+
+func (r *Router) routeCommon(ctx context.Context, req *logical.Request, existenceCheck bool) (*logical.Response, bool, bool, error) {
+ ns, err := namespace.FromContext(ctx)
+ if err != nil {
+ return nil, false, false, err
+ }
+
+ // Find the mount point
+ r.l.RLock()
+ adjustedPath := req.Path
+ mount, raw, ok := r.root.LongestPrefix(ns.Path + adjustedPath)
+ if !ok && !strings.HasSuffix(adjustedPath, "/") {
+ // Re-check for a backend by appending a slash. This lets "foo" mean
+ // "foo/" at the root level which is almost always what we want.
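+ // (For example, a request for "secret" is retried as "secret/" so that it
+ // still reaches the mount registered at "secret/".)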
+ adjustedPath += "/" + mount, raw, ok = r.root.LongestPrefix(ns.Path + adjustedPath) + } + r.l.RUnlock() + if !ok { + return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath + } + req.Path = adjustedPath + defer metrics.MeasureSince([]string{"route", string(req.Operation), + strings.Replace(mount, "/", "-", -1)}, time.Now()) + re := raw.(*routeEntry) + + // Grab a read lock on the route entry, this protects against the backend + // being reloaded during a request. The exception is a renew request on the + // token store; such a request will have already been routed through the + // token store -> exp manager -> here so we need to not grab the lock again + // or we'll be recursively grabbing it. + if !(req.Operation == logical.RenewOperation && strings.HasPrefix(req.Path, "auth/token/")) { + re.l.RLock() + defer re.l.RUnlock() + } + + // Filtered mounts will have a nil backend + if re.backend == nil { + return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath + } + + // If the path is tainted, we reject any operation except for + // Rollback and Revoke + if re.tainted { + switch req.Operation { + case logical.RevokeOperation, logical.RollbackOperation: + default: + return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath + } + } + + // Adjust the path to exclude the routing prefix + originalPath := req.Path + req.Path = strings.TrimPrefix(ns.Path+req.Path, mount) + req.MountPoint = mount + req.MountType = re.mountEntry.Type + if req.Path == "/" { + req.Path = "" + } + + // Attach the storage view for the request + req.Storage = re.storageView + + originalEntityID := req.EntityID + + // Hash the request token unless the request is being routed to the token + // or system backend. 
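+ // The switch below leaves the token untouched for "auth/token/" and "sys/"
+ // paths, swaps in a double-salted or cubbyhole-specific ID for cubbyhole
+ // requests, and salts the token with the route entry's salt otherwise.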
+ clientToken := req.ClientToken + switch { + case strings.HasPrefix(originalPath, "auth/token/"): + case strings.HasPrefix(originalPath, "sys/"): + case strings.HasPrefix(originalPath, cubbyholeMountPath): + if req.Operation == logical.RollbackOperation { + // Backend doesn't support this and it can't properly look up a + // cubbyhole ID so just return here + return nil, false, false, nil + } + + te := req.TokenEntry() + + if te == nil { + return nil, false, false, fmt.Errorf("nil token entry") + } + + if te.Type != logical.TokenTypeService { + return logical.ErrorResponse(`cubbyhole operations are only supported by "service" type tokens`), false, false, nil + } + + switch { + case te.NamespaceID == namespace.RootNamespaceID && !strings.HasPrefix(req.ClientToken, "s."): + // In order for the token store to revoke later, we need to have the same + // salted ID, so we double-salt what's going to the cubbyhole backend + salt, err := r.tokenStoreSaltFunc(ctx) + if err != nil { + return nil, false, false, err + } + req.ClientToken = re.SaltID(salt.SaltID(req.ClientToken)) + + default: + if te.CubbyholeID == "" { + return nil, false, false, fmt.Errorf("empty cubbyhole id") + } + req.ClientToken = te.CubbyholeID + } + + default: + req.ClientToken = re.SaltID(req.ClientToken) + } + + // Cache the pointer to the original connection object + originalConn := req.Connection + + // Cache the identifier of the request + originalReqID := req.ID + + // Cache the client token's number of uses in the request + originalClientTokenRemainingUses := req.ClientTokenRemainingUses + req.ClientTokenRemainingUses = 0 + + originalMFACreds := req.MFACreds + req.MFACreds = nil + + originalControlGroup := req.ControlGroup + req.ControlGroup = nil + + // Cache the headers + headers := req.Headers + req.Headers = nil + + // Filter and add passthrough headers to the backend + var passthroughRequestHeaders []string + if rawVal, ok := re.mountEntry.synthesizedConfigCache.Load("passthrough_request_headers"); ok { + passthroughRequestHeaders = rawVal.([]string) + } + var allowedResponseHeaders []string + if rawVal, ok := re.mountEntry.synthesizedConfigCache.Load("allowed_response_headers"); ok { + allowedResponseHeaders = rawVal.([]string) + } + + if len(passthroughRequestHeaders) > 0 { + req.Headers = filteredHeaders(headers, passthroughRequestHeaders, deniedPassthroughRequestHeaders) + } + + // Cache the wrap info of the request + var wrapInfo *logical.RequestWrapInfo + if req.WrapInfo != nil { + wrapInfo = &logical.RequestWrapInfo{ + TTL: req.WrapInfo.TTL, + Format: req.WrapInfo.Format, + SealWrap: req.WrapInfo.SealWrap, + } + } + + originalPolicyOverride := req.PolicyOverride + reqTokenEntry := req.TokenEntry() + req.SetTokenEntry(nil) + + // Reset the request before returning + defer func() { + req.Path = originalPath + req.MountPoint = mount + req.MountType = re.mountEntry.Type + req.Connection = originalConn + req.ID = originalReqID + req.Storage = nil + req.ClientToken = clientToken + req.ClientTokenRemainingUses = originalClientTokenRemainingUses + req.WrapInfo = wrapInfo + req.Headers = headers + req.PolicyOverride = originalPolicyOverride + // This is only set in one place, after routing, so should never be set + // by a backend + req.SetLastRemoteWAL(0) + + // This will be used for attaching the mount accessor for the identities + // returned by the authentication backends + req.MountAccessor = re.mountEntry.Accessor + + req.EntityID = originalEntityID + + req.MFACreds = originalMFACreds + + 
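+ // Restore the caller's token entry and control group last.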
req.SetTokenEntry(reqTokenEntry) + req.ControlGroup = originalControlGroup + }() + + // Invoke the backend + if existenceCheck { + ok, exists, err := re.backend.HandleExistenceCheck(ctx, req) + return nil, ok, exists, err + } else { + resp, err := re.backend.HandleRequest(ctx, req) + if resp != nil { + if len(allowedResponseHeaders) > 0 { + resp.Headers = filteredHeaders(resp.Headers, allowedResponseHeaders, nil) + } else { + resp.Headers = nil + } + + if resp.Auth != nil { + // When a token gets renewed, the request hits this path and + // reaches token store. Token store delegates the renewal to the + // expiration manager. Expiration manager in-turn creates a + // different logical request and forwards the request to the auth + // backend that had initially authenticated the login request. The + // forwarding to auth backend will make this code path hit for the + // second time for the same renewal request. The accessors in the + // Alias structs should be of the auth backend and not of the token + // store. Therefore, avoiding the overwriting of accessors by + // having a check for path prefix having "renew". This gets applied + // for "renew" and "renew-self" requests. + if !strings.HasPrefix(req.Path, "renew") { + if resp.Auth.Alias != nil { + resp.Auth.Alias.MountAccessor = re.mountEntry.Accessor + } + for _, alias := range resp.Auth.GroupAliases { + alias.MountAccessor = re.mountEntry.Accessor + } + } + + switch re.mountEntry.Type { + case "token", "ns_token": + // Nothing; we respect what the token store is telling us and + // we don't allow tuning + default: + switch re.mountEntry.Config.TokenType { + case logical.TokenTypeService, logical.TokenTypeBatch: + resp.Auth.TokenType = re.mountEntry.Config.TokenType + case logical.TokenTypeDefault, logical.TokenTypeDefaultService: + switch resp.Auth.TokenType { + case logical.TokenTypeDefault, logical.TokenTypeDefaultService, logical.TokenTypeService: + resp.Auth.TokenType = logical.TokenTypeService + default: + resp.Auth.TokenType = logical.TokenTypeBatch + } + case logical.TokenTypeDefaultBatch: + switch resp.Auth.TokenType { + case logical.TokenTypeDefault, logical.TokenTypeDefaultBatch, logical.TokenTypeBatch: + resp.Auth.TokenType = logical.TokenTypeBatch + default: + resp.Auth.TokenType = logical.TokenTypeService + } + } + } + } + } + + return resp, false, false, err + } +} + +// RootPath checks if the given path requires root privileges +func (r *Router) RootPath(ctx context.Context, path string) bool { + ns, err := namespace.FromContext(ctx) + if err != nil { + return false + } + + adjustedPath := ns.Path + path + + r.l.RLock() + mount, raw, ok := r.root.LongestPrefix(adjustedPath) + r.l.RUnlock() + if !ok { + return false + } + re := raw.(*routeEntry) + + // Trim to get remaining path + remain := strings.TrimPrefix(adjustedPath, mount) + + // Check the rootPaths of this backend + rootPaths := re.rootPaths.Load().(*radix.Tree) + match, raw, ok := rootPaths.LongestPrefix(remain) + if !ok { + return false + } + prefixMatch := raw.(bool) + + // Handle the prefix match case + if prefixMatch { + return strings.HasPrefix(remain, match) + } + + // Handle the exact match case + return match == remain +} + +// LoginPath checks if the given path is used for logins +func (r *Router) LoginPath(ctx context.Context, path string) bool { + ns, err := namespace.FromContext(ctx) + if err != nil { + return false + } + + adjustedPath := ns.Path + path + + r.l.RLock() + mount, raw, ok := r.root.LongestPrefix(adjustedPath) + r.l.RUnlock() + if !ok { 
+ return false
+ }
+ re := raw.(*routeEntry)
+
+ // Trim to get remaining path
+ remain := strings.TrimPrefix(adjustedPath, mount)
+
+ // Check the loginPaths of this backend
+ loginPaths := re.loginPaths.Load().(*radix.Tree)
+ match, raw, ok := loginPaths.LongestPrefix(remain)
+ if !ok {
+ return false
+ }
+ prefixMatch := raw.(bool)
+
+ // Handle the prefix match case
+ if prefixMatch {
+ return strings.HasPrefix(remain, match)
+ }
+
+ // Handle the exact match case
+ return match == remain
+}
+
+// pathsToRadix converts a list of special paths to a radix tree.
+func pathsToRadix(paths []string) *radix.Tree {
+ tree := radix.New()
+ for _, path := range paths {
+ // Check if this is a prefix or exact match
+ prefixMatch := len(path) >= 1 && path[len(path)-1] == '*'
+ if prefixMatch {
+ path = path[:len(path)-1]
+ }
+
+ tree.Insert(path, prefixMatch)
+ }
+
+ return tree
+}
+
+// filteredHeaders returns a headers map[string][]string that
+// contains the filtered values contained in candidateHeaders. Filtering of
+// candidateHeaders from the origHeaders is done in a case-insensitive manner.
+// Headers that match values from deniedHeaders will be ignored.
+func filteredHeaders(origHeaders map[string][]string, candidateHeaders, deniedHeaders []string) map[string][]string {
+ // Short-circuit if there's nothing to filter
+ if len(candidateHeaders) == 0 {
+ return nil
+ }
+
+ retHeaders := make(map[string][]string, len(origHeaders))
+
+ // Filter candidateHeaders values through deniedHeaders first. Returns the
+ // lowercased complement set. We call even if no denied headers to get the
+ // values lowercased.
+ allowedCandidateHeaders := strutil.Difference(candidateHeaders, deniedHeaders, true)
+
+ // Create a map that uses lowercased header values as the key and the original
+ // header naming as the value for comparison down below.
+ lowerOrigHeaderKeys := make(map[string]string, len(origHeaders))
+ for key := range origHeaders {
+ lowerOrigHeaderKeys[strings.ToLower(key)] = key
+ }
+
+ // Case-insensitive compare of passthrough headers against originating
+ // headers. The returned headers will be the same casing as the originating
+ // header name.
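+ // (For example, a candidate "x-forwarded-for" matches an incoming
+ // "X-Forwarded-For" and is returned under that original key.)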
+ for _, ch := range allowedCandidateHeaders { + if header, ok := lowerOrigHeaderKeys[ch]; ok { + retHeaders[header] = origHeaders[header] + } + } + + return retHeaders +} diff --git a/vendor/github.com/hashicorp/vault/vault/router_access.go b/vendor/github.com/hashicorp/vault/vault/router_access.go new file mode 100644 index 00000000..90335d7a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/router_access.go @@ -0,0 +1,16 @@ +package vault + +import "context" + +// RouterAccess provides access into some things necessary for testing +type RouterAccess struct { + c *Core +} + +func NewRouterAccess(c *Core) *RouterAccess { + return &RouterAccess{c: c} +} + +func (r *RouterAccess) StoragePrefixByAPIPath(ctx context.Context, path string) (string, bool) { + return r.c.router.MatchingStoragePrefixByAPIPath(ctx, path) +} diff --git a/vendor/github.com/hashicorp/vault/vault/router_testing.go b/vendor/github.com/hashicorp/vault/vault/router_testing.go new file mode 100644 index 00000000..db1ff4b1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/router_testing.go @@ -0,0 +1,142 @@ +package vault + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/logical" +) + +type RouterTestHandlerFunc func(context.Context, *logical.Request) (*logical.Response, error) + +type NoopBackend struct { + sync.Mutex + + Root []string + Login []string + Paths []string + Requests []*logical.Request + Response *logical.Response + RequestHandler RouterTestHandlerFunc + Invalidations []string + DefaultLeaseTTL time.Duration + MaxLeaseTTL time.Duration + BackendType logical.BackendType +} + +func NoopBackendFactory(_ context.Context, _ *logical.BackendConfig) (logical.Backend, error) { + return &NoopBackend{}, nil +} + +func (n *NoopBackend) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) { + if req.TokenEntry() != nil { + panic("got a non-nil TokenEntry") + } + + var err error + resp := n.Response + if n.RequestHandler != nil { + resp, err = n.RequestHandler(ctx, req) + } + + n.Lock() + defer n.Unlock() + + requestCopy := *req + n.Paths = append(n.Paths, req.Path) + n.Requests = append(n.Requests, &requestCopy) + if req.Storage == nil { + return nil, fmt.Errorf("missing view") + } + + if req.Path == "panic" { + panic("as you command") + } + + return resp, err +} + +func (n *NoopBackend) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) { + return false, false, nil +} + +func (n *NoopBackend) SpecialPaths() *logical.Paths { + return &logical.Paths{ + Root: n.Root, + Unauthenticated: n.Login, + } +} + +func (n *NoopBackend) System() logical.SystemView { + defaultLeaseTTLVal := time.Hour * 24 + maxLeaseTTLVal := time.Hour * 24 * 32 + if n.DefaultLeaseTTL > 0 { + defaultLeaseTTLVal = n.DefaultLeaseTTL + } + + if n.MaxLeaseTTL > 0 { + maxLeaseTTLVal = n.MaxLeaseTTL + } + + return logical.StaticSystemView{ + DefaultLeaseTTLVal: defaultLeaseTTLVal, + MaxLeaseTTLVal: maxLeaseTTLVal, + } +} + +func (n *NoopBackend) Cleanup(ctx context.Context) { + // noop +} + +func (n *NoopBackend) InvalidateKey(ctx context.Context, k string) { + n.Invalidations = append(n.Invalidations, k) +} + +func (n *NoopBackend) Setup(ctx context.Context, config *logical.BackendConfig) error { + return nil +} + +func (n *NoopBackend) Logger() log.Logger { + return log.NewNullLogger() +} + +func (n *NoopBackend) Initialize(ctx context.Context, req *logical.InitializationRequest) error { + 
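+ // noop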
return nil +} + +func (n *NoopBackend) Type() logical.BackendType { + if n.BackendType == logical.TypeUnknown { + return logical.TypeLogical + } + return n.BackendType +} + +// InitializableBackend is a backend that knows whether it has been initialized +// properly. +type InitializableBackend struct { + *NoopBackend + isInitialized bool +} + +func (b *InitializableBackend) Initialize(ctx context.Context, req *logical.InitializationRequest) error { + if b.isInitialized { + return errors.New("already initialized") + } + + // do a dummy write, to prove that the storage is not readonly + entry := &logical.StorageEntry{ + Key: "initialize/zork", + Value: []byte("quux"), + } + err := req.Storage.Put(ctx, entry) + if err != nil { + return err + } + + b.isInitialized = true + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/seal.go b/vendor/github.com/hashicorp/vault/vault/seal.go new file mode 100644 index 00000000..8583b2ae --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/seal.go @@ -0,0 +1,507 @@ +package vault + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "sync/atomic" + + "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault/seal" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" +) + +const ( + // barrierSealConfigPath is the path used to store our seal configuration. + // This value is stored in plaintext, since we must be able to read it even + // with the Vault sealed. This is required so that we know how many secret + // parts must be used to reconstruct the master key. + barrierSealConfigPath = "core/seal-config" + + // recoverySealConfigPath is the path to the recovery key seal + // configuration. It lives inside the barrier. + // DEPRECATED: Use recoverySealConfigPlaintextPath instead. + recoverySealConfigPath = "core/recovery-seal-config" + + // recoverySealConfigPlaintextPath is the path to the recovery key seal + // configuration. This is stored in plaintext so that we can perform + // auto-unseal. 
+ recoverySealConfigPlaintextPath = "core/recovery-config" + + // recoveryKeyPath is the path to the recovery key + recoveryKeyPath = "core/recovery-key" + + // StoredBarrierKeysPath is the path used for storing HSM-encrypted unseal keys + StoredBarrierKeysPath = "core/hsm/barrier-unseal-keys" + + // hsmStoredIVPath is the path to the initialization vector for stored keys + hsmStoredIVPath = "core/hsm/iv" +) + +const ( + RecoveryTypeUnsupported = "unsupported" + RecoveryTypeShamir = "shamir" +) + +type StoredKeysSupport int + +const ( + // The 0 value of StoredKeysSupport is an invalid option + StoredKeysInvalid StoredKeysSupport = iota + StoredKeysNotSupported + StoredKeysSupportedGeneric + StoredKeysSupportedShamirMaster +) + +func (s StoredKeysSupport) String() string { + switch s { + case StoredKeysNotSupported: + return "Old-style Shamir" + case StoredKeysSupportedGeneric: + return "AutoUnseal" + case StoredKeysSupportedShamirMaster: + return "New-style Shamir" + default: + return "Invalid StoredKeys type" + } +} + +type Seal interface { + SetCore(*Core) + Init(context.Context) error + Finalize(context.Context) error + + StoredKeysSupported() StoredKeysSupport + SealWrapable() bool + SetStoredKeys(context.Context, [][]byte) error + GetStoredKeys(context.Context) ([][]byte, error) + + BarrierType() string + BarrierConfig(context.Context) (*SealConfig, error) + SetBarrierConfig(context.Context, *SealConfig) error + SetCachedBarrierConfig(*SealConfig) + + RecoveryKeySupported() bool + RecoveryType() string + RecoveryConfig(context.Context) (*SealConfig, error) + RecoveryKey(context.Context) ([]byte, error) + SetRecoveryConfig(context.Context, *SealConfig) error + SetCachedRecoveryConfig(*SealConfig) + SetRecoveryKey(context.Context, []byte) error + VerifyRecoveryKey(context.Context, []byte) error + + GetAccess() seal.Access +} + +type defaultSeal struct { + access seal.Access + config atomic.Value + core *Core +} + +func NewDefaultSeal(lowLevel seal.Access) Seal { + ret := &defaultSeal{ + access: lowLevel, + } + ret.config.Store((*SealConfig)(nil)) + return ret +} + +func (d *defaultSeal) SealWrapable() bool { + return false +} + +func (d *defaultSeal) checkCore() error { + if d.core == nil { + return fmt.Errorf("seal does not have a core set") + } + return nil +} + +func (d *defaultSeal) GetAccess() seal.Access { + return d.access +} + +func (d *defaultSeal) SetAccess(access seal.Access) { + d.access = access +} + +func (d *defaultSeal) SetCore(core *Core) { + d.core = core +} + +func (d *defaultSeal) Init(ctx context.Context) error { + return nil +} + +func (d *defaultSeal) Finalize(ctx context.Context) error { + return nil +} + +func (d *defaultSeal) BarrierType() string { + return seal.Shamir +} + +func (d *defaultSeal) StoredKeysSupported() StoredKeysSupport { + isLegacy, err := d.LegacySeal() + if err != nil { + if d.core != nil && d.core.logger != nil { + d.core.logger.Error("no seal config found, can't determine if legacy or new-style shamir") + } + return StoredKeysInvalid + } + switch { + case isLegacy: + return StoredKeysNotSupported + default: + return StoredKeysSupportedShamirMaster + } +} + +func (d *defaultSeal) RecoveryKeySupported() bool { + return false +} + +func (d *defaultSeal) SetStoredKeys(ctx context.Context, keys [][]byte) error { + isLegacy, err := d.LegacySeal() + if err != nil { + return err + } + if isLegacy { + return fmt.Errorf("stored keys are not supported") + } + return writeStoredKeys(ctx, d.core.physical, d.access, keys) +} + +func (d *defaultSeal) 
LegacySeal() (bool, error) { + cfg := d.config.Load().(*SealConfig) + if cfg == nil { + return false, fmt.Errorf("no seal config found, can't determine if legacy or new-style shamir") + } + return cfg.StoredShares == 0, nil +} + +func (d *defaultSeal) GetStoredKeys(ctx context.Context) ([][]byte, error) { + isLegacy, err := d.LegacySeal() + if err != nil { + return nil, err + } + if isLegacy { + return nil, fmt.Errorf("stored keys are not supported") + } + keys, err := readStoredKeys(ctx, d.core.physical, d.access) + return keys, err +} + +func (d *defaultSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) { + cfg := d.config.Load().(*SealConfig) + if cfg != nil { + return cfg.Clone(), nil + } + + if err := d.checkCore(); err != nil { + return nil, err + } + + // Fetch the core configuration + pe, err := d.core.physical.Get(ctx, barrierSealConfigPath) + if err != nil { + d.core.logger.Error("failed to read seal configuration", "error", err) + return nil, errwrap.Wrapf("failed to check seal configuration: {{err}}", err) + } + + // If the seal configuration is missing, we are not initialized + if pe == nil { + d.core.logger.Info("seal configuration missing, not initialized") + return nil, nil + } + + var conf SealConfig + + // Decode the barrier entry + if err := jsonutil.DecodeJSON(pe.Value, &conf); err != nil { + d.core.logger.Error("failed to decode seal configuration", "error", err) + return nil, errwrap.Wrapf("failed to decode seal configuration: {{err}}", err) + } + + switch conf.Type { + // This case should not be valid for other types as only this is the default + case "": + conf.Type = d.BarrierType() + case d.BarrierType(): + default: + d.core.logger.Error("barrier seal type does not match expected type", "barrier_seal_type", conf.Type, "loaded_seal_type", d.BarrierType()) + return nil, fmt.Errorf("barrier seal type of %q does not match expected type of %q", conf.Type, d.BarrierType()) + } + + // Check for a valid seal configuration + if err := conf.Validate(); err != nil { + d.core.logger.Error("invalid seal configuration", "error", err) + return nil, errwrap.Wrapf("seal validation failed: {{err}}", err) + } + + d.config.Store(&conf) + return conf.Clone(), nil +} + +func (d *defaultSeal) SetBarrierConfig(ctx context.Context, config *SealConfig) error { + if err := d.checkCore(); err != nil { + return err + } + + // Provide a way to wipe out the cached value (also prevents actually + // saving a nil config) + if config == nil { + d.config.Store((*SealConfig)(nil)) + return nil + } + + config.Type = d.BarrierType() + + // If we are doing a raft unseal we do not want to persist the barrier config + // because storage isn't setup yet. 
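+ // (In that case the config is only cached in memory; persisting it is
+ // assumed to happen on a later call once storage is set up.)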
+ if d.core.isRaftUnseal() { + d.config.Store(config.Clone()) + return nil + } + + // Encode the seal configuration + buf, err := json.Marshal(config) + if err != nil { + return errwrap.Wrapf("failed to encode seal configuration: {{err}}", err) + } + + // Store the seal configuration + pe := &physical.Entry{ + Key: barrierSealConfigPath, + Value: buf, + } + + if err := d.core.physical.Put(ctx, pe); err != nil { + d.core.logger.Error("failed to write seal configuration", "error", err) + return errwrap.Wrapf("failed to write seal configuration: {{err}}", err) + } + + d.config.Store(config.Clone()) + + return nil +} + +func (d *defaultSeal) SetCachedBarrierConfig(config *SealConfig) { + d.config.Store(config) +} + +func (d *defaultSeal) RecoveryType() string { + return RecoveryTypeUnsupported +} + +func (d *defaultSeal) RecoveryConfig(ctx context.Context) (*SealConfig, error) { + return nil, fmt.Errorf("recovery not supported") +} + +func (d *defaultSeal) RecoveryKey(ctx context.Context) ([]byte, error) { + return nil, fmt.Errorf("recovery not supported") +} + +func (d *defaultSeal) SetRecoveryConfig(ctx context.Context, config *SealConfig) error { + return fmt.Errorf("recovery not supported") +} + +func (d *defaultSeal) SetCachedRecoveryConfig(config *SealConfig) { +} + +func (d *defaultSeal) VerifyRecoveryKey(ctx context.Context, key []byte) error { + return fmt.Errorf("recovery not supported") +} + +func (d *defaultSeal) SetRecoveryKey(ctx context.Context, key []byte) error { + return fmt.Errorf("recovery not supported") +} + +// SealConfig is used to describe the seal configuration +type SealConfig struct { + // The type, for sanity checking + Type string `json:"type" mapstructure:"type"` + + // SecretShares is the number of shares the secret is split into. This is + // the N value of Shamir. + SecretShares int `json:"secret_shares" mapstructure:"secret_shares"` + + // SecretThreshold is the number of parts required to open the vault. This + // is the T value of Shamir. + SecretThreshold int `json:"secret_threshold" mapstructure:"secret_threshold"` + + // PGPKeys is the array of public PGP keys used, if requested, to encrypt + // the output unseal tokens. If provided, it sets the value of + // SecretShares. Ordering is important. + PGPKeys []string `json:"pgp_keys" mapstructure:"pgp_keys"` + + // Nonce is a nonce generated by Vault used to ensure that when unseal keys + // are submitted for a rekey operation, the rekey operation itself is the + // one intended. This prevents hijacking of the rekey operation, since it + // is unauthenticated. + Nonce string `json:"nonce" mapstructure:"nonce"` + + // Backup indicates whether or not a backup of PGP-encrypted unseal keys + // should be stored at coreUnsealKeysBackupPath after successful rekeying. + Backup bool `json:"backup" mapstructure:"backup"` + + // How many keys to store, for seals that support storage. Always 0 or 1. + StoredShares int `json:"stored_shares" mapstructure:"stored_shares"` + + // Stores the progress of the rekey operation (key shares) + RekeyProgress [][]byte `json:"-"` + + // VerificationRequired indicates that after a rekey validation must be + // performed (via providing shares from the new key) before the new key is + // actually installed. This is omitted from JSON as we don't persist the + // new key, it lives only in memory. 
+ VerificationRequired bool `json:"-"`
+
+ // VerificationKey is the new key that we will roll to after successful
+ // validation
+ VerificationKey []byte `json:"-"`
+
+ // VerificationNonce stores the current operation nonce for verification
+ VerificationNonce string `json:"-"`
+
+ // Stores the progress of the verification operation (key shares)
+ VerificationProgress [][]byte `json:"-"`
+}
+
+// Validate is used to sanity check the seal configuration
+func (s *SealConfig) Validate() error {
+ if s.SecretShares < 1 {
+ return fmt.Errorf("shares must be at least one")
+ }
+ if s.SecretThreshold < 1 {
+ return fmt.Errorf("threshold must be at least one")
+ }
+ if s.SecretShares > 1 && s.SecretThreshold == 1 {
+ return fmt.Errorf("threshold must be greater than one for multiple shares")
+ }
+ if s.SecretShares > 255 {
+ return fmt.Errorf("shares must be less than 256")
+ }
+ if s.SecretThreshold > 255 {
+ return fmt.Errorf("threshold must be less than 256")
+ }
+ if s.SecretThreshold > s.SecretShares {
+ return fmt.Errorf("threshold cannot be larger than shares")
+ }
+ if s.StoredShares > 1 {
+ return fmt.Errorf("stored keys cannot be larger than 1")
+ }
+ if len(s.PGPKeys) > 0 && len(s.PGPKeys) != s.SecretShares {
+ return fmt.Errorf("count mismatch between number of provided PGP keys and number of shares")
+ }
+ if len(s.PGPKeys) > 0 {
+ for _, keystring := range s.PGPKeys {
+ data, err := base64.StdEncoding.DecodeString(keystring)
+ if err != nil {
+ return errwrap.Wrapf("error decoding given PGP key: {{err}}", err)
+ }
+ _, err = openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
+ if err != nil {
+ return errwrap.Wrapf("error parsing given PGP key: {{err}}", err)
+ }
+ }
+ }
+ return nil
+}
+
+func (s *SealConfig) Clone() *SealConfig {
+ ret := &SealConfig{
+ Type: s.Type,
+ SecretShares: s.SecretShares,
+ SecretThreshold: s.SecretThreshold,
+ Nonce: s.Nonce,
+ Backup: s.Backup,
+ StoredShares: s.StoredShares,
+ VerificationRequired: s.VerificationRequired,
+ VerificationNonce: s.VerificationNonce,
+ }
+ if len(s.PGPKeys) > 0 {
+ ret.PGPKeys = make([]string, len(s.PGPKeys))
+ copy(ret.PGPKeys, s.PGPKeys)
+ }
+ if len(s.VerificationKey) > 0 {
+ ret.VerificationKey = make([]byte, len(s.VerificationKey))
+ copy(ret.VerificationKey, s.VerificationKey)
+ }
+ return ret
+}
+
+func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor seal.Encryptor, keys [][]byte) error {
+ if keys == nil {
+ return fmt.Errorf("keys were nil")
+ }
+ if len(keys) == 0 {
+ return fmt.Errorf("no keys provided")
+ }
+
+ buf, err := json.Marshal(keys)
+ if err != nil {
+ return errwrap.Wrapf("failed to encode keys for storage: {{err}}", err)
+ }
+
+ // Encrypt and marshal the keys
+ blobInfo, err := encryptor.Encrypt(ctx, buf)
+ if err != nil {
+ return errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err)
+ }
+
+ value, err := proto.Marshal(blobInfo)
+ if err != nil {
+ return errwrap.Wrapf("failed to marshal value for storage: {{err}}", err)
+ }
+
+ // Store the encrypted keys.
+ pe := &physical.Entry{ + Key: StoredBarrierKeysPath, + Value: value, + } + + if err := storage.Put(ctx, pe); err != nil { + return errwrap.Wrapf("failed to write keys to storage: {{err}}", err) + } + + return nil +} + +func readStoredKeys(ctx context.Context, storage physical.Backend, encryptor seal.Encryptor) ([][]byte, error) { + pe, err := storage.Get(ctx, StoredBarrierKeysPath) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch stored keys: {{err}}", err) + } + + // This is not strictly an error; we may not have any stored keys, for + // instance, if we're not initialized + if pe == nil { + return nil, nil + } + + blobInfo := &physical.EncryptedBlobInfo{} + if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { + return nil, errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err) + } + + pt, err := encryptor.Decrypt(ctx, blobInfo) + if err != nil { + return nil, errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err) + } + + // Decode the barrier entry + var keys [][]byte + if err := json.Unmarshal(pt, &keys); err != nil { + return nil, fmt.Errorf("failed to decode stored keys: %v", err) + } + + return keys, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/seal/envelope.go b/vendor/github.com/hashicorp/vault/vault/seal/envelope.go new file mode 100644 index 00000000..cdd6fcb8 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/seal/envelope.go @@ -0,0 +1,72 @@ +package seal + +import ( + "crypto/aes" + "crypto/cipher" + "errors" + "time" + + metrics "github.com/armon/go-metrics" + "github.com/hashicorp/errwrap" + uuid "github.com/hashicorp/go-uuid" +) + +type Envelope struct{} + +type EnvelopeInfo struct { + Ciphertext []byte + Key []byte + IV []byte +} + +func NewEnvelope() *Envelope { + return &Envelope{} +} + +func (e *Envelope) Encrypt(plaintext []byte) (*EnvelopeInfo, error) { + defer metrics.MeasureSince([]string{"seal", "envelope", "encrypt"}, time.Now()) + + // Generate DEK + key, err := uuid.GenerateRandomBytes(32) + if err != nil { + return nil, err + } + iv, err := uuid.GenerateRandomBytes(12) + if err != nil { + return nil, err + } + aead, err := e.aeadEncrypter(key) + if err != nil { + return nil, err + } + return &EnvelopeInfo{ + Ciphertext: aead.Seal(nil, iv, plaintext, nil), + Key: key, + IV: iv, + }, nil +} + +func (e *Envelope) Decrypt(data *EnvelopeInfo) ([]byte, error) { + defer metrics.MeasureSince([]string{"seal", "envelope", "decrypt"}, time.Now()) + + aead, err := e.aeadEncrypter(data.Key) + if err != nil { + return nil, err + } + return aead.Open(nil, data.IV, data.Ciphertext, nil) +} + +func (e *Envelope) aeadEncrypter(key []byte) (cipher.AEAD, error) { + aesCipher, err := aes.NewCipher(key) + if err != nil { + return nil, errwrap.Wrapf("failed to create cipher: {{err}}", err) + } + + // Create the GCM mode AEAD + gcm, err := cipher.NewGCM(aesCipher) + if err != nil { + return nil, errors.New("failed to initialize GCM mode") + } + + return gcm, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/seal/seal.go b/vendor/github.com/hashicorp/vault/vault/seal/seal.go new file mode 100644 index 00000000..fd1deb3f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/seal/seal.go @@ -0,0 +1,40 @@ +package seal + +import ( + "context" + + "github.com/hashicorp/vault/sdk/physical" +) + +const ( + Shamir = "shamir" + PKCS11 = "pkcs11" + AliCloudKMS = "alicloudkms" + AWSKMS = "awskms" + GCPCKMS = "gcpckms" + AzureKeyVault = "azurekeyvault" + OCIKMS = "ocikms" + Transit = "transit" + Test = 
"test-auto" + + // HSMAutoDeprecated is a deprecated seal type prior to 0.9.0. + // It is still referenced in certain code paths for upgrade purporses + HSMAutoDeprecated = "hsm-auto" +) + +type Encryptor interface { + Encrypt(context.Context, []byte) (*physical.EncryptedBlobInfo, error) + Decrypt(context.Context, *physical.EncryptedBlobInfo) ([]byte, error) +} + +// Access is the embedded implementation of autoSeal that contains logic +// specific to encrypting and decrypting data, or in this case keys. +type Access interface { + SealType() string + KeyID() string + + Init(context.Context) error + Finalize(context.Context) error + + Encryptor +} diff --git a/vendor/github.com/hashicorp/vault/vault/seal/seal_testing.go b/vendor/github.com/hashicorp/vault/vault/seal/seal_testing.go new file mode 100644 index 00000000..ccd37b2f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/seal/seal_testing.go @@ -0,0 +1,89 @@ +package seal + +import ( + "context" + + "github.com/hashicorp/vault/helper/xor" + "github.com/hashicorp/vault/sdk/physical" +) + +type TestSeal struct { + Type string + secret []byte + keyId string +} + +var _ Access = (*TestSeal)(nil) + +func NewTestSeal(secret []byte) *TestSeal { + return &TestSeal{ + Type: Test, + secret: secret, + keyId: "static-key", + } +} + +func (s *TestSeal) Init(_ context.Context) error { + return nil +} + +func (t *TestSeal) Finalize(_ context.Context) error { + return nil +} + +func (t *TestSeal) SealType() string { + return t.Type +} + +func (t *TestSeal) KeyID() string { + return t.keyId +} + +func (t *TestSeal) SetKeyID(k string) { + t.keyId = k +} + +func (t *TestSeal) Encrypt(_ context.Context, plaintext []byte) (*physical.EncryptedBlobInfo, error) { + ct, err := t.obscureBytes(plaintext) + if err != nil { + return nil, err + } + + return &physical.EncryptedBlobInfo{ + Ciphertext: ct, + KeyInfo: &physical.SealKeyInfo{ + KeyID: t.KeyID(), + }, + }, nil +} + +func (t *TestSeal) Decrypt(_ context.Context, dwi *physical.EncryptedBlobInfo) ([]byte, error) { + return t.obscureBytes(dwi.Ciphertext) +} + +// obscureBytes is a helper to simulate "encryption/decryption" +// on protected values. 
+func (t *TestSeal) obscureBytes(in []byte) ([]byte, error) { + out := make([]byte, len(in)) + + if len(t.secret) != 0 { + // make sure they are the same length + localSecret := make([]byte, len(in)) + copy(localSecret, t.secret) + + var err error + + out, err = xor.XORBytes(in, localSecret) + if err != nil { + return nil, err + } + + } else { + // if there is no secret, simply reverse the string + for i := 0; i < len(in); i++ { + out[i] = in[len(in)-1-i] + } + } + + return out, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/seal/shamir/shamir.go b/vendor/github.com/hashicorp/vault/vault/seal/shamir/shamir.go new file mode 100644 index 00000000..5054cc1d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/seal/shamir/shamir.go @@ -0,0 +1,114 @@ +package shamir + +import ( + "context" + "crypto/aes" + "crypto/cipher" + "errors" + "fmt" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault/seal" +) + +// ShamirSeal implements the seal.Access interface for Shamir unseal +type ShamirSeal struct { + logger log.Logger + key []byte + aead cipher.AEAD +} + +// Ensure that we are implementing AutoSealAccess +var _ seal.Access = (*ShamirSeal)(nil) + +// NewSeal creates a new ShamirSeal with the provided logger +func NewSeal(logger log.Logger) *ShamirSeal { + seal := &ShamirSeal{ + logger: logger, + } + return seal +} + +func (s *ShamirSeal) GetKey() []byte { + return s.key +} + +func (s *ShamirSeal) SetKey(key []byte) error { + aesCipher, err := aes.NewCipher(key) + if err != nil { + return err + } + + aead, err := cipher.NewGCM(aesCipher) + if err != nil { + return err + } + + s.key = key + s.aead = aead + return nil +} + +// Init is called during core.Initialize. No-op at the moment. +func (s *ShamirSeal) Init(_ context.Context) error { + return nil +} + +// Finalize is called during shutdown. This is a no-op since +// ShamirSeal doesn't require any cleanup. +func (s *ShamirSeal) Finalize(_ context.Context) error { + return nil +} + +// SealType returns the seal type for this particular seal implementation. +func (s *ShamirSeal) SealType() string { + return seal.Shamir +} + +// KeyID returns the last known key id. +func (s *ShamirSeal) KeyID() string { + return "" +} + +// Encrypt is used to encrypt the plaintext using the aead held by the seal. 
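+// The randomly generated 12-byte IV is prepended to the returned ciphertext;
+// Decrypt below relies on this when it splits the value at byte 12.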
+func (s *ShamirSeal) Encrypt(_ context.Context, plaintext []byte) (*physical.EncryptedBlobInfo, error) {
+ if plaintext == nil {
+ return nil, fmt.Errorf("given plaintext for encryption is nil")
+ }
+
+ if s.aead == nil {
+ return nil, errors.New("aead is not configured in the seal")
+ }
+
+ iv, err := uuid.GenerateRandomBytes(12)
+ if err != nil {
+ return nil, err
+ }
+
+ ciphertext := s.aead.Seal(nil, iv, plaintext, nil)
+
+ return &physical.EncryptedBlobInfo{
+ Ciphertext: append(iv, ciphertext...),
+ }, nil
+}
+
+func (s *ShamirSeal) Decrypt(_ context.Context, in *physical.EncryptedBlobInfo) ([]byte, error) {
+ if in == nil {
+ return nil, fmt.Errorf("given ciphertext for decryption is nil")
+ }
+
+ if s.aead == nil {
+ return nil, errors.New("aead is not configured in the seal")
+ }
+
+ iv, ciphertext := in.Ciphertext[:12], in.Ciphertext[12:]
+
+ plaintext, err := s.aead.Open(nil, iv, ciphertext, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return plaintext, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal_access.go b/vendor/github.com/hashicorp/vault/vault/seal_access.go
new file mode 100644
index 00000000..5f44433c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/seal_access.go
@@ -0,0 +1,47 @@
+package vault
+
+import (
+ "context"
+)
+
+// SealAccess is a wrapper around Seal that exposes accessor methods
+// through Core.SealAccess() while restricting the ability to modify
+// Core.seal itself.
+type SealAccess struct {
+ seal Seal
+}
+
+func NewSealAccess(seal Seal) *SealAccess {
+ return &SealAccess{seal: seal}
+}
+
+func (s *SealAccess) StoredKeysSupported() StoredKeysSupport {
+ return s.seal.StoredKeysSupported()
+}
+
+func (s *SealAccess) BarrierType() string {
+ return s.seal.BarrierType()
+}
+
+func (s *SealAccess) BarrierConfig(ctx context.Context) (*SealConfig, error) {
+ return s.seal.BarrierConfig(ctx)
+}
+
+func (s *SealAccess) RecoveryKeySupported() bool {
+ return s.seal.RecoveryKeySupported()
+}
+
+func (s *SealAccess) RecoveryConfig(ctx context.Context) (*SealConfig, error) {
+ return s.seal.RecoveryConfig(ctx)
+}
+
+func (s *SealAccess) VerifyRecoveryKey(ctx context.Context, key []byte) error {
+ return s.seal.VerifyRecoveryKey(ctx, key)
+}
+
+func (s *SealAccess) ClearCaches(ctx context.Context) {
+ s.seal.SetBarrierConfig(ctx, nil)
+ if s.RecoveryKeySupported() {
+ s.seal.SetRecoveryConfig(ctx, nil)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal_autoseal.go b/vendor/github.com/hashicorp/vault/vault/seal_autoseal.go
new file mode 100644
index 00000000..cb8af840
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/seal_autoseal.go
@@ -0,0 +1,502 @@
+package vault
+
+import (
+ "context"
+ "crypto/subtle"
+ "encoding/json"
+ "fmt"
+ "sync/atomic"
+
+ proto "github.com/golang/protobuf/proto"
+ "github.com/hashicorp/errwrap"
+ log "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault/sdk/physical"
+ "github.com/hashicorp/vault/vault/seal"
+)
+
+// barrierTypeUpgradeCheck checks for backwards compat on barrier type, not
+// applicable in the OSS side
+var barrierTypeUpgradeCheck = func(_ string, _ *SealConfig) {}
+
+// autoSeal is a Seal implementation that contains logic for encrypting and
+// decrypting stored keys via an underlying AutoSealAccess implementation, as
+// well as logic related to recovery keys and barrier config.
+type autoSeal struct {
+ seal.Access
+
+ barrierConfig atomic.Value
+ recoveryConfig atomic.Value
+ core *Core
+ logger log.Logger
+}
+
+// Ensure we are implementing the Seal interface
+var _ Seal = (*autoSeal)(nil)
+
+func NewAutoSeal(lowLevel seal.Access) *autoSeal {
+ ret := &autoSeal{
+ Access: lowLevel,
+ }
+ ret.barrierConfig.Store((*SealConfig)(nil))
+ ret.recoveryConfig.Store((*SealConfig)(nil))
+ return ret
+}
+
+func (d *autoSeal) SealWrapable() bool {
+ return true
+}
+
+func (d *autoSeal) GetAccess() seal.Access {
+ return d.Access
+}
+
+func (d *autoSeal) checkCore() error {
+ if d.core == nil {
+ return fmt.Errorf("seal does not have a core set")
+ }
+ return nil
+}
+
+func (d *autoSeal) SetCore(core *Core) {
+ d.core = core
+ if d.logger == nil {
+ d.logger = d.core.Logger().Named("autoseal")
+ d.core.AddLogger(d.logger)
+ }
+}
+
+func (d *autoSeal) Init(ctx context.Context) error {
+ return d.Access.Init(ctx)
+}
+
+func (d *autoSeal) Finalize(ctx context.Context) error {
+ return d.Access.Finalize(ctx)
+}
+
+func (d *autoSeal) BarrierType() string {
+ return d.SealType()
+}
+
+func (d *autoSeal) StoredKeysSupported() StoredKeysSupport {
+ return StoredKeysSupportedGeneric
+}
+
+func (d *autoSeal) RecoveryKeySupported() bool {
+ return true
+}
+
+// SetStoredKeys uses the autoSeal.Access.Encrypt method to wrap the keys. The stored entry
+// does not need to be seal wrapped in this case.
+func (d *autoSeal) SetStoredKeys(ctx context.Context, keys [][]byte) error {
+ return writeStoredKeys(ctx, d.core.physical, d, keys)
+}
+
+// GetStoredKeys retrieves the key shares by unwrapping the encrypted key using the
+// autoseal.
+func (d *autoSeal) GetStoredKeys(ctx context.Context) ([][]byte, error) {
+ return readStoredKeys(ctx, d.core.physical, d)
+}
+
+func (d *autoSeal) upgradeStoredKeys(ctx context.Context) error {
+ pe, err := d.core.physical.Get(ctx, StoredBarrierKeysPath)
+ if err != nil {
+ return errwrap.Wrapf("failed to fetch stored keys: {{err}}", err)
+ }
+ if pe == nil {
+ return fmt.Errorf("no stored keys found")
+ }
+
+ blobInfo := &physical.EncryptedBlobInfo{}
+ if err := proto.Unmarshal(pe.Value, blobInfo); err != nil {
+ return errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err)
+ }
+
+ if blobInfo.KeyInfo != nil && blobInfo.KeyInfo.KeyID != d.Access.KeyID() {
+ d.logger.Info("upgrading stored keys")
+
+ pt, err := d.Decrypt(ctx, blobInfo)
+ if err != nil {
+ return errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err)
+ }
+
+ // Decode the barrier entry
+ var keys [][]byte
+ if err := json.Unmarshal(pt, &keys); err != nil {
+ return errwrap.Wrapf("failed to decode stored keys: {{err}}", err)
+ }
+
+ if err := d.SetStoredKeys(ctx, keys); err != nil {
+ return errwrap.Wrapf("failed to save upgraded stored keys: {{err}}", err)
+ }
+ }
+ return nil
+}
+
+// UpgradeKeys re-encrypts and saves the stored keys and the recovery key
+// with the current key if the current KeyID is different from the KeyID
+// the stored keys and the recovery key are encrypted with. The provided
+// Context must be non-nil.
+func (d *autoSeal) UpgradeKeys(ctx context.Context) error {
+ // Many of the seals update their keys to the latest KeyID when Encrypt
+ // is called.
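+ // Encrypting a throwaway value is therefore enough to roll this seal to the
+ // current KeyID before the stored and recovery keys are re-wrapped below.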
+ if _, err := d.Encrypt(ctx, []byte("a")); err != nil { + return err + } + + if err := d.upgradeRecoveryKey(ctx); err != nil { + return err + } + if err := d.upgradeStoredKeys(ctx); err != nil { + return err + } + return nil +} + +func (d *autoSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) { + if d.barrierConfig.Load().(*SealConfig) != nil { + return d.barrierConfig.Load().(*SealConfig).Clone(), nil + } + + if err := d.checkCore(); err != nil { + return nil, err + } + + sealType := "barrier" + + entry, err := d.core.physical.Get(ctx, barrierSealConfigPath) + if err != nil { + d.logger.Error("failed to read seal configuration", "seal_type", sealType, "error", err) + return nil, errwrap.Wrapf(fmt.Sprintf("failed to read %q seal configuration: {{err}}", sealType), err) + } + + // If the seal configuration is missing, we are not initialized + if entry == nil { + if d.logger.IsInfo() { + d.logger.Info("seal configuration missing, not initialized", "seal_type", sealType) + } + return nil, nil + } + + conf := &SealConfig{} + err = json.Unmarshal(entry.Value, conf) + if err != nil { + d.logger.Error("failed to decode seal configuration", "seal_type", sealType, "error", err) + return nil, errwrap.Wrapf(fmt.Sprintf("failed to decode %q seal configuration: {{err}}", sealType), err) + } + + // Check for a valid seal configuration + if err := conf.Validate(); err != nil { + d.logger.Error("invalid seal configuration", "seal_type", sealType, "error", err) + return nil, errwrap.Wrapf(fmt.Sprintf("%q seal validation failed: {{err}}", sealType), err) + } + + barrierTypeUpgradeCheck(d.BarrierType(), conf) + + if conf.Type != d.BarrierType() { + d.logger.Error("barrier seal type does not match loaded type", "seal_type", conf.Type, "loaded_type", d.BarrierType()) + return nil, fmt.Errorf("barrier seal type of %q does not match loaded type of %q", conf.Type, d.BarrierType()) + } + + d.barrierConfig.Store(conf) + return conf.Clone(), nil +} + +func (d *autoSeal) SetBarrierConfig(ctx context.Context, conf *SealConfig) error { + if err := d.checkCore(); err != nil { + return err + } + + if conf == nil { + d.barrierConfig.Store((*SealConfig)(nil)) + return nil + } + + conf.Type = d.BarrierType() + + // Encode the seal configuration + buf, err := json.Marshal(conf) + if err != nil { + return errwrap.Wrapf("failed to encode barrier seal configuration: {{err}}", err) + } + + // Store the seal configuration + pe := &physical.Entry{ + Key: barrierSealConfigPath, + Value: buf, + } + + if err := d.core.physical.Put(ctx, pe); err != nil { + d.logger.Error("failed to write barrier seal configuration", "error", err) + return errwrap.Wrapf("failed to write barrier seal configuration: {{err}}", err) + } + + d.barrierConfig.Store(conf.Clone()) + + return nil +} + +func (d *autoSeal) SetCachedBarrierConfig(config *SealConfig) { + d.barrierConfig.Store(config) +} + +func (d *autoSeal) RecoveryType() string { + return RecoveryTypeShamir +} + +// RecoveryConfig returns the recovery config on recoverySealConfigPlaintextPath. 
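+// The result is cached after the first successful read; on upgraded clusters
+// it falls back to the legacy in-barrier path when the plaintext entry is
+// missing.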
+func (d *autoSeal) RecoveryConfig(ctx context.Context) (*SealConfig, error) { + if d.recoveryConfig.Load().(*SealConfig) != nil { + return d.recoveryConfig.Load().(*SealConfig).Clone(), nil + } + + if err := d.checkCore(); err != nil { + return nil, err + } + + sealType := "recovery" + + var entry *physical.Entry + var err error + entry, err = d.core.physical.Get(ctx, recoverySealConfigPlaintextPath) + if err != nil { + d.logger.Error("failed to read seal configuration", "seal_type", sealType, "error", err) + return nil, errwrap.Wrapf(fmt.Sprintf("failed to read %q seal configuration: {{err}}", sealType), err) + } + + if entry == nil { + if d.core.Sealed() { + d.logger.Info("seal configuration missing, but cannot check old path as core is sealed", "seal_type", sealType) + return nil, nil + } + + // Check the old recovery seal config path so an upgraded standby will + // return the correct seal config + be, err := d.core.barrier.Get(ctx, recoverySealConfigPath) + if err != nil { + return nil, errwrap.Wrapf("failed to read old recovery seal configuration: {{err}}", err) + } + + // If the seal configuration is missing, then we are not initialized. + if be == nil { + if d.logger.IsInfo() { + d.logger.Info("seal configuration missing, not initialized", "seal_type", sealType) + } + return nil, nil + } + + // Reconstruct the physical entry + entry = &physical.Entry{ + Key: be.Key, + Value: be.Value, + } + } + + conf := &SealConfig{} + if err := json.Unmarshal(entry.Value, conf); err != nil { + d.logger.Error("failed to decode seal configuration", "seal_type", sealType, "error", err) + return nil, errwrap.Wrapf(fmt.Sprintf("failed to decode %q seal configuration: {{err}}", sealType), err) + } + + // Check for a valid seal configuration + if err := conf.Validate(); err != nil { + d.logger.Error("invalid seal configuration", "seal_type", sealType, "error", err) + return nil, errwrap.Wrapf(fmt.Sprintf("%q seal validation failed: {{err}}", sealType), err) + } + + if conf.Type != d.RecoveryType() { + d.logger.Error("recovery seal type does not match loaded type", "seal_type", conf.Type, "loaded_type", d.RecoveryType()) + return nil, fmt.Errorf("recovery seal type of %q does not match loaded type of %q", conf.Type, d.RecoveryType()) + } + + d.recoveryConfig.Store(conf) + return conf.Clone(), nil +} + +// SetRecoveryConfig writes the recovery configuration to the physical storage +// and sets it as the seal's recoveryConfig. 
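+// A nil conf only clears the cached value; migration of any legacy in-barrier
+// config is attempted first in either case.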
+func (d *autoSeal) SetRecoveryConfig(ctx context.Context, conf *SealConfig) error { + if err := d.checkCore(); err != nil { + return err + } + + // Perform migration if applicable + if err := d.migrateRecoveryConfig(ctx); err != nil { + return err + } + + if conf == nil { + d.recoveryConfig.Store((*SealConfig)(nil)) + return nil + } + + conf.Type = d.RecoveryType() + + // Encode the seal configuration + buf, err := json.Marshal(conf) + if err != nil { + return errwrap.Wrapf("failed to encode recovery seal configuration: {{err}}", err) + } + + // Store the seal configuration directly in the physical storage + pe := &physical.Entry{ + Key: recoverySealConfigPlaintextPath, + Value: buf, + } + + if err := d.core.physical.Put(ctx, pe); err != nil { + d.logger.Error("failed to write recovery seal configuration", "error", err) + return errwrap.Wrapf("failed to write recovery seal configuration: {{err}}", err) + } + + d.recoveryConfig.Store(conf.Clone()) + + return nil +} + +func (d *autoSeal) SetCachedRecoveryConfig(config *SealConfig) { + d.recoveryConfig.Store(config) +} + +func (d *autoSeal) VerifyRecoveryKey(ctx context.Context, key []byte) error { + if key == nil { + return fmt.Errorf("recovery key to verify is nil") + } + + pt, err := d.getRecoveryKeyInternal(ctx) + if err != nil { + return err + } + + if subtle.ConstantTimeCompare(key, pt) != 1 { + return fmt.Errorf("recovery key does not match submitted values") + } + + return nil +} + +func (d *autoSeal) SetRecoveryKey(ctx context.Context, key []byte) error { + if err := d.checkCore(); err != nil { + return err + } + + if key == nil { + return fmt.Errorf("recovery key to store is nil") + } + + // Encrypt and marshal the keys + blobInfo, err := d.Encrypt(ctx, key) + if err != nil { + return errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err) + } + + value, err := proto.Marshal(blobInfo) + if err != nil { + return errwrap.Wrapf("failed to marshal value for storage: {{err}}", err) + } + + be := &physical.Entry{ + Key: recoveryKeyPath, + Value: value, + } + + if err := d.core.physical.Put(ctx, be); err != nil { + d.logger.Error("failed to write recovery key", "error", err) + return errwrap.Wrapf("failed to write recovery key: {{err}}", err) + } + + return nil +} + +func (d *autoSeal) RecoveryKey(ctx context.Context) ([]byte, error) { + return d.getRecoveryKeyInternal(ctx) +} + +func (d *autoSeal) getRecoveryKeyInternal(ctx context.Context) ([]byte, error) { + pe, err := d.core.physical.Get(ctx, recoveryKeyPath) + if err != nil { + d.logger.Error("failed to read recovery key", "error", err) + return nil, errwrap.Wrapf("failed to read recovery key: {{err}}", err) + } + if pe == nil { + d.logger.Warn("no recovery key found") + return nil, fmt.Errorf("no recovery key found") + } + + blobInfo := &physical.EncryptedBlobInfo{} + if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { + return nil, errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err) + } + + pt, err := d.Decrypt(ctx, blobInfo) + if err != nil { + return nil, errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err) + } + + return pt, nil +} + +func (d *autoSeal) upgradeRecoveryKey(ctx context.Context) error { + pe, err := d.core.physical.Get(ctx, recoveryKeyPath) + if err != nil { + return errwrap.Wrapf("failed to fetch recovery key: {{err}}", err) + } + if pe == nil { + return fmt.Errorf("no recovery key found") + } + + blobInfo := &physical.EncryptedBlobInfo{} + if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { + return 
errwrap.Wrapf("failed to proto decode recovery key: {{err}}", err) + } + + if blobInfo.KeyInfo != nil && blobInfo.KeyInfo.KeyID != d.Access.KeyID() { + d.logger.Info("upgrading recovery key") + + pt, err := d.Decrypt(ctx, blobInfo) + if err != nil { + return errwrap.Wrapf("failed to decrypt encrypted recovery key: {{err}}", err) + + } + if err := d.SetRecoveryKey(ctx, pt); err != nil { + return errwrap.Wrapf("failed to save upgraded recovery key: {{err}}", err) + } + } + return nil +} + +// migrateRecoveryConfig is a helper func to migrate the recovery config to +// live outside the barrier. This is called from SetRecoveryConfig which is +// always called with the stateLock. +func (d *autoSeal) migrateRecoveryConfig(ctx context.Context) error { + // Get config from the old recoverySealConfigPath path + be, err := d.core.barrier.Get(ctx, recoverySealConfigPath) + if err != nil { + return errwrap.Wrapf("failed to read old recovery seal configuration during migration: {{err}}", err) + } + + // If this entry is nil, then skip migration + if be == nil { + return nil + } + + // Only log if we are performing the migration + d.logger.Debug("migrating recovery seal configuration") + defer d.logger.Debug("done migrating recovery seal configuration") + + // Perform migration + pe := &physical.Entry{ + Key: recoverySealConfigPlaintextPath, + Value: be.Value, + } + + if err := d.core.physical.Put(ctx, pe); err != nil { + return errwrap.Wrapf("failed to write recovery seal configuration during migration: {{err}}", err) + } + + // Perform deletion of the old entry + if err := d.core.barrier.Delete(ctx, recoverySealConfigPath); err != nil { + return errwrap.Wrapf("failed to delete old recovery seal configuration during migration: {{err}}", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/seal_testing.go b/vendor/github.com/hashicorp/vault/vault/seal_testing.go new file mode 100644 index 00000000..14379146 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/seal_testing.go @@ -0,0 +1,54 @@ +package vault + +import ( + "context" + + log "github.com/hashicorp/go-hclog" + testing "github.com/mitchellh/go-testing-interface" +) + +type TestSealOpts struct { + Logger log.Logger + StoredKeys StoredKeysSupport + Secret []byte +} + +func TestCoreUnsealedWithConfigs(t testing.T, barrierConf, recoveryConf *SealConfig) (*Core, [][]byte, [][]byte, string) { + t.Helper() + opts := &TestSealOpts{} + if recoveryConf == nil { + opts.StoredKeys = StoredKeysSupportedShamirMaster + } + return TestCoreUnsealedWithConfigSealOpts(t, barrierConf, recoveryConf, opts) +} + +func TestCoreUnsealedWithConfigSealOpts(t testing.T, barrierConf, recoveryConf *SealConfig, sealOpts *TestSealOpts) (*Core, [][]byte, [][]byte, string) { + t.Helper() + seal := NewTestSeal(t, sealOpts) + core := TestCoreWithSeal(t, seal, false) + result, err := core.Initialize(context.Background(), &InitParams{ + BarrierConfig: barrierConf, + RecoveryConfig: recoveryConf, + LegacyShamirSeal: sealOpts.StoredKeys == StoredKeysNotSupported, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + err = core.UnsealWithStoredKeys(context.Background()) + if err != nil && IsFatalError(err) { + t.Fatalf("err: %s", err) + } + if core.Sealed() { + for _, key := range result.SecretShares { + if _, err := core.Unseal(TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } + + if core.Sealed() { + t.Fatal("should not be sealed") + } + } + + return core, result.SecretShares, result.RecoveryShares, result.RootToken +} 
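A package-internal test could drive TestCoreUnsealedWithConfigs above roughly as follows (a minimal sketch, not part of the vendored code; the test name, the 1-of-1 Shamir config, and the assertions are illustrative, and *testing.T satisfies the go-testing-interface testing.T these helpers accept):

	func TestUnsealedCoreSketch(t *testing.T) {
		// One share, threshold one, stored in the barrier; a nil recovery
		// config selects the shamir-master test seal in NewTestSeal.
		barrierConf := &SealConfig{SecretShares: 1, SecretThreshold: 1, StoredShares: 1}
		core, _, _, root := TestCoreUnsealedWithConfigs(t, barrierConf, nil)
		if core.Sealed() {
			t.Fatal("core should be unsealed")
		}
		_ = root // root token for follow-up requests
	}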
diff --git a/vendor/github.com/hashicorp/vault/vault/seal_testing_util.go b/vendor/github.com/hashicorp/vault/vault/seal_testing_util.go new file mode 100644 index 00000000..b919f4a0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/seal_testing_util.go @@ -0,0 +1,41 @@ +package vault + +import ( + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/vault/seal" + shamirseal "github.com/hashicorp/vault/vault/seal/shamir" + testing "github.com/mitchellh/go-testing-interface" +) + +func NewTestSeal(t testing.T, opts *TestSealOpts) Seal { + t.Helper() + if opts == nil { + opts = &TestSealOpts{} + } + if opts.Logger == nil { + opts.Logger = logging.NewVaultLogger(hclog.Debug) + } + + switch opts.StoredKeys { + case StoredKeysSupportedShamirMaster: + newSeal := NewDefaultSeal(shamirseal.NewSeal(opts.Logger)) + // Need StoredShares set or this will look like a legacy shamir seal. + newSeal.SetCachedBarrierConfig(&SealConfig{ + StoredShares: 1, + SecretThreshold: 1, + SecretShares: 1, + }) + return newSeal + case StoredKeysNotSupported: + newSeal := NewDefaultSeal(shamirseal.NewSeal(opts.Logger)) + newSeal.SetCachedBarrierConfig(&SealConfig{ + StoredShares: 0, + SecretThreshold: 1, + SecretShares: 1, + }) + return newSeal + default: + return NewAutoSeal(seal.NewTestSeal(opts.Secret)) + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/sealunwrapper.go b/vendor/github.com/hashicorp/vault/vault/sealunwrapper.go new file mode 100644 index 00000000..461db11e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/sealunwrapper.go @@ -0,0 +1,180 @@ +// +build !enterprise + +package vault + +import ( + "context" + "fmt" + "sync/atomic" + + proto "github.com/golang/protobuf/proto" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/physical" +) + +// NewSealUnwrapper creates a new seal unwrapper +func NewSealUnwrapper(underlying physical.Backend, logger log.Logger) physical.Backend { + ret := &sealUnwrapper{ + underlying: underlying, + logger: logger, + locks: locksutil.CreateLocks(), + allowUnwraps: new(uint32), + } + + if underTxn, ok := underlying.(physical.Transactional); ok { + return &transactionalSealUnwrapper{ + sealUnwrapper: ret, + Transactional: underTxn, + } + } + + return ret +} + +var _ physical.Backend = (*sealUnwrapper)(nil) +var _ physical.Transactional = (*transactionalSealUnwrapper)(nil) + +type sealUnwrapper struct { + underlying physical.Backend + logger log.Logger + locks []*locksutil.LockEntry + allowUnwraps *uint32 +} + +// transactionalSealUnwrapper is a seal unwrapper that wraps a physical that is transactional +type transactionalSealUnwrapper struct { + *sealUnwrapper + physical.Transactional +} + +func (d *sealUnwrapper) Put(ctx context.Context, entry *physical.Entry) error { + if entry == nil { + return nil + } + + locksutil.LockForKey(d.locks, entry.Key).Lock() + defer locksutil.LockForKey(d.locks, entry.Key).Unlock() + + return d.underlying.Put(ctx, entry) +} + +func (d *sealUnwrapper) Get(ctx context.Context, key string) (*physical.Entry, error) { + entry, err := d.underlying.Get(ctx, key) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var performUnwrap bool + se := &physical.EncryptedBlobInfo{} + // If the value ends in our canary value, try to decode the bytes. 
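+	// (The canary is a single trailing 's' byte; if the remainder does not
+	// unmarshal, the value was never seal-wrapped and is served as-is.)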
+ eLen := len(entry.Value) + if eLen > 0 && entry.Value[eLen-1] == 's' { + if err := proto.Unmarshal(entry.Value[:eLen-1], se); err == nil { + // We unmarshaled successfully which means we need to store it as a + // non-proto message + performUnwrap = true + } + } + if !performUnwrap { + return entry, nil + } + // It's actually encrypted and we can't read it + if se.Wrapped { + return nil, fmt.Errorf("cannot decode sealwrapped storage entry %q", entry.Key) + } + if atomic.LoadUint32(d.allowUnwraps) != 1 { + return &physical.Entry{ + Key: entry.Key, + Value: se.Ciphertext, + }, nil + } + + locksutil.LockForKey(d.locks, key).Lock() + defer locksutil.LockForKey(d.locks, key).Unlock() + + // At this point we need to re-read and re-check + entry, err = d.underlying.Get(ctx, key) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + performUnwrap = false + se = &physical.EncryptedBlobInfo{} + // If the value ends in our canary value, try to decode the bytes. + eLen = len(entry.Value) + if eLen > 0 && entry.Value[eLen-1] == 's' { + // We ignore an error because the canary is not a guarantee; if it + // doesn't decode, proceed normally + if err := proto.Unmarshal(entry.Value[:eLen-1], se); err == nil { + // We unmarshaled successfully which means we need to store it as a + // non-proto message + performUnwrap = true + } + } + if !performUnwrap { + return entry, nil + } + if se.Wrapped { + return nil, fmt.Errorf("cannot decode sealwrapped storage entry %q", entry.Key) + } + + entry = &physical.Entry{ + Key: entry.Key, + Value: se.Ciphertext, + } + + if atomic.LoadUint32(d.allowUnwraps) != 1 { + return entry, nil + } + return entry, d.underlying.Put(ctx, entry) +} + +func (d *sealUnwrapper) Delete(ctx context.Context, key string) error { + locksutil.LockForKey(d.locks, key).Lock() + defer locksutil.LockForKey(d.locks, key).Unlock() + + return d.underlying.Delete(ctx, key) +} + +func (d *sealUnwrapper) List(ctx context.Context, prefix string) ([]string, error) { + return d.underlying.List(ctx, prefix) +} + +func (d *transactionalSealUnwrapper) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + // Collect keys that need to be locked + var keys []string + for _, curr := range txns { + keys = append(keys, curr.Entry.Key) + } + // Lock the keys + for _, l := range locksutil.LocksForKeys(d.locks, keys) { + l.Lock() + defer l.Unlock() + } + + if err := d.Transactional.Transaction(ctx, txns); err != nil { + return err + } + + return nil +} + +// This should only run during preSeal which ensures that it can't be run +// concurrently and that it will be run only by the active node +func (d *sealUnwrapper) stopUnwraps() { + atomic.StoreUint32(d.allowUnwraps, 0) +} + +func (d *sealUnwrapper) runUnwraps() { + // Allow key unwraps on key gets. 
This gets set only when running on the + // active node to prevent standbys from changing data underneath the + // primary + atomic.StoreUint32(d.allowUnwraps, 1) +} diff --git a/vendor/github.com/hashicorp/vault/vault/testing.go b/vendor/github.com/hashicorp/vault/vault/testing.go new file mode 100644 index 00000000..7f56e402 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/testing.go @@ -0,0 +1,1911 @@ +package vault + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + mathrand "math/rand" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "sync" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" + hclog "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/mitchellh/copystructure" + + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh" + "golang.org/x/net/http2" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/reload" + dbMysql "github.com/hashicorp/vault/plugins/database/mysql" + dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + testing "github.com/mitchellh/go-testing-interface" + + physInmem "github.com/hashicorp/vault/sdk/physical/inmem" +) + +// This file contains a number of methods that are useful for unit +// tests within other packages. 
+ +const ( + testSharedPublicKey = ` +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9i+hFxZHGo6KblVme4zrAcJstR6I0PTJozW286X4WyvPnkMYDQ5mnhEYC7UWCvjoTWbPEXPX7NjhRtwQTGD67bV+lrxgfyzK1JZbUXK4PwgKJvQD+XyyWYMzDgGSQY61KUSqCxymSm/9NZkPU3ElaQ9xQuTzPpztM4ROfb8f2Yv6/ZESZsTo0MTAkp8Pcy+WkioI/uJ1H7zqs0EA4OMY4aDJRu0UtP4rTVeYNEAuRXdX+eH4aW3KMvhzpFTjMbaJHJXlEeUm2SaX5TNQyTOvghCeQILfYIL/Ca2ij8iwCmulwdV6eQGfd4VDu40PvSnmfoaE38o6HaPnX0kUcnKiT +` + testSharedPrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG +A0OZp4RGAu1Fgr46E1mzxFz1+zY4UbcEExg+u21fpa8YH8sytSWW1FyuD8ICib0A +/l8slmDMw4BkkGOtSlEqgscpkpv/TWZD1NxJWkPcULk8z6c7TOETn2/H9mL+v2RE +mbE6NDEwJKfD3MvlpIqCP7idR+86rNBAODjGOGgyUbtFLT+K01XmDRALkV3V/nh+ +GltyjL4c6RU4zG2iRyV5RHlJtkml+UzUMkzr4IQnkCC32CC/wmtoo/IsAprpcHVe +nkBn3eFQ7uND70p5n6GhN/KOh2j519JFHJyokwIDAQABAoIBAHX7VOvBC3kCN9/x ++aPdup84OE7Z7MvpX6w+WlUhXVugnmsAAVDczhKoUc/WktLLx2huCGhsmKvyVuH+ +MioUiE+vx75gm3qGx5xbtmOfALVMRLopjCnJYf6EaFA0ZeQ+NwowNW7Lu0PHmAU8 +Z3JiX8IwxTz14DU82buDyewO7v+cEr97AnERe3PUcSTDoUXNaoNxjNpEJkKREY6h +4hAY676RT/GsRcQ8tqe/rnCqPHNd7JGqL+207FK4tJw7daoBjQyijWuB7K5chSal +oPInylM6b13ASXuOAOT/2uSUBWmFVCZPDCmnZxy2SdnJGbsJAMl7Ma3MUlaGvVI+ +Tfh1aQkCgYEA4JlNOabTb3z42wz6mz+Nz3JRwbawD+PJXOk5JsSnV7DtPtfgkK9y +6FTQdhnozGWShAvJvc+C4QAihs9AlHXoaBY5bEU7R/8UK/pSqwzam+MmxmhVDV7G +IMQPV0FteoXTaJSikhZ88mETTegI2mik+zleBpVxvfdhE5TR+lq8Br0CgYEA2AwJ +CUD5CYUSj09PluR0HHqamWOrJkKPFPwa+5eiTTCzfBBxImYZh7nXnWuoviXC0sg2 +AuvCW+uZ48ygv/D8gcz3j1JfbErKZJuV+TotK9rRtNIF5Ub7qysP7UjyI7zCssVM +kuDd9LfRXaB/qGAHNkcDA8NxmHW3gpln4CFdSY8CgYANs4xwfercHEWaJ1qKagAe +rZyrMpffAEhicJ/Z65lB0jtG4CiE6w8ZeUMWUVJQVcnwYD+4YpZbX4S7sJ0B8Ydy +AhkSr86D/92dKTIt2STk6aCN7gNyQ1vW198PtaAWH1/cO2UHgHOy3ZUt5X/Uwxl9 +cex4flln+1Viumts2GgsCQKBgCJH7psgSyPekK5auFdKEr5+Gc/jB8I/Z3K9+g4X +5nH3G1PBTCJYLw7hRzw8W/8oALzvddqKzEFHphiGXK94Lqjt/A4q1OdbCrhiE68D +My21P/dAKB1UYRSs9Y8CNyHCjuZM9jSMJ8vv6vG/SOJPsnVDWVAckAbQDvlTHC9t +O98zAoGAcbW6uFDkrv0XMCpB9Su3KaNXOR0wzag+WIFQRXCcoTvxVi9iYfUReQPi +oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F ++B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs= +-----END RSA PRIVATE KEY----- +` +) + +// TestCore returns a pure in-memory, uninitialized core for testing. +func TestCore(t testing.T) *Core { + return TestCoreWithSeal(t, nil, false) +} + +// TestCoreRaw returns a pure in-memory, uninitialized core for testing. The raw +// storage endpoints are enabled with this core. +func TestCoreRaw(t testing.T) *Core { + return TestCoreWithSeal(t, nil, true) +} + +// TestCoreNewSeal returns a pure in-memory, uninitialized core with +// the new seal configuration. +func TestCoreNewSeal(t testing.T) *Core { + seal := NewTestSeal(t, nil) + return TestCoreWithSeal(t, seal, false) +} + +// TestCoreWithConfig returns a pure in-memory, uninitialized core with the +// specified core configurations overridden for testing. +func TestCoreWithConfig(t testing.T, conf *CoreConfig) *Core { + return TestCoreWithSealAndUI(t, conf) +} + +// TestCoreWithSeal returns a pure in-memory, uninitialized core with the +// specified seal for testing. 
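+//
+// For example (mirroring TestCoreNewSeal above):
+//
+//	seal := NewTestSeal(t, nil)
+//	core := TestCoreWithSeal(t, seal, false)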
+func TestCoreWithSeal(t testing.T, testSeal Seal, enableRaw bool) *Core {
+	conf := &CoreConfig{
+		Seal:            testSeal,
+		EnableUI:        false,
+		EnableRaw:       enableRaw,
+		BuiltinRegistry: NewMockBuiltinRegistry(),
+	}
+	return TestCoreWithSealAndUI(t, conf)
+}
+
+func TestCoreUI(t testing.T, enableUI bool) *Core {
+	conf := &CoreConfig{
+		EnableUI:        enableUI,
+		EnableRaw:       true,
+		BuiltinRegistry: NewMockBuiltinRegistry(),
+	}
+	return TestCoreWithSealAndUI(t, conf)
+}
+
+func TestCoreWithSealAndUI(t testing.T, opts *CoreConfig) *Core {
+	logger := logging.NewVaultLogger(log.Trace)
+	physicalBackend, err := physInmem.NewInmem(nil, logger)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	errInjector := physical.NewErrorInjector(physicalBackend, 0, logger)
+
+	// Start off with base test core config
+	conf := testCoreConfig(t, errInjector, logger)
+
+	// Override config values with ones that get passed in
+	conf.EnableUI = opts.EnableUI
+	conf.EnableRaw = opts.EnableRaw
+	conf.Seal = opts.Seal
+	conf.LicensingConfig = opts.LicensingConfig
+	conf.DisableKeyEncodingChecks = opts.DisableKeyEncodingChecks
+	conf.MetricsHelper = opts.MetricsHelper
+
+	if opts.Logger != nil {
+		conf.Logger = opts.Logger
+	}
+
+	for k, v := range opts.LogicalBackends {
+		conf.LogicalBackends[k] = v
+	}
+	for k, v := range opts.CredentialBackends {
+		conf.CredentialBackends[k] = v
+	}
+
+	for k, v := range opts.AuditBackends {
+		conf.AuditBackends[k] = v
+	}
+
+	c, err := NewCore(conf)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return c
+}
+
+func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Logger) *CoreConfig {
+	t.Helper()
+	noopAudits := map[string]audit.Factory{
+		"noop": func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) {
+			view := &logical.InmemStorage{}
+			view.Put(context.Background(), &logical.StorageEntry{
+				Key:   "salt",
+				Value: []byte("foo"),
+			})
+			config.SaltConfig = &salt.Config{
+				HMAC:     sha256.New,
+				HMACType: "hmac-sha256",
+			}
+			config.SaltView = view
+
+			n := &noopAudit{
+				Config: config,
+			}
+			n.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
+				SaltFunc: n.Salt,
+			}
+			return n, nil
+		},
+	}
+
+	noopBackends := make(map[string]logical.Factory)
+	noopBackends["noop"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
+		b := new(framework.Backend)
+		b.Setup(ctx, config)
+		b.BackendType = logical.TypeCredential
+		return b, nil
+	}
+	noopBackends["http"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
+		return new(rawHTTP), nil
+	}
+
+	credentialBackends := make(map[string]logical.Factory)
+	for backendName, backendFactory := range noopBackends {
+		credentialBackends[backendName] = backendFactory
+	}
+	for backendName, backendFactory := range testCredentialBackends {
+		credentialBackends[backendName] = backendFactory
+	}
+
+	logicalBackends := make(map[string]logical.Factory)
+	for backendName, backendFactory := range noopBackends {
+		logicalBackends[backendName] = backendFactory
+	}
+
+	logicalBackends["kv"] = LeasedPassthroughBackendFactory
+	for backendName, backendFactory := range testLogicalBackends {
+		logicalBackends[backendName] = backendFactory
+	}
+
+	conf := &CoreConfig{
+		Physical:           physicalBackend,
+		AuditBackends:      noopAudits,
+		LogicalBackends:    logicalBackends,
+		CredentialBackends: credentialBackends,
+		DisableMlock:       true,
+		Logger:             logger,
+		BuiltinRegistry:    NewMockBuiltinRegistry(),
+	}
+
+	return conf
+}
+
+// TestCoreInit initializes the core and returns the key shares that must
+// be used to unseal the core and a root token.
+func TestCoreInit(t testing.T, core *Core) ([][]byte, string) {
+	t.Helper()
+	secretShares, _, root := TestCoreInitClusterWrapperSetup(t, core, nil)
+	return secretShares, root
+}
+
+func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, handler http.Handler) ([][]byte, [][]byte, string) {
+	t.Helper()
+	core.SetClusterHandler(handler)
+
+	barrierConfig := &SealConfig{
+		SecretShares:    3,
+		SecretThreshold: 3,
+	}
+
+	switch core.seal.StoredKeysSupported() {
+	case StoredKeysNotSupported:
+		barrierConfig.StoredShares = 0
+	default:
+		barrierConfig.StoredShares = 1
+	}
+
+	recoveryConfig := &SealConfig{
+		SecretShares:    3,
+		SecretThreshold: 3,
+	}
+
+	initParams := &InitParams{
+		BarrierConfig:  barrierConfig,
+		RecoveryConfig: recoveryConfig,
+	}
+	if core.seal.StoredKeysSupported() == StoredKeysNotSupported {
+		initParams.LegacyShamirSeal = true
+	}
+	result, err := core.Initialize(context.Background(), initParams)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	return result.SecretShares, result.RecoveryShares, result.RootToken
+}
+
+func TestCoreUnseal(core *Core, key []byte) (bool, error) {
+	return core.Unseal(key)
+}
+
+func TestCoreUnsealWithRecoveryKeys(core *Core, key []byte) (bool, error) {
+	return core.UnsealWithRecoveryKeys(key)
+}
+
+// TestCoreUnsealed returns a pure in-memory core that is already
+// initialized and unsealed.
+func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) {
+	t.Helper()
+	core := TestCore(t)
+	return testCoreUnsealed(t, core)
+}
+
+// TestCoreUnsealedRaw returns a pure in-memory core that is already
+// initialized, unsealed, and with raw endpoints enabled.
+func TestCoreUnsealedRaw(t testing.T) (*Core, [][]byte, string) {
+	t.Helper()
+	core := TestCoreRaw(t)
+	return testCoreUnsealed(t, core)
+}
+
+// TestCoreUnsealedWithConfig returns a pure in-memory core that is already
+// initialized and unsealed, with any provided core config values overridden.
+func TestCoreUnsealedWithConfig(t testing.T, conf *CoreConfig) (*Core, [][]byte, string) {
+	t.Helper()
+	core := TestCoreWithConfig(t, conf)
+	return testCoreUnsealed(t, core)
+}
+
+func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) {
+	t.Helper()
+	keys, token := TestCoreInit(t, core)
+	for _, key := range keys {
+		if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
+			t.Fatalf("unseal err: %s", err)
+		}
+	}
+
+	if core.Sealed() {
+		t.Fatal("should not be sealed")
+	}
+
+	testCoreAddSecretMount(t, core, token)
+
+	return core, keys, token
+}
+
+func testCoreAddSecretMount(t testing.T, core *Core, token string) {
+	kvReq := &logical.Request{
+		Operation:   logical.UpdateOperation,
+		ClientToken: token,
+		Path:        "sys/mounts/secret",
+		Data: map[string]interface{}{
+			"type":        "kv",
+			"path":        "secret/",
+			"description": "key/value secret storage",
+			"options": map[string]string{
+				"version": "1",
+			},
+		},
+	}
+	resp, err := core.HandleRequest(namespace.RootContext(nil), kvReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.IsError() {
+		t.Fatal(resp.Error())
+	}
+}
+
+func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) {
+	t.Helper()
+	logger := logging.NewVaultLogger(log.Trace)
+	conf := testCoreConfig(t, backend, logger)
+	conf.Seal = NewTestSeal(t, nil)
+
+	core, err := NewCore(conf)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	keys, token := TestCoreInit(t, core)
+	for _, key := range keys {
+		if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
+			t.Fatalf("unseal err: %s", err)
+		}
+	}
+
+	if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
+		t.Fatal(err)
+	}
+
+	if core.Sealed() {
+		t.Fatal("should not be sealed")
+	}
+
+	return core, keys, token
+}
+
+// TestKeyCopy is a silly little function to just copy the key so that
+// it can be used with Unseal easily.
+func TestKeyCopy(key []byte) []byte {
+	result := make([]byte, len(key))
+	copy(result, key)
+	return result
+}
+
+func TestDynamicSystemView(c *Core) *dynamicSystemView {
+	me := &MountEntry{
+		Config: MountConfig{
+			DefaultLeaseTTL: 24 * time.Hour,
+			MaxLeaseTTL:     2 * 24 * time.Hour,
+		},
+	}
+
+	return &dynamicSystemView{c, me}
+}
+
+// TestAddTestPlugin registers the testFunc as part of the plugin command to the
+// plugin catalog. If provided, tempDir is used as the plugin directory.
+func TestAddTestPlugin(t testing.T, c *Core, name string, pluginType consts.PluginType, testFunc string, env []string, tempDir string) { + file, err := os.Open(os.Args[0]) + if err != nil { + t.Fatal(err) + } + defer file.Close() + + dirPath := filepath.Dir(os.Args[0]) + fileName := filepath.Base(os.Args[0]) + + if tempDir != "" { + fi, err := file.Stat() + if err != nil { + t.Fatal(err) + } + + // Copy over the file to the temp dir + dst := filepath.Join(tempDir, fileName) + out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode()) + if err != nil { + t.Fatal(err) + } + defer out.Close() + + if _, err = io.Copy(out, file); err != nil { + t.Fatal(err) + } + err = out.Sync() + if err != nil { + t.Fatal(err) + } + + dirPath = tempDir + } + + // Determine plugin directory full path, evaluating potential symlink path + fullPath, err := filepath.EvalSymlinks(dirPath) + if err != nil { + t.Fatal(err) + } + + reader, err := os.Open(filepath.Join(fullPath, fileName)) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + // Find out the sha256 + hash := sha256.New() + + _, err = io.Copy(hash, reader) + if err != nil { + t.Fatal(err) + } + + sum := hash.Sum(nil) + + // Set core's plugin directory and plugin catalog directory + c.pluginDirectory = fullPath + c.pluginCatalog.directory = fullPath + + args := []string{fmt.Sprintf("--test.run=%s", testFunc)} + err = c.pluginCatalog.Set(context.Background(), name, pluginType, fileName, args, env, sum) + if err != nil { + t.Fatal(err) + } +} + +var testLogicalBackends = map[string]logical.Factory{} +var testCredentialBackends = map[string]logical.Factory{} + +// StartSSHHostTestServer starts the test server which responds to SSH +// authentication. Used to test the SSH secret backend. +func StartSSHHostTestServer() (string, error) { + pubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(testSharedPublicKey)) + if err != nil { + return "", fmt.Errorf("error parsing public key") + } + serverConfig := &ssh.ServerConfig{ + PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) { + if bytes.Compare(pubKey.Marshal(), key.Marshal()) == 0 { + return &ssh.Permissions{}, nil + } else { + return nil, fmt.Errorf("key does not match") + } + }, + } + signer, err := ssh.ParsePrivateKey([]byte(testSharedPrivateKey)) + if err != nil { + panic("Error parsing private key") + } + serverConfig.AddHostKey(signer) + + soc, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return "", fmt.Errorf("error listening to connection") + } + + go func() { + for { + conn, err := soc.Accept() + if err != nil { + panic(fmt.Sprintf("Error accepting incoming connection: %s", err)) + } + defer conn.Close() + sshConn, chanReqs, _, err := ssh.NewServerConn(conn, serverConfig) + if err != nil { + panic(fmt.Sprintf("Handshaking error: %v", err)) + } + + go func() { + for chanReq := range chanReqs { + go func(chanReq ssh.NewChannel) { + if chanReq.ChannelType() != "session" { + chanReq.Reject(ssh.UnknownChannelType, "unknown channel type") + return + } + + ch, requests, err := chanReq.Accept() + if err != nil { + panic(fmt.Sprintf("Error accepting channel: %s", err)) + } + + go func(ch ssh.Channel, in <-chan *ssh.Request) { + for req := range in { + executeServerCommand(ch, req) + } + }(ch, requests) + }(chanReq) + } + sshConn.Close() + }() + } + }() + return soc.Addr().String(), nil +} + +// This executes the commands requested to be run on the server. +// Used to test the SSH secret backend. 
+func executeServerCommand(ch ssh.Channel, req *ssh.Request) { + command := string(req.Payload[4:]) + cmd := exec.Command("/bin/bash", []string{"-c", command}...) + req.Reply(true, nil) + + cmd.Stdout = ch + cmd.Stderr = ch + cmd.Stdin = ch + + err := cmd.Start() + if err != nil { + panic(fmt.Sprintf("Error starting the command: '%s'", err)) + } + + go func() { + _, err := cmd.Process.Wait() + if err != nil { + panic(fmt.Sprintf("Error while waiting for command to finish:'%s'", err)) + } + ch.Close() + }() +} + +// This adds a credential backend for the test core. This needs to be +// invoked before the test core is created. +func AddTestCredentialBackend(name string, factory logical.Factory) error { + if name == "" { + return fmt.Errorf("missing backend name") + } + if factory == nil { + return fmt.Errorf("missing backend factory function") + } + testCredentialBackends[name] = factory + return nil +} + +// This adds a logical backend for the test core. This needs to be +// invoked before the test core is created. +func AddTestLogicalBackend(name string, factory logical.Factory) error { + if name == "" { + return fmt.Errorf("missing backend name") + } + if factory == nil { + return fmt.Errorf("missing backend factory function") + } + testLogicalBackends[name] = factory + return nil +} + +type noopAudit struct { + Config *audit.BackendConfig + salt *salt.Salt + saltMutex sync.RWMutex + formatter audit.AuditFormatter + records [][]byte + l sync.RWMutex +} + +func (n *noopAudit) GetHash(ctx context.Context, data string) (string, error) { + salt, err := n.Salt(ctx) + if err != nil { + return "", err + } + return salt.GetIdentifiedHMAC(data), nil +} + +func (n *noopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error { + n.l.Lock() + defer n.l.Unlock() + var w bytes.Buffer + err := n.formatter.FormatRequest(ctx, &w, audit.FormatterConfig{}, in) + if err != nil { + return err + } + n.records = append(n.records, w.Bytes()) + return nil +} + +func (n *noopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error { + n.l.Lock() + defer n.l.Unlock() + var w bytes.Buffer + err := n.formatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in) + if err != nil { + return err + } + n.records = append(n.records, w.Bytes()) + return nil +} + +func (n *noopAudit) Reload(_ context.Context) error { + return nil +} + +func (n *noopAudit) Invalidate(_ context.Context) { + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + n.salt = nil +} + +func (n *noopAudit) Salt(ctx context.Context) (*salt.Salt, error) { + n.saltMutex.RLock() + if n.salt != nil { + defer n.saltMutex.RUnlock() + return n.salt, nil + } + n.saltMutex.RUnlock() + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + if n.salt != nil { + return n.salt, nil + } + salt, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig) + if err != nil { + return nil, err + } + n.salt = salt + return salt, nil +} + +func AddNoopAudit(conf *CoreConfig) { + conf.AuditBackends = map[string]audit.Factory{ + "noop": func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) { + view := &logical.InmemStorage{} + view.Put(context.Background(), &logical.StorageEntry{ + Key: "salt", + Value: []byte("foo"), + }) + n := &noopAudit{ + Config: config, + } + n.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ + SaltFunc: n.Salt, + } + return n, nil + }, + } +} + +type rawHTTP struct{} + +func (n *rawHTTP) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) { + return &logical.Response{ + 
		Data: map[string]interface{}{
+			logical.HTTPStatusCode:  200,
+			logical.HTTPContentType: "text/plain",
+			logical.HTTPRawBody:     []byte("hello world"),
+		},
+	}, nil
+}
+
+func (n *rawHTTP) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) {
+	return false, false, nil
+}
+
+func (n *rawHTTP) SpecialPaths() *logical.Paths {
+	return &logical.Paths{Unauthenticated: []string{"*"}}
+}
+
+func (n *rawHTTP) System() logical.SystemView {
+	return logical.StaticSystemView{
+		DefaultLeaseTTLVal: time.Hour * 24,
+		MaxLeaseTTLVal:     time.Hour * 24 * 32,
+	}
+}
+
+func (n *rawHTTP) Logger() log.Logger {
+	return logging.NewVaultLogger(log.Trace)
+}
+
+func (n *rawHTTP) Cleanup(ctx context.Context) {
+	// noop
+}
+
+func (n *rawHTTP) Initialize(ctx context.Context, req *logical.InitializationRequest) error {
+	return nil
+}
+
+func (n *rawHTTP) InvalidateKey(context.Context, string) {
+	// noop
+}
+
+func (n *rawHTTP) Setup(ctx context.Context, config *logical.BackendConfig) error {
+	// noop
+	return nil
+}
+
+func (n *rawHTTP) Type() logical.BackendType {
+	return logical.TypeLogical
+}
+
+func GenerateRandBytes(length int) ([]byte, error) {
+	if length < 0 {
+		return nil, fmt.Errorf("length must be >= 0")
+	}
+
+	buf := make([]byte, length)
+	if length == 0 {
+		return buf, nil
+	}
+
+	n, err := rand.Read(buf)
+	if err != nil {
+		return nil, err
+	}
+	if n != length {
+		return nil, fmt.Errorf("unable to read %d bytes; only read %d", length, n)
+	}
+
+	return buf, nil
+}
+
+func TestWaitActive(t testing.T, core *Core) {
+	t.Helper()
+	if err := TestWaitActiveWithError(core); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestWaitActiveWithError(core *Core) error {
+	start := time.Now()
+	var standby bool
+	var err error
+	for time.Now().Sub(start) < 30*time.Second {
+		standby, err = core.Standby()
+		if err != nil {
+			return err
+		}
+		if !standby {
+			break
+		}
+	}
+	if standby {
+		return errors.New("should not be in standby mode")
+	}
+	return nil
+}
+
+type TestCluster struct {
+	BarrierKeys        [][]byte
+	RecoveryKeys       [][]byte
+	CACert             *x509.Certificate
+	CACertBytes        []byte
+	CACertPEM          []byte
+	CACertPEMFile      string
+	CAKey              *ecdsa.PrivateKey
+	CAKeyPEM           []byte
+	Cores              []*TestClusterCore
+	ID                 string
+	RootToken          string
+	RootCAs            *x509.CertPool
+	TempDir            string
+	ClientAuthRequired bool
+	Logger             log.Logger
+	CleanupFunc        func()
+	SetupFunc          func()
+}
+
+func (c *TestCluster) Start() {
+	for _, core := range c.Cores {
+		if core.Server != nil {
+			for _, ln := range core.Listeners {
+				go core.Server.Serve(ln)
+			}
+		}
+	}
+	if c.SetupFunc != nil {
+		c.SetupFunc()
+	}
+}
+
+// UnsealCores uses the cluster barrier keys to unseal the test cluster cores
+func (c *TestCluster) UnsealCores(t testing.T) {
+	t.Helper()
+	if err := c.UnsealCoresWithError(false); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func (c *TestCluster) UnsealCoresWithError(useStoredKeys bool) error {
+	unseal := func(core *Core) error {
+		for _, key := range c.BarrierKeys {
+			if _, err := core.Unseal(TestKeyCopy(key)); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+	if useStoredKeys {
+		unseal = func(core *Core) error {
+			return core.UnsealWithStoredKeys(context.Background())
+		}
+	}
+
+	// Unseal first core
+	if err := unseal(c.Cores[0].Core); err != nil {
+		return fmt.Errorf("unseal core %d err: %s", 0, err)
+	}
+
+	// Verify unsealed
+	if c.Cores[0].Sealed() {
+		return fmt.Errorf("should not be sealed")
+	}
+
+	if err := TestWaitActiveWithError(c.Cores[0].Core); err != nil {
+		return err
+	}
+
+	// Unseal other cores
+	for
i := 1; i < len(c.Cores); i++ { + if err := unseal(c.Cores[i].Core); err != nil { + return fmt.Errorf("unseal core %d err: %s", i, err) + } + } + + // Let them come fully up to standby + time.Sleep(2 * time.Second) + + // Ensure cluster connection info is populated. + // Other cores should not come up as leaders. + for i := 1; i < len(c.Cores); i++ { + isLeader, _, _, err := c.Cores[i].Leader() + if err != nil { + return err + } + if isLeader { + return fmt.Errorf("core[%d] should not be leader", i) + } + } + + return nil +} + +func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) { + for _, key := range c.BarrierKeys { + if _, err := core.Core.Unseal(TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } +} + +func (c *TestCluster) EnsureCoresSealed(t testing.T) { + t.Helper() + if err := c.ensureCoresSealed(); err != nil { + t.Fatal(err) + } +} + +func (c *TestClusterCore) Seal(t testing.T) { + t.Helper() + if err := c.Core.sealInternal(); err != nil { + t.Fatal(err) + } +} + +func CleanupClusters(clusters []*TestCluster) { + wg := &sync.WaitGroup{} + for _, cluster := range clusters { + wg.Add(1) + lc := cluster + go func() { + defer wg.Done() + lc.Cleanup() + }() + } + wg.Wait() +} + +func (c *TestCluster) Cleanup() { + c.Logger.Info("cleaning up vault cluster") + for _, core := range c.Cores { + core.CoreConfig.Logger.SetLevel(log.Error) + } + + // Close listeners + wg := &sync.WaitGroup{} + for _, core := range c.Cores { + wg.Add(1) + lc := core + + go func() { + defer wg.Done() + if lc.Listeners != nil { + for _, ln := range lc.Listeners { + ln.Close() + } + } + if lc.licensingStopCh != nil { + close(lc.licensingStopCh) + lc.licensingStopCh = nil + } + + if err := lc.Shutdown(); err != nil { + lc.Logger().Error("error during shutdown; abandoning sealing", "error", err) + } else { + timeout := time.Now().Add(60 * time.Second) + for { + if time.Now().After(timeout) { + lc.Logger().Error("timeout waiting for core to seal") + } + if lc.Sealed() { + break + } + time.Sleep(250 * time.Millisecond) + } + } + }() + } + + wg.Wait() + + // Remove any temp dir that exists + if c.TempDir != "" { + os.RemoveAll(c.TempDir) + } + + // Give time to actually shut down/clean up before the next test + time.Sleep(time.Second) + if c.CleanupFunc != nil { + c.CleanupFunc() + } +} + +func (c *TestCluster) ensureCoresSealed() error { + for _, core := range c.Cores { + if err := core.Shutdown(); err != nil { + return err + } + timeout := time.Now().Add(60 * time.Second) + for { + if time.Now().After(timeout) { + return fmt.Errorf("timeout waiting for core to seal") + } + if core.Sealed() { + break + } + time.Sleep(250 * time.Millisecond) + } + } + return nil +} + +func SetReplicationFailureMode(core *TestClusterCore, mode uint32) { + atomic.StoreUint32(core.Core.replicationFailure, mode) +} + +type TestListener struct { + net.Listener + Address *net.TCPAddr +} + +type TestClusterCore struct { + *Core + CoreConfig *CoreConfig + Client *api.Client + Handler http.Handler + Listeners []*TestListener + ReloadFuncs *map[string][]reload.ReloadFunc + ReloadFuncsLock *sync.RWMutex + Server *http.Server + ServerCert *x509.Certificate + ServerCertBytes []byte + ServerCertPEM []byte + ServerKey *ecdsa.PrivateKey + ServerKeyPEM []byte + TLSConfig *tls.Config + UnderlyingStorage physical.Backend + UnderlyingRawStorage physical.Backend + Barrier SecurityBarrier + NodeID string +} + +type PhysicalBackendBundle struct { + Backend physical.Backend + HABackend physical.HABackend + Cleanup 
func() +} + +type TestClusterOptions struct { + KeepStandbysSealed bool + SkipInit bool + HandlerFunc func(*HandlerProperties) http.Handler + DefaultHandlerProperties HandlerProperties + BaseListenAddress string + NumCores int + SealFunc func() Seal + Logger log.Logger + TempDir string + CACert []byte + CAKey *ecdsa.PrivateKey + // PhysicalFactory is used to create backends. + // The int argument is the index of the core within the cluster, i.e. first + // core in cluster will have 0, second 1, etc. + // If the backend is shared across the cluster (i.e. is not Raft) then it + // should return nil when coreIdx != 0. + PhysicalFactory func(t testing.T, coreIdx int, logger hclog.Logger) *PhysicalBackendBundle + // FirstCoreNumber is used to assign a unique number to each core within + // a multi-cluster setup. + FirstCoreNumber int + RequireClientAuth bool + // SetupFunc is called after the cluster is started. + SetupFunc func(t testing.T, c *TestCluster) +} + +var DefaultNumCores = 3 + +type certInfo struct { + cert *x509.Certificate + certPEM []byte + certBytes []byte + key *ecdsa.PrivateKey + keyPEM []byte +} + +// NewTestCluster creates a new test cluster based on the provided core config +// and test cluster options. +// +// N.B. Even though a single base CoreConfig is provided, NewTestCluster will instantiate a +// core config for each core it creates. If separate seal per core is desired, opts.SealFunc +// can be provided to generate a seal for each one. Otherwise, the provided base.Seal will be +// shared among cores. NewCore's default behavior is to generate a new DefaultSeal if the +// provided Seal in coreConfig (i.e. base.Seal) is nil. +// +// If opts.Logger is provided, it takes precedence and will be used as the cluster +// logger and will be the basis for each core's logger. If no opts.Logger is +// given, one will be generated based on t.Name() for the cluster logger, and if +// no base.Logger is given will also be used as the basis for each core's logger. 
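+//
+// A typical invocation looks roughly like (sketch; opts-based handler and
+// listener wiring is omitted):
+//
+//	cluster := NewTestCluster(t, nil, &TestClusterOptions{NumCores: 3})
+//	cluster.Start()
+//	defer cluster.Cleanup()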
+func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster { + var err error + + var numCores int + if opts == nil || opts.NumCores == 0 { + numCores = DefaultNumCores + } else { + numCores = opts.NumCores + } + + var firstCoreNumber int + if opts != nil { + firstCoreNumber = opts.FirstCoreNumber + } + + certIPs := []net.IP{ + net.IPv6loopback, + net.ParseIP("127.0.0.1"), + } + var baseAddr *net.TCPAddr + if opts != nil && opts.BaseListenAddress != "" { + baseAddr, err = net.ResolveTCPAddr("tcp", opts.BaseListenAddress) + if err != nil { + t.Fatal("could not parse given base IP") + } + certIPs = append(certIPs, baseAddr.IP) + } + + var testCluster TestCluster + + if opts != nil && opts.Logger != nil { + testCluster.Logger = opts.Logger + } else { + testCluster.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name()) + } + + if opts != nil && opts.TempDir != "" { + if _, err := os.Stat(opts.TempDir); os.IsNotExist(err) { + if err := os.MkdirAll(opts.TempDir, 0700); err != nil { + t.Fatal(err) + } + } + testCluster.TempDir = opts.TempDir + } else { + tempDir, err := ioutil.TempDir("", "vault-test-cluster-") + if err != nil { + t.Fatal(err) + } + testCluster.TempDir = tempDir + } + + var caKey *ecdsa.PrivateKey + if opts != nil && opts.CAKey != nil { + caKey = opts.CAKey + } else { + caKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + } + testCluster.CAKey = caKey + var caBytes []byte + if opts != nil && len(opts.CACert) > 0 { + caBytes = opts.CACert + } else { + caCertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + DNSNames: []string{"localhost"}, + IPAddresses: certIPs, + KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey) + if err != nil { + t.Fatal(err) + } + } + caCert, err := x509.ParseCertificate(caBytes) + if err != nil { + t.Fatal(err) + } + testCluster.CACert = caCert + testCluster.CACertBytes = caBytes + testCluster.RootCAs = x509.NewCertPool() + testCluster.RootCAs.AddCert(caCert) + caCertPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + testCluster.CACertPEM = pem.EncodeToMemory(caCertPEMBlock) + testCluster.CACertPEMFile = filepath.Join(testCluster.TempDir, "ca_cert.pem") + err = ioutil.WriteFile(testCluster.CACertPEMFile, testCluster.CACertPEM, 0755) + if err != nil { + t.Fatal(err) + } + marshaledCAKey, err := x509.MarshalECPrivateKey(caKey) + if err != nil { + t.Fatal(err) + } + caKeyPEMBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledCAKey, + } + testCluster.CAKeyPEM = pem.EncodeToMemory(caKeyPEMBlock) + err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "ca_key.pem"), testCluster.CAKeyPEM, 0755) + if err != nil { + t.Fatal(err) + } + + var certInfoSlice []*certInfo + + // + // Certs generation + // + for i := 0; i < numCores; i++ { + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + DNSNames: []string{"localhost"}, + IPAddresses: certIPs, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: 
x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, caCert, key.Public(), caKey) + if err != nil { + t.Fatal(err) + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + t.Fatal(err) + } + certPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + } + certPEM := pem.EncodeToMemory(certPEMBlock) + marshaledKey, err := x509.MarshalECPrivateKey(key) + if err != nil { + t.Fatal(err) + } + keyPEMBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledKey, + } + keyPEM := pem.EncodeToMemory(keyPEMBlock) + + certInfoSlice = append(certInfoSlice, &certInfo{ + cert: cert, + certPEM: certPEM, + certBytes: certBytes, + key: key, + keyPEM: keyPEM, + }) + } + + // + // Listener setup + // + ports := make([]int, numCores) + if baseAddr != nil { + for i := 0; i < numCores; i++ { + ports[i] = baseAddr.Port + i + } + } else { + baseAddr = &net.TCPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 0, + } + } + + listeners := [][]*TestListener{} + servers := []*http.Server{} + handlers := []http.Handler{} + tlsConfigs := []*tls.Config{} + certGetters := []*reload.CertificateGetter{} + for i := 0; i < numCores; i++ { + baseAddr.Port = ports[i] + ln, err := net.ListenTCP("tcp", baseAddr) + if err != nil { + t.Fatal(err) + } + certFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_cert.pem", i+1, ln.Addr().(*net.TCPAddr).Port)) + keyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_key.pem", i+1, ln.Addr().(*net.TCPAddr).Port)) + err = ioutil.WriteFile(certFile, certInfoSlice[i].certPEM, 0755) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(keyFile, certInfoSlice[i].keyPEM, 0755) + if err != nil { + t.Fatal(err) + } + tlsCert, err := tls.X509KeyPair(certInfoSlice[i].certPEM, certInfoSlice[i].keyPEM) + if err != nil { + t.Fatal(err) + } + certGetter := reload.NewCertificateGetter(certFile, keyFile, "") + certGetters = append(certGetters, certGetter) + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + RootCAs: testCluster.RootCAs, + ClientCAs: testCluster.RootCAs, + ClientAuth: tls.RequestClientCert, + NextProtos: []string{"h2", "http/1.1"}, + GetCertificate: certGetter.GetCertificate, + } + if opts != nil && opts.RequireClientAuth { + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + testCluster.ClientAuthRequired = true + } + tlsConfig.BuildNameToCertificate() + tlsConfigs = append(tlsConfigs, tlsConfig) + lns := []*TestListener{&TestListener{ + Listener: tls.NewListener(ln, tlsConfig), + Address: ln.Addr().(*net.TCPAddr), + }, + } + listeners = append(listeners, lns) + var handler http.Handler = http.NewServeMux() + handlers = append(handlers, handler) + server := &http.Server{ + Handler: handler, + ErrorLog: testCluster.Logger.StandardLogger(nil), + } + servers = append(servers, server) + } + + // Create three cores with the same physical and different redirect/cluster + // addrs. + // N.B.: On OSX, instead of random ports, it assigns new ports to new + // listeners sequentially. Aside from being a bad idea in a security sense, + // it also broke tests that assumed it was OK to just use the port above + // the redirect addr. 
This has now been changed to 105 ports above, but if + // we ever do more than three nodes in a cluster it may need to be bumped. + // Note: it's 105 so that we don't conflict with a running Consul by + // default. + coreConfig := &CoreConfig{ + LogicalBackends: make(map[string]logical.Factory), + CredentialBackends: make(map[string]logical.Factory), + AuditBackends: make(map[string]audit.Factory), + RedirectAddr: fmt.Sprintf("https://127.0.0.1:%d", listeners[0][0].Address.Port), + ClusterAddr: "https://127.0.0.1:0", + DisableMlock: true, + EnableUI: true, + EnableRaw: true, + BuiltinRegistry: NewMockBuiltinRegistry(), + } + + if base != nil { + coreConfig.RawConfig = base.RawConfig + coreConfig.DisableCache = base.DisableCache + coreConfig.EnableUI = base.EnableUI + coreConfig.DefaultLeaseTTL = base.DefaultLeaseTTL + coreConfig.MaxLeaseTTL = base.MaxLeaseTTL + coreConfig.CacheSize = base.CacheSize + coreConfig.PluginDirectory = base.PluginDirectory + coreConfig.Seal = base.Seal + coreConfig.DevToken = base.DevToken + coreConfig.EnableRaw = base.EnableRaw + coreConfig.DisableSealWrap = base.DisableSealWrap + coreConfig.DevLicenseDuration = base.DevLicenseDuration + coreConfig.DisableCache = base.DisableCache + coreConfig.LicensingConfig = base.LicensingConfig + coreConfig.DisablePerformanceStandby = base.DisablePerformanceStandby + coreConfig.MetricsHelper = base.MetricsHelper + coreConfig.SecureRandomReader = base.SecureRandomReader + if base.BuiltinRegistry != nil { + coreConfig.BuiltinRegistry = base.BuiltinRegistry + } + + if !coreConfig.DisableMlock { + base.DisableMlock = false + } + + if base.Physical != nil { + coreConfig.Physical = base.Physical + } + + if base.HAPhysical != nil { + coreConfig.HAPhysical = base.HAPhysical + } + + // Used to set something non-working to test fallback + switch base.ClusterAddr { + case "empty": + coreConfig.ClusterAddr = "" + case "": + default: + coreConfig.ClusterAddr = base.ClusterAddr + } + + if base.LogicalBackends != nil { + for k, v := range base.LogicalBackends { + coreConfig.LogicalBackends[k] = v + } + } + if base.CredentialBackends != nil { + for k, v := range base.CredentialBackends { + coreConfig.CredentialBackends[k] = v + } + } + if base.AuditBackends != nil { + for k, v := range base.AuditBackends { + coreConfig.AuditBackends[k] = v + } + } + if base.Logger != nil { + coreConfig.Logger = base.Logger + } + + coreConfig.ClusterCipherSuites = base.ClusterCipherSuites + + coreConfig.DisableCache = base.DisableCache + + coreConfig.DevToken = base.DevToken + coreConfig.CounterSyncInterval = base.CounterSyncInterval + coreConfig.RecoveryMode = base.RecoveryMode + } + + if coreConfig.RawConfig == nil { + coreConfig.RawConfig = new(server.Config) + } + + addAuditBackend := len(coreConfig.AuditBackends) == 0 + if addAuditBackend { + AddNoopAudit(coreConfig) + } + + if coreConfig.Physical == nil && (opts == nil || opts.PhysicalFactory == nil) { + coreConfig.Physical, err = physInmem.NewInmem(nil, testCluster.Logger) + if err != nil { + t.Fatal(err) + } + } + if coreConfig.HAPhysical == nil && (opts == nil || opts.PhysicalFactory == nil) { + haPhys, err := physInmem.NewInmemHA(nil, testCluster.Logger) + if err != nil { + t.Fatal(err) + } + coreConfig.HAPhysical = haPhys.(physical.HABackend) + } + + pubKey, priKey, err := testGenerateCoreKeys() + if err != nil { + t.Fatalf("err: %v", err) + } + + cleanupFuncs := []func(){} + cores := []*Core{} + coreConfigs := []*CoreConfig{} + for i := 0; i < numCores; i++ { + localConfig := *coreConfig + 
localConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port) + + // if opts.SealFunc is provided, use that to generate a seal for the config instead + if opts != nil && opts.SealFunc != nil { + localConfig.Seal = opts.SealFunc() + } + + if coreConfig.Logger == nil || (opts != nil && opts.Logger != nil) { + localConfig.Logger = testCluster.Logger.Named(fmt.Sprintf("core%d", i)) + } + if opts != nil && opts.PhysicalFactory != nil { + physBundle := opts.PhysicalFactory(t, i, localConfig.Logger) + switch { + case physBundle == nil && coreConfig.Physical != nil: + case physBundle == nil && coreConfig.Physical == nil: + t.Fatal("PhysicalFactory produced no physical and none in CoreConfig") + case physBundle != nil: + testCluster.Logger.Info("created physical backend", "instance", i) + coreConfig.Physical = physBundle.Backend + localConfig.Physical = physBundle.Backend + base.Physical = physBundle.Backend + haBackend := physBundle.HABackend + if haBackend == nil { + if ha, ok := physBundle.Backend.(physical.HABackend); ok { + haBackend = ha + } + } + coreConfig.HAPhysical = haBackend + localConfig.HAPhysical = haBackend + if physBundle.Cleanup != nil { + cleanupFuncs = append(cleanupFuncs, physBundle.Cleanup) + } + } + } + + switch { + case localConfig.LicensingConfig != nil: + if pubKey != nil { + localConfig.LicensingConfig.AdditionalPublicKeys = append(localConfig.LicensingConfig.AdditionalPublicKeys, pubKey.(ed25519.PublicKey)) + } + default: + localConfig.LicensingConfig = testGetLicensingConfig(pubKey) + } + + if localConfig.MetricsHelper == nil { + inm := metrics.NewInmemSink(10*time.Second, time.Minute) + metrics.DefaultInmemSignal(inm) + localConfig.MetricsHelper = metricsutil.NewMetricsHelper(inm, false) + } + + c, err := NewCore(&localConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + c.coreNumber = firstCoreNumber + i + cores = append(cores, c) + coreConfigs = append(coreConfigs, &localConfig) + if opts != nil && opts.HandlerFunc != nil { + props := opts.DefaultHandlerProperties + props.Core = c + if props.MaxRequestDuration == 0 { + props.MaxRequestDuration = DefaultMaxRequestDuration + } + handlers[i] = opts.HandlerFunc(&props) + servers[i].Handler = handlers[i] + } + + // Set this in case the Seal was manually set before the core was + // created + if localConfig.Seal != nil { + localConfig.Seal.SetCore(c) + } + } + + // + // Clustering setup + // + clusterAddrGen := func(lns []*TestListener) []*net.TCPAddr { + ret := make([]*net.TCPAddr, len(lns)) + for i, ln := range lns { + ret[i] = &net.TCPAddr{ + IP: ln.Address.IP, + Port: 0, + } + } + return ret + } + + for i := 0; i < numCores; i++ { + if coreConfigs[i].ClusterAddr != "" { + cores[i].SetClusterListenerAddrs(clusterAddrGen(listeners[i])) + cores[i].SetClusterHandler(handlers[i]) + } + } + + if opts == nil || !opts.SkipInit { + bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, cores[0], handlers[0]) + barrierKeys, _ := copystructure.Copy(bKeys) + testCluster.BarrierKeys = barrierKeys.([][]byte) + recoveryKeys, _ := copystructure.Copy(rKeys) + testCluster.RecoveryKeys = recoveryKeys.([][]byte) + testCluster.RootToken = root + + // Write root token and barrier keys + err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(root), 0755) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + for i, key := range testCluster.BarrierKeys { + buf.Write([]byte(base64.StdEncoding.EncodeToString(key))) + if i < len(testCluster.BarrierKeys)-1 { + 
buf.WriteRune('\n') + } + } + err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "barrier_keys"), buf.Bytes(), 0755) + if err != nil { + t.Fatal(err) + } + for i, key := range testCluster.RecoveryKeys { + buf.Write([]byte(base64.StdEncoding.EncodeToString(key))) + if i < len(testCluster.RecoveryKeys)-1 { + buf.WriteRune('\n') + } + } + err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "recovery_keys"), buf.Bytes(), 0755) + if err != nil { + t.Fatal(err) + } + + // Unseal first core + for _, key := range bKeys { + if _, err := cores[0].Unseal(TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } + + ctx := context.Background() + + // If stored keys is supported, the above will be a no-op, so trigger auto-unseal + // using stored keys to try to unseal + if err := cores[0].UnsealWithStoredKeys(ctx); err != nil { + t.Fatal(err) + } + + // Verify unsealed + if cores[0].Sealed() { + t.Fatal("should not be sealed") + } + + TestWaitActive(t, cores[0]) + + // Existing tests rely on this; we can make a toggle to disable it + // later if we want + kvReq := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: testCluster.RootToken, + Path: "sys/mounts/secret", + Data: map[string]interface{}{ + "type": "kv", + "path": "secret/", + "description": "key/value secret storage", + "options": map[string]string{ + "version": "1", + }, + }, + } + resp, err := cores[0].HandleRequest(namespace.RootContext(ctx), kvReq) + if err != nil { + t.Fatal(err) + } + if resp.IsError() { + t.Fatal(err) + } + + cfg, err := cores[0].seal.BarrierConfig(ctx) + if err != nil { + t.Fatal(err) + } + + // Unseal other cores unless otherwise specified + if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 { + for i := 1; i < numCores; i++ { + cores[i].seal.SetCachedBarrierConfig(cfg) + for _, key := range bKeys { + if _, err := cores[i].Unseal(TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } + + // If stored keys is supported, the above will be a no-op, so trigger auto-unseal + // using stored keys + if err := cores[i].UnsealWithStoredKeys(ctx); err != nil { + t.Fatal(err) + } + } + + // Let them come fully up to standby + time.Sleep(2 * time.Second) + + // Ensure cluster connection info is populated. + // Other cores should not come up as leaders. + for i := 1; i < numCores; i++ { + isLeader, _, _, err := cores[i].Leader() + if err != nil { + t.Fatal(err) + } + if isLeader { + t.Fatalf("core[%d] should not be leader", i) + } + } + } + + // + // Set test cluster core(s) and test cluster + // + cluster, err := cores[0].Cluster(context.Background()) + if err != nil { + t.Fatal(err) + } + testCluster.ID = cluster.ID + + if addAuditBackend { + // Enable auditing.
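+ // Enabling is done directly against sys/audit/<path> on the core, the same update operation the `vault audit enable` CLI would issue; the NoopAudit backend wired in above simply records requests and responses in memory so tests can assert on them.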
+ auditReq := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: testCluster.RootToken, + Path: "sys/audit/noop", + Data: map[string]interface{}{ + "type": "noop", + }, + } + resp, err = cores[0].HandleRequest(namespace.RootContext(ctx), auditReq) + if err != nil { + t.Fatal(err) + } + + if resp.IsError() { + t.Fatal(err) + } + } + } + + getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client { + transport := cleanhttp.DefaultPooledTransport() + transport.TLSClientConfig = tlsConfig.Clone() + if err := http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } + client := &http.Client{ + Transport: transport, + CheckRedirect: func(*http.Request, []*http.Request) error { + // This can of course be overridden per-test by using its own client + return fmt.Errorf("redirects not allowed in these tests") + }, + } + config := api.DefaultConfig() + if config.Error != nil { + t.Fatal(config.Error) + } + config.Address = fmt.Sprintf("https://127.0.0.1:%d", port) + config.HttpClient = client + config.MaxRetries = 0 + apiClient, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + if opts == nil || !opts.SkipInit { + apiClient.SetToken(testCluster.RootToken) + } + return apiClient + } + + var ret []*TestClusterCore + for i := 0; i < numCores; i++ { + tcc := &TestClusterCore{ + Core: cores[i], + CoreConfig: coreConfigs[i], + ServerKey: certInfoSlice[i].key, + ServerKeyPEM: certInfoSlice[i].keyPEM, + ServerCert: certInfoSlice[i].cert, + ServerCertBytes: certInfoSlice[i].certBytes, + ServerCertPEM: certInfoSlice[i].certPEM, + Listeners: listeners[i], + Handler: handlers[i], + Server: servers[i], + TLSConfig: tlsConfigs[i], + Client: getAPIClient(listeners[i][0].Address.Port, tlsConfigs[i]), + Barrier: cores[i].barrier, + NodeID: fmt.Sprintf("core-%d", i), + UnderlyingRawStorage: coreConfigs[i].Physical, + } + tcc.ReloadFuncs = &cores[i].reloadFuncs + tcc.ReloadFuncsLock = &cores[i].reloadFuncsLock + tcc.ReloadFuncsLock.Lock() + (*tcc.ReloadFuncs)["listener|tcp"] = []reload.ReloadFunc{certGetters[i].Reload} + tcc.ReloadFuncsLock.Unlock() + + testAdjustTestCore(base, tcc) + + ret = append(ret, tcc) + } + + testCluster.Cores = ret + + testExtraClusterCoresTestSetup(t, priKey, testCluster.Cores) + + testCluster.CleanupFunc = func() { + for _, c := range cleanupFuncs { + c() + } + } + if opts != nil { + if opts.SetupFunc != nil { + testCluster.SetupFunc = func() { + opts.SetupFunc(t, &testCluster) + } + } + } + + return &testCluster +} + +func NewMockBuiltinRegistry() *mockBuiltinRegistry { + return &mockBuiltinRegistry{ + forTesting: map[string]consts.PluginType{ + "mysql-database-plugin": consts.PluginTypeDatabase, + "postgresql-database-plugin": consts.PluginTypeDatabase, + }, + } +} + +type mockBuiltinRegistry struct { + forTesting map[string]consts.PluginType +} + +func (m *mockBuiltinRegistry) Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) { + testPluginType, ok := m.forTesting[name] + if !ok { + return nil, false + } + if pluginType != testPluginType { + return nil, false + } + if name == "postgresql-database-plugin" { + return dbPostgres.New, true + } + return dbMysql.New(dbMysql.MetadataLen, dbMysql.MetadataLen, dbMysql.UsernameLen), true +} + +// Keys only supports getting a realistic list of the keys for database plugins. 
+func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string { + if pluginType != consts.PluginTypeDatabase { + return []string{} + } + /* + This is a hard-coded reproduction of the db plugin keys in helper/builtinplugins/registry.go. + The registry isn't directly used because it causes import cycles. + */ + return []string{ + "mysql-database-plugin", + "mysql-aurora-database-plugin", + "mysql-rds-database-plugin", + "mysql-legacy-database-plugin", + "postgresql-database-plugin", + "elasticsearch-database-plugin", + "mssql-database-plugin", + "cassandra-database-plugin", + "mongodb-database-plugin", + "hana-database-plugin", + "influxdb-database-plugin", + } +} + +func (m *mockBuiltinRegistry) Contains(name string, pluginType consts.PluginType) bool { + return false +} + +type NoopAudit struct { + Config *audit.BackendConfig + ReqErr error + ReqAuth []*logical.Auth + Req []*logical.Request + ReqHeaders []map[string][]string + ReqNonHMACKeys []string + ReqErrs []error + + RespErr error + RespAuth []*logical.Auth + RespReq []*logical.Request + Resp []*logical.Response + RespNonHMACKeys []string + RespReqNonHMACKeys []string + RespErrs []error + + salt *salt.Salt + saltMutex sync.RWMutex +} + +func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error { + n.ReqAuth = append(n.ReqAuth, in.Auth) + n.Req = append(n.Req, in.Request) + n.ReqHeaders = append(n.ReqHeaders, in.Request.Headers) + n.ReqNonHMACKeys = in.NonHMACReqDataKeys + n.ReqErrs = append(n.ReqErrs, in.OuterErr) + return n.ReqErr +} + +func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error { + n.RespAuth = append(n.RespAuth, in.Auth) + n.RespReq = append(n.RespReq, in.Request) + n.Resp = append(n.Resp, in.Response) + n.RespErrs = append(n.RespErrs, in.OuterErr) + + if in.Response != nil { + n.RespNonHMACKeys = in.NonHMACRespDataKeys + n.RespReqNonHMACKeys = in.NonHMACReqDataKeys + } + + return n.RespErr +} + +func (n *NoopAudit) Salt(ctx context.Context) (*salt.Salt, error) { + n.saltMutex.RLock() + if n.salt != nil { + defer n.saltMutex.RUnlock() + return n.salt, nil + } + n.saltMutex.RUnlock() + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + if n.salt != nil { + return n.salt, nil + } + salt, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig) + if err != nil { + return nil, err + } + n.salt = salt + return salt, nil +} + +func (n *NoopAudit) GetHash(ctx context.Context, data string) (string, error) { + salt, err := n.Salt(ctx) + if err != nil { + return "", err + } + return salt.GetIdentifiedHMAC(data), nil +} + +func (n *NoopAudit) Reload(ctx context.Context) error { + return nil +} + +func (n *NoopAudit) Invalidate(ctx context.Context) { + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + n.salt = nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/testing_util.go b/vendor/github.com/hashicorp/vault/vault/testing_util.go new file mode 100644 index 00000000..26c7cde0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/testing_util.go @@ -0,0 +1,14 @@ +// +build !enterprise + +package vault + +import ( + testing "github.com/mitchellh/go-testing-interface" +) + +func testGenerateCoreKeys() (interface{}, interface{}, error) { return nil, nil, nil } +func testGetLicensingConfig(interface{}) *LicensingConfig { return &LicensingConfig{} } +func testExtraClusterCoresTestSetup(testing.T, interface{}, []*TestClusterCore) {} +func testAdjustTestCore(_ *CoreConfig, tcc *TestClusterCore) { + tcc.UnderlyingStorage = tcc.physical +} diff --git 
a/vendor/github.com/hashicorp/vault/vault/token_store.go b/vendor/github.com/hashicorp/vault/vault/token_store.go new file mode 100644 index 00000000..eb986088 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/token_store.go @@ -0,0 +1,3443 @@ +package vault + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/http" + "regexp" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" + "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/base62" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin/pb" + "github.com/mitchellh/mapstructure" +) + +const ( + // idPrefix is the prefix used to store tokens for their + // primary ID based index + idPrefix = "id/" + + // accessorPrefix is the prefix used to store the index from + // Accessor to Token ID + accessorPrefix = "accessor/" + + // parentPrefix is the prefix used to store tokens for their + // secondary parent based index + parentPrefix = "parent/" + + // tokenSubPath is the sub-path used for the token store + // view. This is nested under the system view. + tokenSubPath = "token/" + + // rolesPrefix is the prefix used to store role information + rolesPrefix = "roles/" + + // tokenRevocationPending indicates that the token should not be used + // again. If this is encountered during an existing request flow, it means + // that the token is valid but is currently fulfilling its final use; after this + // request it will not be able to be looked up as being valid. + tokenRevocationPending = -1 +) + +var ( + // TokenLength is the size of tokens we are currently generating, without + // any namespace information + TokenLength = 24 + + // displayNameSanitize is used to sanitize a display name given to a token. + displayNameSanitize = regexp.MustCompile("[^a-zA-Z0-9-]") + + // pathSuffixSanitize is used to ensure a path suffix in a role is valid.
+ pathSuffixSanitize = regexp.MustCompile("\\w[\\w-.]+\\w") + + destroyCubbyhole = func(ctx context.Context, ts *TokenStore, te *logical.TokenEntry) error { + if ts.cubbyholeBackend == nil { + // Should only ever happen in testing + return nil + } + + if te == nil { + return errors.New("nil token entry") + } + + switch { + case te.NamespaceID == namespace.RootNamespaceID && !strings.HasPrefix(te.ID, "s."): + saltedID, err := ts.SaltID(ctx, te.ID) + if err != nil { + return err + } + return ts.cubbyholeBackend.revoke(ctx, salt.SaltID(ts.cubbyholeBackend.saltUUID, saltedID, salt.SHA1Hash)) + + default: + if te.CubbyholeID == "" { + return fmt.Errorf("missing cubbyhole ID while destroying") + } + return ts.cubbyholeBackend.revoke(ctx, te.CubbyholeID) + } + } +) + +func (ts *TokenStore) paths() []*framework.Path { + p := []*framework.Path{ + { + Pattern: "roles/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: ts.tokenStoreRoleList, + }, + + HelpSynopsis: tokenListRolesHelp, + HelpDescription: tokenListRolesHelp, + }, + + { + Pattern: "accessors/$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: ts.tokenStoreAccessorList, + }, + + HelpSynopsis: tokenListAccessorsHelp, + HelpDescription: tokenListAccessorsHelp, + }, + + { + Pattern: "create-orphan$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleCreateOrphan, + }, + + HelpSynopsis: strings.TrimSpace(tokenCreateOrphanHelp), + HelpDescription: strings.TrimSpace(tokenCreateOrphanHelp), + }, + + { + Pattern: "create/" + framework.GenericNameRegex("role_name"), + + Fields: map[string]*framework.FieldSchema{ + "role_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the role", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleCreateAgainstRole, + }, + + HelpSynopsis: strings.TrimSpace(tokenCreateRoleHelp), + HelpDescription: strings.TrimSpace(tokenCreateRoleHelp), + }, + + { + Pattern: "create$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleCreate, + }, + + HelpSynopsis: strings.TrimSpace(tokenCreateHelp), + HelpDescription: strings.TrimSpace(tokenCreateHelp), + }, + + { + Pattern: "lookup", + + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Token to lookup (POST request body)", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: ts.handleLookup, + logical.UpdateOperation: ts.handleLookup, + }, + + HelpSynopsis: strings.TrimSpace(tokenLookupHelp), + HelpDescription: strings.TrimSpace(tokenLookupHelp), + }, + + { + Pattern: "lookup-accessor", + + Fields: map[string]*framework.FieldSchema{ + "accessor": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Accessor of the token to look up (request body)", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleUpdateLookupAccessor, + }, + + HelpSynopsis: strings.TrimSpace(tokenLookupAccessorHelp), + HelpDescription: strings.TrimSpace(tokenLookupAccessorHelp), + }, + + { + Pattern: "lookup-self$", + + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Token to look up (unused, does not need to be set)", + }, + }, + + Callbacks: 
map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleLookupSelf, + logical.ReadOperation: ts.handleLookupSelf, + }, + + HelpSynopsis: strings.TrimSpace(tokenLookupHelp), + HelpDescription: strings.TrimSpace(tokenLookupHelp), + }, + + { + Pattern: "revoke-accessor", + + Fields: map[string]*framework.FieldSchema{ + "accessor": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Accessor of the token (request body)", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleUpdateRevokeAccessor, + }, + + HelpSynopsis: strings.TrimSpace(tokenRevokeAccessorHelp), + HelpDescription: strings.TrimSpace(tokenRevokeAccessorHelp), + }, + + { + Pattern: "revoke-self$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleRevokeSelf, + }, + + HelpSynopsis: strings.TrimSpace(tokenRevokeSelfHelp), + HelpDescription: strings.TrimSpace(tokenRevokeSelfHelp), + }, + + { + Pattern: "revoke", + + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Token to revoke (request body)", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleRevokeTree, + }, + + HelpSynopsis: strings.TrimSpace(tokenRevokeHelp), + HelpDescription: strings.TrimSpace(tokenRevokeHelp), + }, + + { + Pattern: "revoke-orphan", + + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Token to revoke (request body)", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleRevokeOrphan, + }, + + HelpSynopsis: strings.TrimSpace(tokenRevokeOrphanHelp), + HelpDescription: strings.TrimSpace(tokenRevokeOrphanHelp), + }, + + { + Pattern: "renew-accessor", + + Fields: map[string]*framework.FieldSchema{ + "accessor": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Accessor of the token to renew (request body)", + }, + "increment": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Default: 0, + Description: "The desired increment in seconds to the token expiration", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleUpdateRenewAccessor, + }, + + HelpSynopsis: strings.TrimSpace(tokenRenewAccessorHelp), + HelpDescription: strings.TrimSpace(tokenRenewAccessorHelp), + }, + + { + Pattern: "renew-self$", + + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Token to renew (unused, does not need to be set)", + }, + "increment": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Default: 0, + Description: "The desired increment in seconds to the token expiration", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleRenewSelf, + }, + + HelpSynopsis: strings.TrimSpace(tokenRenewSelfHelp), + HelpDescription: strings.TrimSpace(tokenRenewSelfHelp), + }, + + { + Pattern: "renew", + + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Token to renew (request body)", + }, + "increment": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Default: 0, + Description: "The desired increment in seconds to the token expiration", + }, + }, + + Callbacks: 
map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleRenew, + }, + + HelpSynopsis: strings.TrimSpace(tokenRenewHelp), + HelpDescription: strings.TrimSpace(tokenRenewHelp), + }, + + { + Pattern: "tidy$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: ts.handleTidy, + }, + + HelpSynopsis: strings.TrimSpace(tokenTidyHelp), + HelpDescription: strings.TrimSpace(tokenTidyDesc), + }, + } + + rolesPath := &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("role_name"), + Fields: map[string]*framework.FieldSchema{ + "role_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the role", + }, + + "allowed_policies": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: tokenAllowedPoliciesHelp, + }, + + "disallowed_policies": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: tokenDisallowedPoliciesHelp, + }, + + "orphan": &framework.FieldSchema{ + Type: framework.TypeBool, + Description: tokenOrphanHelp, + }, + + "period": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: "Use 'token_period' instead.", + Deprecated: true, + }, + + "path_suffix": &framework.FieldSchema{ + Type: framework.TypeString, + Description: tokenPathSuffixHelp + pathSuffixSanitize.String(), + }, + + "explicit_max_ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: "Use 'token_explicit_max_ttl' instead.", + Deprecated: true, + }, + + "renewable": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: true, + Description: tokenRenewableHelp, + }, + + "bound_cidrs": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: "Use 'token_bound_cidrs' instead.", + Deprecated: true, + }, + + "allowed_entity_aliases": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: "String or JSON list of allowed entity aliases. If set, specifies the entity aliases which are allowed to be used during token generation. This field supports globbing.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: ts.tokenStoreRoleRead, + logical.CreateOperation: ts.tokenStoreRoleCreateUpdate, + logical.UpdateOperation: ts.tokenStoreRoleCreateUpdate, + logical.DeleteOperation: ts.tokenStoreRoleDelete, + }, + + ExistenceCheck: ts.tokenStoreRoleExistenceCheck, + } + + tokenutil.AddTokenFieldsWithAllowList(rolesPath.Fields, []string{"token_bound_cidrs", "token_explicit_max_ttl", "token_period", "token_type", "token_no_default_policy", "token_num_uses"}) + p = append(p, rolesPath) + + return p +} + +// LookupToken returns the properties of the token from the token store. This +// is particularly useful to fetch the accessor of the client token and get it +// populated in the logical request along with the client token. The accessor +// of the client token can get audit logged. +func (c *Core) LookupToken(ctx context.Context, token string) (*logical.TokenEntry, error) { + if c.Sealed() { + return nil, consts.ErrSealed + } + + c.stateLock.RLock() + defer c.stateLock.RUnlock() + + if c.standby && !c.perfStandby { + return nil, consts.ErrStandby + } + + // Many tests don't have a token store running + if c.tokenStore == nil || c.tokenStore.expiration == nil { + return nil, nil + } + + return c.tokenStore.Lookup(ctx, token) +} + +// TokenStore is used to manage client tokens. 
Tokens are used for +// clients to authenticate, and each token is mapped to an applicable +// set of policies which are used for authorization. +type TokenStore struct { + *framework.Backend + + activeContext context.Context + + core *Core + + batchTokenEncryptor BarrierEncryptor + + baseBarrierView *BarrierView + idBarrierView *BarrierView + accessorBarrierView *BarrierView + parentBarrierView *BarrierView + rolesBarrierView *BarrierView + + expiration *ExpirationManager + + cubbyholeBackend *CubbyholeBackend + + tokenLocks []*locksutil.LockEntry + + // tokensPendingDeletion stores tokens that are being revoked. If the token is + // not in the map, it means that there's no deletion in progress. If the value + // is true it means deletion is in progress, and if false it means deletion + // failed. Revocation needs to handle these states accordingly. + tokensPendingDeletion *sync.Map + + cubbyholeDestroyer func(context.Context, *TokenStore, *logical.TokenEntry) error + + logger log.Logger + + saltLock sync.RWMutex + salts map[string]*salt.Salt + + tidyLock *uint32 + + identityPoliciesDeriverFunc func(string) (*identity.Entity, []string, error) + + quitContext context.Context +} + +// NewTokenStore is used to construct a token store that is +// backed by the given barrier view. +func NewTokenStore(ctx context.Context, logger log.Logger, core *Core, config *logical.BackendConfig) (*TokenStore, error) { + // Create a sub-view + view := core.systemBarrierView.SubView(tokenSubPath) + + // Initialize the store + t := &TokenStore{ + activeContext: ctx, + core: core, + batchTokenEncryptor: core.barrier, + baseBarrierView: view, + idBarrierView: view.SubView(idPrefix), + accessorBarrierView: view.SubView(accessorPrefix), + parentBarrierView: view.SubView(parentPrefix), + rolesBarrierView: view.SubView(rolesPrefix), + cubbyholeDestroyer: destroyCubbyhole, + logger: logger, + tokenLocks: locksutil.CreateLocks(), + tokensPendingDeletion: &sync.Map{}, + saltLock: sync.RWMutex{}, + tidyLock: new(uint32), + quitContext: core.activeContext, + salts: make(map[string]*salt.Salt), + } + + // Setup the framework endpoints + t.Backend = &framework.Backend{ + AuthRenew: t.authRenew, + + PathsSpecial: &logical.Paths{ + Root: []string{ + "revoke-orphan/*", + "accessors*", + }, + + // Most token store items are local since tokens are local, but a + // notable exception is roles + LocalStorage: []string{ + idPrefix, + accessorPrefix, + parentPrefix, + salt.DefaultLocation, + }, + }, + BackendType: logical.TypeCredential, + } + + t.Backend.Paths = append(t.Backend.Paths, t.paths()...)
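+ // t.paths() supplies every token endpoint defined above: create (plain, orphan, and against a role), lookup, revoke, renew, tidy, and the roles and accessors listings.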
+ + t.Backend.Setup(ctx, config) + + return t, nil +} + +func (ts *TokenStore) Invalidate(ctx context.Context, key string) { + + switch key { + case tokenSubPath + salt.DefaultLocation: + ts.saltLock.Lock() + ts.salts = make(map[string]*salt.Salt) + ts.saltLock.Unlock() + } +} + +func (ts *TokenStore) Salt(ctx context.Context) (*salt.Salt, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + ts.saltLock.RLock() + if salt, ok := ts.salts[ns.ID]; ok { + defer ts.saltLock.RUnlock() + return salt, nil + } + ts.saltLock.RUnlock() + ts.saltLock.Lock() + defer ts.saltLock.Unlock() + if salt, ok := ts.salts[ns.ID]; ok { + return salt, nil + } + + salt, err := salt.NewSalt(ctx, ts.baseView(ns), &salt.Config{ + HashFunc: salt.SHA1Hash, + Location: salt.DefaultLocation, + }) + if err != nil { + return nil, err + } + ts.salts[ns.ID] = salt + return salt, nil +} + +// tsRoleEntry contains token store role information +type tsRoleEntry struct { + tokenutil.TokenParams + + // The name of the role. Embedded so it can be used for pathing + Name string `json:"name" mapstructure:"name" structs:"name"` + + // The policies that creation functions using this role can assign to a token, + // escaping or further locking down normal subset checking + AllowedPolicies []string `json:"allowed_policies" mapstructure:"allowed_policies" structs:"allowed_policies"` + + // List of policies to be not allowed during token creation using this role + DisallowedPolicies []string `json:"disallowed_policies" mapstructure:"disallowed_policies" structs:"disallowed_policies"` + + // If true, tokens created using this role will be orphans + Orphan bool `json:"orphan" mapstructure:"orphan" structs:"orphan"` + + // If non-zero, tokens created using this role will be able to be renewed + // forever, but will have a fixed renewal period of this value + Period time.Duration `json:"period" mapstructure:"period" structs:"period"` + + // If set, a suffix will be set on the token path, making it easier to + // revoke using 'revoke-prefix' + PathSuffix string `json:"path_suffix" mapstructure:"path_suffix" structs:"path_suffix"` + + // If set, controls whether created tokens are marked as being renewable + Renewable bool `json:"renewable" mapstructure:"renewable" structs:"renewable"` + + // If set, the token entry will have an explicit maximum TTL set, rather + // than deferring to role/mount values + ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"` + + // The set of CIDRs that tokens generated using this role will be bound to + BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"` + + // The set of allowed entity aliases used during token creation + AllowedEntityAliases []string `json:"allowed_entity_aliases" mapstructure:"allowed_entity_aliases" structs:"allowed_entity_aliases"` +} + +type accessorEntry struct { + TokenID string `json:"token_id"` + AccessorID string `json:"accessor_id"` + NamespaceID string `json:"namespace_id"` +} + +// SetExpirationManager is used to provide the token store with +// an expiration manager. This is used to manage prefix based revocation +// of tokens and to tidy entries when removed from the token store. 
+func (ts *TokenStore) SetExpirationManager(exp *ExpirationManager) { + ts.expiration = exp +} + +// SaltID is used to apply a salt and hash to an ID to make sure it's not reversible +func (ts *TokenStore) SaltID(ctx context.Context, id string) (string, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return "", namespace.ErrNoNamespace + } + + s, err := ts.Salt(ctx) + if err != nil { + return "", err + } + + // For tokens of older format and belonging to the root namespace, use SHA1 + // hash for salting. + if ns.ID == namespace.RootNamespaceID && !strings.Contains(id, ".") { + return s.SaltID(id), nil + } + + // For all other tokens, use SHA2-256 HMAC for salting. This includes + // tokens of older format, but belonging to a namespace other than the root + // namespace. + return "h" + s.GetHMAC(id), nil +} + +// rootToken is used to generate a new token with root privileges and no parent +func (ts *TokenStore) rootToken(ctx context.Context) (*logical.TokenEntry, error) { + ctx = namespace.ContextWithNamespace(ctx, namespace.RootNamespace) + te := &logical.TokenEntry{ + Policies: []string{"root"}, + Path: "auth/token/root", + DisplayName: "root", + CreationTime: time.Now().Unix(), + NamespaceID: namespace.RootNamespaceID, + Type: logical.TokenTypeService, + } + if err := ts.create(ctx, te); err != nil { + return nil, err + } + return te, nil +} + +func (ts *TokenStore) tokenStoreAccessorList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + nsID := ns.ID + + entries, err := ts.accessorView(ns).List(ctx, "") + if err != nil { + return nil, err + } + + resp := &logical.Response{} + + ret := make([]string, 0, len(entries)) + for _, entry := range entries { + aEntry, err := ts.lookupByAccessor(ctx, entry, true, false) + if err != nil { + resp.AddWarning(fmt.Sprintf("Found an accessor entry that could not be successfully decoded; associated error is %q", err.Error())) + continue + } + + if aEntry.TokenID == "" { + resp.AddWarning(fmt.Sprintf("Found an accessor entry missing a token: %v", aEntry.AccessorID)) + continue + } + + if aEntry.NamespaceID == nsID { + ret = append(ret, aEntry.AccessorID) + } + } + + resp.Data = map[string]interface{}{ + "keys": ret, + } + return resp, nil +} + +// createAccessor is used to create an identifier for the token ID. +// A storage index, mapping the accessor to the token ID is also created.
+func (ts *TokenStore) createAccessor(ctx context.Context, entry *logical.TokenEntry) error { + defer metrics.MeasureSince([]string{"token", "createAccessor"}, time.Now()) + + var err error + // Create a random accessor + entry.Accessor, err = base62.Random(TokenLength) + if err != nil { + return err + } + + tokenNS, err := NamespaceByID(ctx, entry.NamespaceID, ts.core) + if err != nil { + return err + } + if tokenNS == nil { + return namespace.ErrNoNamespace + } + + if tokenNS.ID != namespace.RootNamespaceID { + entry.Accessor = fmt.Sprintf("%s.%s", entry.Accessor, tokenNS.ID) + } + + // Create index entry, mapping the accessor to the token ID + saltCtx := namespace.ContextWithNamespace(ctx, tokenNS) + saltID, err := ts.SaltID(saltCtx, entry.Accessor) + if err != nil { + return err + } + + aEntry := &accessorEntry{ + TokenID: entry.ID, + AccessorID: entry.Accessor, + NamespaceID: entry.NamespaceID, + } + + aEntryBytes, err := jsonutil.EncodeJSON(aEntry) + if err != nil { + return errwrap.Wrapf("failed to marshal accessor index entry: {{err}}", err) + } + + le := &logical.StorageEntry{Key: saltID, Value: aEntryBytes} + if err := ts.accessorView(tokenNS).Put(ctx, le); err != nil { + return errwrap.Wrapf("failed to persist accessor index entry: {{err}}", err) + } + return nil +} + +// Create is used to create a new token entry. The entry is assigned +// a newly generated ID if not provided. +func (ts *TokenStore) create(ctx context.Context, entry *logical.TokenEntry) error { + defer metrics.MeasureSince([]string{"token", "create"}, time.Now()) + + tokenNS, err := NamespaceByID(ctx, entry.NamespaceID, ts.core) + if err != nil { + return err + } + if tokenNS == nil { + return namespace.ErrNoNamespace + } + + entry.Policies = policyutil.SanitizePolicies(entry.Policies, policyutil.DoNotAddDefaultPolicy) + if len(entry.Policies) == 1 && entry.Policies[0] == "root" { + metrics.IncrCounter([]string{"token", "create_root"}, 1) + } + + switch entry.Type { + case logical.TokenTypeDefault, logical.TokenTypeService: + // In case it was default, force to service + entry.Type = logical.TokenTypeService + + // Generate an ID if necessary + userSelectedID := true + if entry.ID == "" { + userSelectedID = false + var err error + entry.ID, err = base62.RandomWithReader(TokenLength, ts.core.secureRandomReader) + if err != nil { + return err + } + } + + if userSelectedID && strings.HasPrefix(entry.ID, "s.") { + return fmt.Errorf("custom token ID cannot have the 's.' prefix") + } + + if !userSelectedID { + entry.ID = fmt.Sprintf("s.%s", entry.ID) + } + + // Attach namespace ID for tokens that are not belonging to the root + // namespace + if tokenNS.ID != namespace.RootNamespaceID { + entry.ID = fmt.Sprintf("%s.%s", entry.ID, tokenNS.ID) + } + + if tokenNS.ID != namespace.RootNamespaceID || strings.HasPrefix(entry.ID, "s.") { + if entry.CubbyholeID == "" { + cubbyholeID, err := base62.Random(TokenLength) + if err != nil { + return err + } + entry.CubbyholeID = cubbyholeID + } + } + + // If the user didn't specifically pick the ID, e.g. 
because they were + // sudo/root, trust the process; otherwise check for collision + if userSelectedID { + exist, _ := ts.lookupInternal(ctx, entry.ID, false, true) + if exist != nil { + return fmt.Errorf("cannot create a token with a duplicate ID") + } + } + + err = ts.createAccessor(ctx, entry) + if err != nil { + return err + } + + return ts.storeCommon(ctx, entry, true) + + case logical.TokenTypeBatch: + // Ensure fields we don't support/care about are nilled, proto marshal, + // encrypt, skip persistence + entry.ID = "" + pEntry := &pb.TokenEntry{ + Parent: entry.Parent, + Policies: entry.Policies, + Path: entry.Path, + Meta: entry.Meta, + DisplayName: entry.DisplayName, + CreationTime: entry.CreationTime, + TTL: int64(entry.TTL), + Role: entry.Role, + EntityID: entry.EntityID, + NamespaceID: entry.NamespaceID, + Type: uint32(entry.Type), + } + + boundCIDRs := make([]string, len(entry.BoundCIDRs)) + for i, cidr := range entry.BoundCIDRs { + boundCIDRs[i] = cidr.String() + } + pEntry.BoundCIDRs = boundCIDRs + + mEntry, err := proto.Marshal(pEntry) + if err != nil { + return err + } + + eEntry, err := ts.batchTokenEncryptor.Encrypt(ctx, "", mEntry) + if err != nil { + return err + } + + bEntry := base64.RawURLEncoding.EncodeToString(eEntry) + entry.ID = fmt.Sprintf("b.%s", bEntry) + + if tokenNS.ID != namespace.RootNamespaceID { + entry.ID = fmt.Sprintf("%s.%s", entry.ID, tokenNS.ID) + } + + return nil + + default: + return fmt.Errorf("cannot create a token of type %d", entry.Type) + } +} + +// Store is used to store an updated token entry without writing the +// secondary index. +func (ts *TokenStore) store(ctx context.Context, entry *logical.TokenEntry) error { + defer metrics.MeasureSince([]string{"token", "store"}, time.Now()) + return ts.storeCommon(ctx, entry, false) +} + +// storeCommon handles the actual storage of an entry, possibly generating +// secondary indexes +func (ts *TokenStore) storeCommon(ctx context.Context, entry *logical.TokenEntry, writeSecondary bool) error { + tokenNS, err := NamespaceByID(ctx, entry.NamespaceID, ts.core) + if err != nil { + return err + } + if tokenNS == nil { + return namespace.ErrNoNamespace + } + + saltCtx := namespace.ContextWithNamespace(ctx, tokenNS) + saltedID, err := ts.SaltID(saltCtx, entry.ID) + if err != nil { + return err + } + + // Marshal the entry + enc, err := json.Marshal(entry) + if err != nil { + return errwrap.Wrapf("failed to encode entry: {{err}}", err) + } + + if writeSecondary { + // Write the secondary index if necessary. This is done before the + // primary index because we'd rather have a dangling pointer with + // a missing primary instead of missing the parent index and potentially + // escaping the revocation chain.
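+ // The index key written below has the shape parent/<parentSaltedID>/<childSaltedID>, plus a .<namespaceID> suffix for tokens outside the root namespace, so revocation can list parent/<parentSaltedID>/ to discover children.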
+ if entry.Parent != "" { + // Ensure the parent exists + parent, err := ts.Lookup(ctx, entry.Parent) + if err != nil { + return errwrap.Wrapf("failed to lookup parent: {{err}}", err) + } + if parent == nil { + return fmt.Errorf("parent token not found") + } + + parentNS, err := NamespaceByID(ctx, parent.NamespaceID, ts.core) + if err != nil { + return err + } + if parentNS == nil { + return namespace.ErrNoNamespace + } + + parentCtx := namespace.ContextWithNamespace(ctx, parentNS) + + // Create the index entry + parentSaltedID, err := ts.SaltID(parentCtx, entry.Parent) + if err != nil { + return err + } + + path := parentSaltedID + "/" + saltedID + if tokenNS.ID != namespace.RootNamespaceID { + path = fmt.Sprintf("%s.%s", path, tokenNS.ID) + } + + le := &logical.StorageEntry{Key: path} + if err := ts.parentView(parentNS).Put(ctx, le); err != nil { + return errwrap.Wrapf("failed to persist entry: {{err}}", err) + } + } + } + + // Write the primary ID + le := &logical.StorageEntry{Key: saltedID, Value: enc} + if len(entry.Policies) == 1 && entry.Policies[0] == "root" { + le.SealWrap = true + } + if err := ts.idView(tokenNS).Put(ctx, le); err != nil { + return errwrap.Wrapf("failed to persist entry: {{err}}", err) + } + return nil +} + +// UseToken is used to manage restricted use tokens and decrement their +// available uses. Returns two values: a potentially updated entry or, if the +// token has been revoked, nil; and whether an error was encountered. The +// locking here isn't perfect, as other parts of the code may update an entry, +// but usually none after the entry is already created...so this is pretty +// good. +func (ts *TokenStore) UseToken(ctx context.Context, te *logical.TokenEntry) (*logical.TokenEntry, error) { + if te == nil { + return nil, fmt.Errorf("invalid token entry provided for use count decrementing") + } + + // This case won't be hit with a token with restricted uses because we go + // from 1 to -1. So it's a nice optimization to check this without a read + // lock. + if te.NumUses == 0 { + return te, nil + } + + // If we are attempting to unwrap a control group request, don't use the token. + // It will be manually revoked by the handler. + if len(te.Policies) == 1 && te.Policies[0] == controlGroupPolicyName { + return te, nil + } + + lock := locksutil.LockForKey(ts.tokenLocks, te.ID) + lock.Lock() + defer lock.Unlock() + + var err error + te, err = ts.lookupInternal(ctx, te.ID, false, false) + if err != nil { + return nil, errwrap.Wrapf("failed to refresh entry: {{err}}", err) + } + // If it can't be found we shouldn't be trying to use it, so if we get nil + // back, it is because it has been revoked in the interim or will be + // revoked (NumUses is -1) + if te == nil { + return nil, fmt.Errorf("token not found or fully used already") + } + + // Decrement the count. If this is our last use count, we need to indicate + // that this is no longer valid, but revocation is deferred to the end of + // the call, so this will make sure that any Lookup that happens doesn't + // return an entry. This essentially acts as a write-ahead lock and is + // especially useful since revocation can end up (via the expiration + // manager revoking children) attempting to acquire the same lock + // repeatedly. 
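+ // For example, a token created with num_uses=1 moves from 1 straight to tokenRevocationPending (-1) here rather than to 0, so any concurrent lookup already treats it as invalid while the deferred revocation completes.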
+ if te.NumUses == 1 { + te.NumUses = tokenRevocationPending + } else { + te.NumUses-- + } + + err = ts.store(ctx, te) + if err != nil { + return nil, err + } + + return te, nil +} + +func (ts *TokenStore) UseTokenByID(ctx context.Context, id string) (*logical.TokenEntry, error) { + te, err := ts.Lookup(ctx, id) + if err != nil { + return te, err + } + + return ts.UseToken(ctx, te) +} + +// Lookup is used to find a token given its ID. It acquires a read lock, then calls lookupInternal. +func (ts *TokenStore) Lookup(ctx context.Context, id string) (*logical.TokenEntry, error) { + defer metrics.MeasureSince([]string{"token", "lookup"}, time.Now()) + if id == "" { + return nil, fmt.Errorf("cannot lookup blank token") + } + + // If it starts with "b." it's a batch token + if len(id) > 2 && strings.HasPrefix(id, "b.") { + return ts.lookupBatchToken(ctx, id) + } + + lock := locksutil.LockForKey(ts.tokenLocks, id) + lock.RLock() + defer lock.RUnlock() + + return ts.lookupInternal(ctx, id, false, false) +} + +// lookupTainted is used to find a token that may or may not be tainted given +// its ID. It acquires a read lock, then calls lookupInternal. +func (ts *TokenStore) lookupTainted(ctx context.Context, id string) (*logical.TokenEntry, error) { + defer metrics.MeasureSince([]string{"token", "lookup"}, time.Now()) + if id == "" { + return nil, fmt.Errorf("cannot lookup blank token") + } + + lock := locksutil.LockForKey(ts.tokenLocks, id) + lock.RLock() + defer lock.RUnlock() + + return ts.lookupInternal(ctx, id, false, true) +} + +func (ts *TokenStore) lookupBatchToken(ctx context.Context, id string) (*logical.TokenEntry, error) { + // Strip the b. from the front and namespace ID from the back + bEntry, _ := namespace.SplitIDFromString(id[2:]) + + eEntry, err := base64.RawURLEncoding.DecodeString(bEntry) + if err != nil { + return nil, err + } + + mEntry, err := ts.batchTokenEncryptor.Decrypt(ctx, "", eEntry) + if err != nil { + return nil, nil + } + + pEntry := new(pb.TokenEntry) + if err := proto.Unmarshal(mEntry, pEntry); err != nil { + return nil, err + } + + te, err := pb.ProtoTokenEntryToLogicalTokenEntry(pEntry) + if err != nil { + return nil, err + } + + if time.Now().After(time.Unix(te.CreationTime, 0).Add(te.TTL)) { + return nil, nil + } + + if te.Parent != "" { + pte, err := ts.Lookup(ctx, te.Parent) + if err != nil { + return nil, err + } + if pte == nil { + return nil, nil + } + } + + te.ID = id + return te, nil +} + +// lookupInternal is used to find a token given its (possibly salted) ID. If +// tainted is true, entries that are in some revocation state (currently +// indicated by num uses < 0) will still be returned +func (ts *TokenStore) lookupInternal(ctx context.Context, id string, salted, tainted bool) (*logical.TokenEntry, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, errwrap.Wrapf("failed to find namespace in context: {{err}}", err) + } + + // If it starts with "b." it's a batch token + if len(id) > 2 && strings.HasPrefix(id, "b.") { + return ts.lookupBatchToken(ctx, id) + } + + var raw *logical.StorageEntry + lookupID := id + + if !salted { + // If possible, always use the token's namespace.
If it doesn't match + // the request namespace, ensure the request namespace is a child + _, nsID := namespace.SplitIDFromString(id) + if nsID != "" { + tokenNS, err := NamespaceByID(ctx, nsID, ts.core) + if err != nil { + return nil, errwrap.Wrapf("failed to look up namespace from the token: {{err}}", err) + } + if tokenNS != nil { + if tokenNS.ID != ns.ID { + ns = tokenNS + ctx = namespace.ContextWithNamespace(ctx, tokenNS) + } + } + } else { + // Any non-root-ns token should have an accessor and child + // namespaces cannot have custom IDs. If someone omits or tampers + // with it, the lookup in the root namespace simply won't work. + ns = namespace.RootNamespace + ctx = namespace.ContextWithNamespace(ctx, ns) + } + + lookupID, err = ts.SaltID(ctx, id) + if err != nil { + return nil, err + } + } + + raw, err = ts.idView(ns).Get(ctx, lookupID) + if err != nil { + return nil, errwrap.Wrapf("failed to read entry: {{err}}", err) + } + + // Bail if not found + if raw == nil { + return nil, nil + } + + // Unmarshal the token + entry := new(logical.TokenEntry) + if err := jsonutil.DecodeJSON(raw.Value, entry); err != nil { + return nil, errwrap.Wrapf("failed to decode entry: {{err}}", err) + } + + // This is a token that is awaiting deferred revocation or tainted + if entry.NumUses < 0 && !tainted { + return nil, nil + } + + if entry.NamespaceID == "" { + entry.NamespaceID = namespace.RootNamespaceID + } + + // This will be the upgrade case + if entry.Type == logical.TokenTypeDefault { + entry.Type = logical.TokenTypeService + } + + persistNeeded := false + + // Upgrade the deprecated fields + if entry.DisplayNameDeprecated != "" { + if entry.DisplayName == "" { + entry.DisplayName = entry.DisplayNameDeprecated + } + entry.DisplayNameDeprecated = "" + persistNeeded = true + } + + if entry.CreationTimeDeprecated != 0 { + if entry.CreationTime == 0 { + entry.CreationTime = entry.CreationTimeDeprecated + } + entry.CreationTimeDeprecated = 0 + persistNeeded = true + } + + if entry.ExplicitMaxTTLDeprecated != 0 { + if entry.ExplicitMaxTTL == 0 { + entry.ExplicitMaxTTL = entry.ExplicitMaxTTLDeprecated + } + entry.ExplicitMaxTTLDeprecated = 0 + persistNeeded = true + } + + if entry.NumUsesDeprecated != 0 { + if entry.NumUses == 0 || entry.NumUsesDeprecated < entry.NumUses { + entry.NumUses = entry.NumUsesDeprecated + } + entry.NumUsesDeprecated = 0 + persistNeeded = true + } + + // It's a root token with unlimited creation TTL (so never had an + // expiration); this may or may not have a lease (based on when it was + // generated, for later revocation purposes) but it doesn't matter, it's + // allowed. Fast-path this. 
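+ // (The initial root token generated at initialization matches this case: Policies == []string{"root"} and TTL == 0.)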
+ if len(entry.Policies) == 1 && entry.Policies[0] == "root" && entry.TTL == 0 { + // If fields are getting upgraded, store the changes + if persistNeeded { + if err := ts.store(ctx, entry); err != nil { + return nil, errwrap.Wrapf("failed to persist token upgrade: {{err}}", err) + } + } + return entry, nil + } + + // Perform these checks on upgraded fields, but before persisting + + // If we are still restoring the expiration manager, we want to ensure the + // token is not expired + if ts.expiration == nil { + return nil, errors.New("expiration manager is nil on tokenstore") + } + le, err := ts.expiration.FetchLeaseTimesByToken(ctx, entry) + if err != nil { + return nil, errwrap.Wrapf("failed to fetch lease times: {{err}}", err) + } + + var ret *logical.TokenEntry + + switch { + // It's any kind of expiring token with no lease; immediately delete it + case le == nil: + tokenNS, err := NamespaceByID(ctx, entry.NamespaceID, ts.core) + if err != nil { + return nil, err + } + if tokenNS == nil { + return nil, namespace.ErrNoNamespace + } + + revokeCtx := namespace.ContextWithNamespace(ts.quitContext, tokenNS) + leaseID, err := ts.expiration.CreateOrFetchRevocationLeaseByToken(revokeCtx, entry) + if err != nil { + return nil, err + } + + err = ts.expiration.Revoke(revokeCtx, leaseID) + if err != nil { + return nil, err + } + + // Only return if we're not past lease expiration (or if tainted is true), + // otherwise assume expmgr is working on revocation + default: + if !le.ExpireTime.Before(time.Now()) || tainted { + ret = entry + } + } + + // If fields are getting upgraded, store the changes + if persistNeeded { + if err := ts.store(ctx, entry); err != nil { + return nil, errwrap.Wrapf("failed to persist token upgrade: {{err}}", err) + } + } + + return ret, nil +} + +// revokeOrphan is used to invalidate a given token; any child tokens +// will be orphaned. +func (ts *TokenStore) revokeOrphan(ctx context.Context, id string) error { + defer metrics.MeasureSince([]string{"token", "revoke"}, time.Now()) + if id == "" { + return fmt.Errorf("cannot revoke blank token") + } + + saltedID, err := ts.SaltID(ctx, id) + if err != nil { + return err + } + + return ts.revokeInternal(ctx, saltedID, false) +} + +// revokeInternal is used to invalidate a given salted token; any child tokens +// will be orphaned unless otherwise specified. skipOrphan should be used +// whenever we are revoking the entire tree starting from a particular parent +// (e.g. revokeTreeInternal). +func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipOrphan bool) (ret error) { + // Check and set the token deletion state.
We only proceed with the deletion + // if we don't have a pending deletion (empty), or if the deletion previously + // failed (state is false) + state, loaded := ts.tokensPendingDeletion.LoadOrStore(saltedID, true) + + // If the entry was loaded and its state is true, we short-circuit + if loaded && state == true { + return nil + } + + // The map check above should protect us from any concurrent revocations, so + // we do another lookup here to make sure we have the right state + entry, err := ts.lookupInternal(ctx, saltedID, true, true) + if err != nil { + return err + } + if entry == nil { + return nil + } + + if entry.NumUses != tokenRevocationPending { + entry.NumUses = tokenRevocationPending + if err := ts.store(ctx, entry); err != nil { + // The only real reason for this is an underlying storage error + // which also means that nothing else in this func or expmgr will + // really work either. So we clear revocation state so the user can + // try again. + ts.logger.Error("failed to mark token as revoked") + ts.tokensPendingDeletion.Store(entry.ID, false) + return err + } + } + + tokenNS, err := NamespaceByID(ctx, entry.NamespaceID, ts.core) + if err != nil { + return err + } + if tokenNS == nil { + return namespace.ErrNoNamespace + } + + defer func() { + // If we succeeded in all other revocation operations after this defer and + // before we return, we can remove the token store entry + if ret == nil { + if err := ts.idView(tokenNS).Delete(ctx, saltedID); err != nil { + ret = errwrap.Wrapf("failed to delete entry: {{err}}", err) + } + } + + // Check on ret again and update the sync.Map accordingly + if ret != nil { + // If we failed on any of the calls within, we store the state as false + // so that the next call to revokeInternal will retry + ts.tokensPendingDeletion.Store(saltedID, false) + } else { + ts.tokensPendingDeletion.Delete(saltedID) + } + }() + + // Destroy the token's cubby. This should go first as it's a + // security-sensitive item.
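+ // (cubbyholeDestroyer defaults to destroyCubbyhole, which no-ops when no cubbyhole backend is mounted; tests may substitute their own implementation.)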
+ err = ts.cubbyholeDestroyer(ctx, ts, entry) + if err != nil { + return err + } + + revokeCtx := namespace.ContextWithNamespace(ts.quitContext, tokenNS) + if err := ts.expiration.RevokeByToken(revokeCtx, entry); err != nil { + return err + } + + // Clear the secondary index if any + if entry.Parent != "" { + _, parentNSID := namespace.SplitIDFromString(entry.Parent) + parentCtx := revokeCtx + parentNS := tokenNS + + if parentNSID != tokenNS.ID { + switch { + case parentNSID == "": + parentNS = namespace.RootNamespace + default: + parentNS, err = NamespaceByID(ctx, parentNSID, ts.core) + if err != nil { + return errwrap.Wrapf("failed to get parent namespace: {{err}}", err) + } + if parentNS == nil { + return namespace.ErrNoNamespace + } + } + + parentCtx = namespace.ContextWithNamespace(ctx, parentNS) + } + + parentSaltedID, err := ts.SaltID(parentCtx, entry.Parent) + if err != nil { + return err + } + + path := parentSaltedID + "/" + saltedID + if tokenNS.ID != namespace.RootNamespaceID { + path = fmt.Sprintf("%s.%s", path, tokenNS.ID) + } + + if err = ts.parentView(parentNS).Delete(ctx, path); err != nil { + return errwrap.Wrapf("failed to delete entry: {{err}}", err) + } + } + + // Clear the accessor index if any + if entry.Accessor != "" { + accessorSaltedID, err := ts.SaltID(revokeCtx, entry.Accessor) + if err != nil { + return err + } + + if err = ts.accessorView(tokenNS).Delete(ctx, accessorSaltedID); err != nil { + return errwrap.Wrapf("failed to delete entry: {{err}}", err) + } + } + + if !skipOrphan { + // Mark all child tokens as orphans by removing + // their parent index, and clear the parent entry. + // + // Marking the token as orphan should be skipped if it's called by + // revokeTreeInternal to avoid unnecessary view.List operations. Since + // the deletion occurs in a DFS fashion we don't need to perform a delete + // on child prefixes as there will be none (as saltedID entry is a leaf node). + children, err := ts.parentView(tokenNS).List(ctx, saltedID+"/") + if err != nil { + return errwrap.Wrapf("failed to scan for children: {{err}}", err) + } + for _, child := range children { + var childNSID string + childCtx := revokeCtx + child, childNSID = namespace.SplitIDFromString(child) + if childNSID != "" { + childNS, err := NamespaceByID(ctx, childNSID, ts.core) + if err != nil { + return errwrap.Wrapf("failed to get child token: {{err}}", err) + } + if childNS == nil { + return namespace.ErrNoNamespace + } + + childCtx = namespace.ContextWithNamespace(ctx, childNS) + } + + entry, err := ts.lookupInternal(childCtx, child, true, true) + if err != nil { + return errwrap.Wrapf("failed to get child token: {{err}}", err) + } + if entry == nil { + // Seems it's already revoked, so nothing to do here except delete the index + err = ts.parentView(tokenNS).Delete(ctx, child) + if err != nil { + return errwrap.Wrapf("failed to delete child entry: {{err}}", err) + } + continue + } + + lock := locksutil.LockForKey(ts.tokenLocks, entry.ID) + lock.Lock() + + entry.Parent = "" + err = ts.store(childCtx, entry) + if err != nil { + lock.Unlock() + return errwrap.Wrapf("failed to update child token: {{err}}", err) + } + lock.Unlock() + + // Delete the child storage entry after we update the token entry. Since + // paths are not deeply nested (i.e.
they are simply + // parentPrefix/<parentID>/<childID>), we can simply call view.Delete instead + // of logical.ClearView + err = ts.parentView(tokenNS).Delete(ctx, child) + if err != nil { + return errwrap.Wrapf("failed to delete child entry: {{err}}", err) + } + } + } + + return nil +} + +// revokeTree is used to invalidate a given token and all +// child tokens. +func (ts *TokenStore) revokeTree(ctx context.Context, le *leaseEntry) error { + defer metrics.MeasureSince([]string{"token", "revoke-tree"}, time.Now()) + // Verify the token is not blank + if le.ClientToken == "" { + return fmt.Errorf("cannot tree-revoke blank token") + } + + // In case lookup fails for some reason for the token itself, set the + // context for the next call from the lease entry's NS. This function is + // only called when a lease for a given token is expiring, so it should run + // in the context of the token namespace + revCtx := namespace.ContextWithNamespace(ctx, le.namespace) + + saltedID, err := ts.SaltID(revCtx, le.ClientToken) + if err != nil { + return err + } + + // Nuke the entire tree, children first + return ts.revokeTreeInternal(revCtx, saltedID) +} + +// revokeTreeInternal is used to invalidate a given token and all +// child tokens. +// Updated to be non-recursive and revoke child tokens +// before parent tokens (DFS). +func (ts *TokenStore) revokeTreeInternal(ctx context.Context, id string) error { + dfs := []string{id} + seenIDs := make(map[string]struct{}) + + var ns *namespace.Namespace + + te, err := ts.lookupInternal(ctx, id, true, true) + if err != nil { + return err + } + if te == nil { + ns, err = namespace.FromContext(ctx) + if err != nil { + return err + } + } else { + ns, err = NamespaceByID(ctx, te.NamespaceID, ts.core) + if err != nil { + return err + } + } + if ns == nil { + return fmt.Errorf("failed to find namespace for token revocation") + } + + for l := len(dfs); l > 0; l = len(dfs) { + id := dfs[len(dfs)-1] + seenIDs[id] = struct{}{} + + saltedCtx := ctx + saltedNS := ns + saltedID, saltedNSID := namespace.SplitIDFromString(id) + if saltedNSID != "" { + saltedNS, err = NamespaceByID(ctx, saltedNSID, ts.core) + if err != nil { + return errwrap.Wrapf("failed to find namespace for token revocation: {{err}}", err) + } + + saltedCtx = namespace.ContextWithNamespace(ctx, saltedNS) + } + + path := saltedID + "/" + childrenRaw, err := ts.parentView(saltedNS).List(saltedCtx, path) + if err != nil { + return errwrap.Wrapf("failed to scan for children: {{err}}", err) + } + + // Filter the child list to remove any items that have ever been in the dfs stack. + // This is a robustness check, as a parent/child cycle can lead to an OOM crash. + children := make([]string, 0, len(childrenRaw)) + for _, child := range childrenRaw { + if _, seen := seenIDs[child]; !seen { + children = append(children, child) + } else { + if err = ts.parentView(saltedNS).Delete(saltedCtx, path+child); err != nil { + return errwrap.Wrapf("failed to delete entry: {{err}}", err) + } + + ts.Logger().Warn("token cycle found", "token", child) + } + } + + // If the length of the children array is zero, + // then we are at a leaf node. + if len(children) == 0 { + // Whenever revokeInternal is called, the token will be removed immediately and + // any underlying secrets will be handed off to the expiration manager which will + // take care of expiring them. If Vault is restarted, any revoked tokens + // would have been deleted, and any pending leases for deletion will be restored + // by the expiration manager.
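Since this traversal is the part that is easiest to get wrong, here is the same stack discipline in isolation: children are revoked before their parents, and the seen-set keeps a corrupt parent/child cycle from growing the stack without bound. A stdlib-only sketch (the children map stands in for the parentView List calls):

    package main

    import "fmt"

    var children = map[string][]string{
        "root": {"a", "b"},
        "a":    {"c"},
        "c":    {"root"}, // deliberate cycle; would never terminate without the seen-set
    }

    // revokeLeafFirst walks the tree iteratively, revoking leaves before parents.
    func revokeLeafFirst(id string) {
        dfs := []string{id}
        seen := map[string]struct{}{}
        for len(dfs) > 0 {
            cur := dfs[len(dfs)-1]
            seen[cur] = struct{}{}
            var kids []string
            for _, k := range children[cur] {
                if _, ok := seen[k]; !ok { // skip anything already on (or through) the stack
                    kids = append(kids, k)
                }
            }
            if len(kids) == 0 {
                fmt.Println("revoke", cur) // a leaf, or all children already handled
                dfs = dfs[:len(dfs)-1]
                continue
            }
            dfs = append(dfs, kids...)
        }
    }

    func main() { revokeLeafFirst("root") } // revoke b, c, a, root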
+ if err := ts.revokeInternal(saltedCtx, saltedID, true); err != nil { + return errwrap.Wrapf("failed to revoke entry: {{err}}", err) + } + // If the length of l is equal to 1, then the last token has been deleted + if l == 1 { + return nil + } + dfs = dfs[:len(dfs)-1] + } else { + // If we make it here, there are children and they must be appended. + dfs = append(dfs, children...) + } + } + + return nil +} + +func (c *Core) IsBatchTokenCreationRequest(ctx context.Context, path string) (bool, error) { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + + if c.tokenStore == nil { + return false, fmt.Errorf("no token store") + } + + name := strings.TrimPrefix(path, "auth/token/create/") + roleEntry, err := c.tokenStore.tokenStoreRole(ctx, name) + if err != nil { + return false, err + } + if roleEntry == nil { + return false, fmt.Errorf("unknown role") + } + return roleEntry.TokenType == logical.TokenTypeBatch, nil +} + +// handleCreateAgainstRole handles the auth/token/create path for a role +func (ts *TokenStore) handleCreateAgainstRole(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("role_name").(string) + roleEntry, err := ts.tokenStoreRole(ctx, name) + if err != nil { + return nil, err + } + if roleEntry == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown role %s", name)), nil + } + + return ts.handleCreateCommon(ctx, req, d, false, roleEntry) +} + +func (ts *TokenStore) lookupByAccessor(ctx context.Context, id string, salted, tainted bool) (accessorEntry, error) { + var aEntry accessorEntry + + ns, err := namespace.FromContext(ctx) + if err != nil { + return aEntry, err + } + + lookupID := id + if !salted { + _, nsID := namespace.SplitIDFromString(id) + if nsID != "" { + accessorNS, err := NamespaceByID(ctx, nsID, ts.core) + if err != nil { + return aEntry, err + } + if accessorNS != nil { + if accessorNS.ID != ns.ID { + ns = accessorNS + ctx = namespace.ContextWithNamespace(ctx, accessorNS) + } + } + } else { + // Any non-root-ns token should have an accessor and child + // namespaces cannot have custom IDs. If someone omits or tampers + // with it, the lookup in the root namespace simply won't work. + ns = namespace.RootNamespace + ctx = namespace.ContextWithNamespace(ctx, ns) + } + + lookupID, err = ts.SaltID(ctx, id) + if err != nil { + return aEntry, err + } + } + + entry, err := ts.accessorView(ns).Get(ctx, lookupID) + + if err != nil { + return aEntry, errwrap.Wrapf("failed to read index using accessor: {{err}}", err) + } + if entry == nil { + return aEntry, &logical.StatusBadRequest{Err: "invalid accessor"} + } + + err = jsonutil.DecodeJSON(entry.Value, &aEntry) + // If we hit an error, assume it's a pre-struct straight token ID + if err != nil { + te, err := ts.lookupInternal(ctx, string(entry.Value), false, tainted) + if err != nil { + return accessorEntry{}, errwrap.Wrapf("failed to look up token using accessor index: {{err}}", err) + } + // It's hard to reason about what to do here if te is nil -- it may be + // that the token was revoked async, or that it's an old accessor index + // entry that was somehow not cleared up, or or or. A nonexistent token + // entry on lookup is nil, not an error, so we keep that behavior here + // to be safe...the token ID is simply not filled in. 
+ if te != nil { + aEntry.TokenID = te.ID + aEntry.AccessorID = te.Accessor + aEntry.NamespaceID = te.NamespaceID + } + } + + if aEntry.NamespaceID == "" { + aEntry.NamespaceID = namespace.RootNamespaceID + } + + return aEntry, nil +} + +// handleTidy handles cleaning up leaked accessor storage entries and +// cleaning up leases associated with expired tokens. +func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if !atomic.CompareAndSwapUint32(ts.tidyLock, 0, 1) { + resp := &logical.Response{} + resp.AddWarning("Tidy operation already in progress.") + return resp, nil + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, errwrap.Wrapf("failed to get namespace from context: {{err}}", err) + } + + go func() { + defer atomic.StoreUint32(ts.tidyLock, 0) + + logger := ts.logger.Named("tidy") + + var tidyErrors *multierror.Error + + doTidy := func() error { + + ts.logger.Info("beginning tidy operation on tokens") + defer ts.logger.Info("finished tidy operation on tokens") + + quitCtx := namespace.ContextWithNamespace(ts.quitContext, ns) + + // List out all the accessors + saltedAccessorList, err := ts.accessorView(ns).List(quitCtx, "") + if err != nil { + return errwrap.Wrapf("failed to fetch accessor index entries: {{err}}", err) + } + + // First, clean up secondary index entries that are no longer valid + parentList, err := ts.parentView(ns).List(quitCtx, "") + if err != nil { + return errwrap.Wrapf("failed to fetch secondary index entries: {{err}}", err) + } + + // List all the cubbyhole storage keys + cubbyholeKeys, err := ts.cubbyholeBackend.storageView.List(quitCtx, "") + if err != nil { + return errwrap.Wrapf("failed to fetch cubbyhole storage keys: {{err}}", err) + } + + var countParentEntries, deletedCountParentEntries, countParentList, deletedCountParentList int64 + + // Scan through the secondary index entries; if there is an entry + // with the token's salt ID at the end, remove it + for _, parent := range parentList { + countParentEntries++ + + // Get the children + children, err := ts.parentView(ns).List(quitCtx, parent) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to read secondary index: {{err}}", err)) + continue + } + + // First check whether the parent's salted ID still exists; if it + // doesn't, note that so the deletion loop over the children below + // applies to all of them + originalChildrenCount := int64(len(children)) + exists, _ := ts.lookupInternal(quitCtx, strings.TrimSuffix(parent, "/"), true, true) + if exists == nil { + ts.logger.Debug("deleting invalid parent prefix entry", "index", parentPrefix+parent) + } + + var deletedChildrenCount int64 + for _, child := range children { + countParentList++ + if countParentList%500 == 0 { + ts.logger.Info("checking validity of tokens in secondary index list", "progress", countParentList) + } + + // Look up tainted entries so we can be sure that if this isn't + // found, it doesn't exist. Doing the following without locking + // since appropriate locks cannot be held with salted token IDs. + // Also perform deletion if the parent doesn't exist any more. + te, _ := ts.lookupInternal(quitCtx, child, true, true) + // If the child entry is not nil, but the parent doesn't exist, then turn + // that child token into an orphan token. There's no deletion in this case.
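The locksutil.LockForKey calls used below pick one mutex out of a fixed pool by hashing the token ID, so work on the same token serializes without one global lock. A rough stdlib-only equivalent (the pool size and hash function here are illustrative choices):

    package main

    import (
        "fmt"
        "hash/fnv"
        "sync"
    )

    const lockCount = 256

    var locks [lockCount]sync.RWMutex

    // lockForKey deterministically maps a key to one of the striped locks.
    func lockForKey(key string) *sync.RWMutex {
        h := fnv.New32a()
        h.Write([]byte(key))
        return &locks[h.Sum32()%lockCount]
    }

    func main() {
        l := lockForKey("token-id")
        l.Lock()
        // ... mutate the token entry, e.g. clear its parent ...
        l.Unlock()
        fmt.Println(lockForKey("token-id") == l) // true: same key, same lock
    }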
+ if te != nil && exists == nil { + lock := locksutil.LockForKey(ts.tokenLocks, te.ID) + lock.Lock() + + te.Parent = "" + err = ts.store(quitCtx, te) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to convert child token into an orphan token: {{err}}", err)) + } + lock.Unlock() + continue + } + // Otherwise, if the entry doesn't exist, or if the parent doesn't exist, go + // on with the delete on the secondary index + if te == nil || exists == nil { + index := parent + child + ts.logger.Debug("deleting invalid secondary index", "index", index) + err = ts.parentView(ns).Delete(quitCtx, index) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to delete secondary index: {{err}}", err)) + continue + } + deletedChildrenCount++ + } + } + // Add current children deleted count to the total count + deletedCountParentList += deletedChildrenCount + // N.B.: We don't call delete on the parent prefix since physical.Backend.Delete + // implementations should be in charge of deleting empty prefixes. + // If we deleted all the children, then add that to our deleted parent entries count. + if originalChildrenCount == deletedChildrenCount { + deletedCountParentEntries++ + } + } + + var countAccessorList, + countCubbyholeKeys, + deletedCountAccessorEmptyToken, + deletedCountAccessorInvalidToken, + deletedCountInvalidTokenInAccessor, + deletedCountInvalidCubbyholeKey int64 + + validCubbyholeKeys := make(map[string]bool) + + // For each accessor, check whether the token ID associated with it is + // still valid. If not, delete the leases associated with that token + // and delete the accessor as well. + for _, saltedAccessor := range saltedAccessorList { + countAccessorList++ + if countAccessorList%500 == 0 { + ts.logger.Info("checking if accessors contain valid tokens", "progress", countAccessorList) + } + + accessorEntry, err := ts.lookupByAccessor(quitCtx, saltedAccessor, true, true) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to read the accessor index: {{err}}", err)) + continue + } + + // A valid accessor storage entry should always have a token ID + // in it. If not, it is an invalid accessor entry and needs to + // be deleted. + if accessorEntry.TokenID == "" { + // If deletion of accessor fails, move on to the next + // item since this is just a best-effort operation + err = ts.accessorView(ns).Delete(quitCtx, saltedAccessor) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to delete the accessor index: {{err}}", err)) + continue + } + deletedCountAccessorEmptyToken++ + } + + lock := locksutil.LockForKey(ts.tokenLocks, accessorEntry.TokenID) + lock.RLock() + + // Look up tainted variants so we only find entries that truly don't + // exist + te, err := ts.lookupInternal(quitCtx, accessorEntry.TokenID, false, true) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to lookup tainted ID: {{err}}", err)) + lock.RUnlock() + continue + } + + lock.RUnlock() + + switch { + case te == nil: + // If the token entry is not found, assume that the token is no longer + // valid and conclude that the accessor, leases, and secondary index + // entries for this token should not exist either. + + ts.logger.Info("deleting token with nil entry referenced by accessor", "salted_accessor", saltedAccessor) + + // RevokeByToken expects a '*logical.TokenEntry'. For the + // purposes of tidying, it is sufficient if the token + // entry only has ID set.
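Note how each failure in these scans is appended to tidyErrors and the loop continues: tidy is deliberately best-effort, and a single bad entry must not abort the whole pass. The accumulation pattern in isolation, assuming the github.com/hashicorp/go-multierror module used above:

    package main

    import (
        "fmt"

        multierror "github.com/hashicorp/go-multierror"
    )

    func tidy(entries []string) error {
        var tidyErrors *multierror.Error
        for _, e := range entries {
            if e == "bad" {
                // Record the failure and keep scanning instead of returning early.
                tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("could not delete %q", e))
                continue
            }
            // ... delete the entry ...
        }
        return tidyErrors.ErrorOrNil() // nil when every entry succeeded
    }

    func main() {
        fmt.Println(tidy([]string{"ok", "bad", "ok"}))
    }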
+ tokenEntry := &logical.TokenEntry{ + ID: accessorEntry.TokenID, + NamespaceID: accessorEntry.NamespaceID, + } + + // Attempt to revoke the token. This will also revoke + // the leases associated with the token. + err = ts.expiration.RevokeByToken(quitCtx, tokenEntry) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to revoke leases of expired token: {{err}}", err)) + continue + } + deletedCountInvalidTokenInAccessor++ + + // If deletion of accessor fails, move on to the next item since + // this is just a best-effort operation. We do this last so that on + // next run if something above failed we still have the accessor + // entry to try again. + err = ts.accessorView(ns).Delete(quitCtx, saltedAccessor) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to delete accessor entry: {{err}}", err)) + continue + } + deletedCountAccessorInvalidToken++ + default: + // Cache the cubbyhole storage key when the token is valid + switch { + case te.NamespaceID == namespace.RootNamespaceID && !strings.HasPrefix(te.ID, "s."): + saltedID, err := ts.SaltID(quitCtx, te.ID) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to create salted token id: {{err}}", err)) + continue + } + validCubbyholeKeys[salt.SaltID(ts.cubbyholeBackend.saltUUID, saltedID, salt.SHA1Hash)] = true + default: + if te.CubbyholeID == "" { + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("missing cubbyhole ID for a valid token")) + continue + } + validCubbyholeKeys[te.CubbyholeID] = true + } + } + } + + // Revoke invalid cubbyhole storage keys + for _, key := range cubbyholeKeys { + countCubbyholeKeys++ + if countCubbyholeKeys%500 == 0 { + ts.logger.Info("checking if there are invalid cubbyholes", "progress", countCubbyholeKeys) + } + + key := strings.TrimSuffix(key, "/") + if !validCubbyholeKeys[key] { + ts.logger.Info("deleting invalid cubbyhole", "key", key) + err = ts.cubbyholeBackend.revoke(quitCtx, key) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("failed to revoke cubbyhole key %q: {{err}}", key), err)) + } + deletedCountInvalidCubbyholeKey++ + } + } + + ts.logger.Info("number of entries scanned in parent prefix", "count", countParentEntries) + ts.logger.Info("number of entries deleted in parent prefix", "count", deletedCountParentEntries) + ts.logger.Info("number of tokens scanned in parent index list", "count", countParentList) + ts.logger.Info("number of tokens revoked in parent index list", "count", deletedCountParentList) + ts.logger.Info("number of accessors scanned", "count", countAccessorList) + ts.logger.Info("number of deleted accessors which had empty tokens", "count", deletedCountAccessorEmptyToken) + ts.logger.Info("number of revoked tokens which were invalid but present in accessors", "count", deletedCountInvalidTokenInAccessor) + ts.logger.Info("number of deleted accessors which had invalid tokens", "count", deletedCountAccessorInvalidToken) + ts.logger.Info("number of deleted cubbyhole keys that were invalid", "count", deletedCountInvalidCubbyholeKey) + + return tidyErrors.ErrorOrNil() + } + + if err := doTidy(); err != nil { + logger.Error("error running tidy", "error", err) + return + } + }() + + resp := &logical.Response{} + resp.AddWarning("Tidy operation successfully started. 
Any information from the operation will be printed to Vault's server logs.") + return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) +} + +// handleUpdateLookupAccessor handles the auth/token/lookup-accessor path for returning +// the properties of the token associated with the accessor +func (ts *TokenStore) handleUpdateLookupAccessor(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + accessor := data.Get("accessor").(string) + if accessor == "" { + return nil, &logical.StatusBadRequest{Err: "missing accessor"} + } + + aEntry, err := ts.lookupByAccessor(ctx, accessor, false, false) + if err != nil { + return nil, err + } + + // Prepare the field data required for a lookup call + d := &framework.FieldData{ + Raw: map[string]interface{}{ + "token": aEntry.TokenID, + }, + Schema: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Token to lookup", + }, + }, + } + resp, err := ts.handleLookup(ctx, req, d) + if err != nil { + return nil, err + } + if resp == nil { + return nil, fmt.Errorf("failed to lookup the token") + } + if resp.IsError() { + return resp, nil + + } + + // Remove the token ID from the response + if resp.Data != nil { + resp.Data["id"] = "" + } + + return resp, nil +} + +func (ts *TokenStore) handleUpdateRenewAccessor(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + accessor := data.Get("accessor").(string) + if accessor == "" { + return nil, &logical.StatusBadRequest{Err: "missing accessor"} + } + + aEntry, err := ts.lookupByAccessor(ctx, accessor, false, false) + if err != nil { + return nil, err + } + + // Prepare the field data required for a lookup call + d := &framework.FieldData{ + Raw: map[string]interface{}{ + "token": aEntry.TokenID, + }, + Schema: map[string]*framework.FieldSchema{ + "token": { + Type: framework.TypeString, + }, + "increment": { + Type: framework.TypeDurationSecond, + }, + }, + } + if inc, ok := data.GetOk("increment"); ok { + d.Raw["increment"] = inc + } + + resp, err := ts.handleRenew(ctx, req, d) + if err != nil { + return nil, err + } + if resp == nil { + return nil, fmt.Errorf("failed to lookup the token") + } + if resp.IsError() { + return resp, nil + } + + // Remove the token ID from the response + if resp.Auth != nil { + resp.Auth.ClientToken = "" + } + + return resp, nil +} + +// handleUpdateRevokeAccessor handles the auth/token/revoke-accessor path for revoking +// the token associated with the accessor +func (ts *TokenStore) handleUpdateRevokeAccessor(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + accessor := data.Get("accessor").(string) + if accessor == "" { + return nil, &logical.StatusBadRequest{Err: "missing accessor"} + } + + aEntry, err := ts.lookupByAccessor(ctx, accessor, false, true) + if err != nil { + return nil, err + } + + te, err := ts.Lookup(ctx, aEntry.TokenID) + if err != nil { + return nil, err + } + if te == nil { + return logical.ErrorResponse("token not found"), logical.ErrInvalidRequest + } + + tokenNS, err := NamespaceByID(ctx, te.NamespaceID, ts.core) + if err != nil { + return nil, err + } + if tokenNS == nil { + return nil, namespace.ErrNoNamespace + } + + revokeCtx := namespace.ContextWithNamespace(ts.quitContext, tokenNS) + leaseID, err := ts.expiration.CreateOrFetchRevocationLeaseByToken(revokeCtx, te) + if err != nil { + return nil, err + } + + err = 
ts.expiration.Revoke(revokeCtx, leaseID) + if err != nil { + return nil, err + } + + return nil, nil +} + +// handleCreateOrphan handles the auth/token/create-orphan path for creation of new orphan +// tokens +func (ts *TokenStore) handleCreateOrphan(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return ts.handleCreateCommon(ctx, req, d, true, nil) +} + +// handleCreate handles the auth/token/create path for creation of new non-orphan +// tokens +func (ts *TokenStore) handleCreate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return ts.handleCreateCommon(ctx, req, d, false, nil) +} + +// handleCreateCommon handles the auth/token/create path for creation of new tokens +func (ts *TokenStore) handleCreateCommon(ctx context.Context, req *logical.Request, d *framework.FieldData, orphan bool, role *tsRoleEntry) (*logical.Response, error) { + // Read the parent policy + parent, err := ts.Lookup(ctx, req.ClientToken) + if err != nil { + return nil, errwrap.Wrapf("parent token lookup failed: {{err}}", err) + } + if parent == nil { + return logical.ErrorResponse("parent token lookup failed: no parent found"), logical.ErrInvalidRequest + } + if parent.Type == logical.TokenTypeBatch { + return logical.ErrorResponse("batch tokens cannot create more tokens"), nil + } + + // A token with a restricted number of uses cannot create a new token + // otherwise it could escape the restriction count. + if parent.NumUses > 0 { + return logical.ErrorResponse("restricted use token cannot generate child tokens"), + logical.ErrInvalidRequest + } + + // Check if the client token has sudo/root privileges for the requested path + isSudo := ts.System().(extendedSystemView).SudoPrivilege(ctx, req.MountPoint+req.Path, req.ClientToken) + + // Read and parse the fields + var data struct { + ID string + Policies []string + Metadata map[string]string `mapstructure:"meta"` + NoParent bool `mapstructure:"no_parent"` + NoDefaultPolicy bool `mapstructure:"no_default_policy"` + Lease string + TTL string + Renewable *bool + ExplicitMaxTTL string `mapstructure:"explicit_max_ttl"` + DisplayName string `mapstructure:"display_name"` + NumUses int `mapstructure:"num_uses"` + Period string + Type string `mapstructure:"type"` + EntityAlias string `mapstructure:"entity_alias"` + } + if err := mapstructure.WeakDecode(req.Data, &data); err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error decoding request: %s", err)), logical.ErrInvalidRequest + } + + // If the context's namespace is different from the parent and this is an + // orphan token creation request, then this is an admin token generation for + // the namespace + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + if ns.ID != parent.NamespaceID { + parentNS, err := NamespaceByID(ctx, parent.NamespaceID, ts.core) + if err != nil { + ts.logger.Error("error looking up parent namespace", "error", err, "parent_namespace", parent.NamespaceID) + return nil, ErrInternalError + } + if parentNS == nil { + ts.logger.Error("could not find information for parent namespace", "parent_namespace", parent.NamespaceID) + return nil, ErrInternalError + } + + if !isSudo { + return logical.ErrorResponse("root or sudo privileges required to directly generate a token in a child namespace"), logical.ErrInvalidRequest + } + + if strutil.StrListContains(data.Policies, "root") { + return logical.ErrorResponse("root tokens may not be created from a parent namespace"),
logical.ErrInvalidRequest + } + } + + renewable := true + if data.Renewable != nil { + renewable = *data.Renewable + } + + tokenType := logical.TokenTypeService + tokenTypeStr := data.Type + if role != nil { + switch role.TokenType { + case logical.TokenTypeDefault, logical.TokenTypeDefaultService: + // Use the user-given value, but fall back to service + case logical.TokenTypeDefaultBatch: + // Use the user-given value, but fall back to batch + if tokenTypeStr == "" { + tokenTypeStr = logical.TokenTypeBatch.String() + } + case logical.TokenTypeService: + tokenTypeStr = logical.TokenTypeService.String() + case logical.TokenTypeBatch: + tokenTypeStr = logical.TokenTypeBatch.String() + default: + return logical.ErrorResponse(fmt.Sprintf("role being used for token creation contains invalid token type %q", role.TokenType.String())), nil + } + } + switch tokenTypeStr { + case "", "service": + case "batch": + var badReason string + switch { + case data.ExplicitMaxTTL != "": + dur, err := parseutil.ParseDurationSecond(data.ExplicitMaxTTL) + if err != nil { + return logical.ErrorResponse(`"explicit_max_ttl" value could not be parsed`), nil + } + if dur != 0 { + badReason = "explicit_max_ttl" + } + case data.NumUses != 0: + badReason = "num_uses" + case data.Period != "": + dur, err := parseutil.ParseDurationSecond(data.Period) + if err != nil { + return logical.ErrorResponse(`"period" value could not be parsed`), nil + } + if dur != 0 { + badReason = "period" + } + } + if badReason != "" { + return logical.ErrorResponse(fmt.Sprintf("batch tokens cannot have %q set", badReason)), nil + } + tokenType = logical.TokenTypeBatch + renewable = false + default: + return logical.ErrorResponse("invalid 'token_type' value"), logical.ErrInvalidRequest + } + + // Verify the number of uses is positive + if data.NumUses < 0 { + return logical.ErrorResponse("number of uses cannot be negative"), + logical.ErrInvalidRequest + } + + // Verify the entity alias + var explicitEntityID string + if data.EntityAlias != "" { + // Parameter is only allowed in combination with token role + if role == nil { + return logical.ErrorResponse("'entity_alias' is only allowed in combination with token role"), logical.ErrInvalidRequest + } + + // Check if there is a concrete match + if !strutil.StrListContains(role.AllowedEntityAliases, data.EntityAlias) && + !strutil.StrListContainsGlob(role.AllowedEntityAliases, data.EntityAlias) { + return logical.ErrorResponse("invalid 'entity_alias' value"), logical.ErrInvalidRequest + } + + // Get mount accessor which is required to lookup entity alias + mountValidationResp := ts.core.router.MatchingMountByAccessor(req.MountAccessor) + if mountValidationResp == nil { + return logical.ErrorResponse("auth token mount accessor not found"), nil + } + + // Create alias for later processing + alias := &logical.Alias{ + Name: data.EntityAlias, + MountAccessor: mountValidationResp.Accessor, + MountType: mountValidationResp.Type, + } + + // Create or fetch entity from entity alias + entity, err := ts.core.identityStore.CreateOrFetchEntity(ctx, alias) + if err != nil { + return nil, err + } + if entity == nil { + return nil, errors.New("failed to create or fetch entity from given entity alias") + } + + // Validate that the entity is not disabled + if entity.Disabled { + return logical.ErrorResponse("entity from given entity alias is disabled"), logical.ErrPermissionDenied + } + + // Set new entity id + explicitEntityID = entity.ID + } + + // Setup the token entry + te := logical.TokenEntry{ + Parent: 
req.ClientToken, + + // The mount point is always the same since we have only one token + // store; using req.MountPoint causes trouble in tests since they don't + // have an official mount + Path: fmt.Sprintf("auth/token/%s", req.Path), + + Meta: data.Metadata, + DisplayName: "token", + NumUses: data.NumUses, + CreationTime: time.Now().Unix(), + NamespaceID: ns.ID, + Type: tokenType, + } + + // If the role is not nil, we add the role name as part of the token's + // path. This makes it much easier to later revoke tokens that were issued + // by a role (using revoke-prefix). Users can further specify a PathSuffix + // in the role; that way they can use something like "v1", "v2" to indicate + // role revisions, and revoke only tokens issued with a previous revision. + if role != nil { + te.Role = role.Name + + // If renewable hasn't been disabled in the call and the role has + // renewability disabled, set renewable false + if renewable && !role.Renewable { + renewable = false + } + + // Update te.NumUses, which is equal to req.Data["num_uses"] at this point. + // 0 means unlimited, so a positive count is actually more restrictive than 0 + switch { + case role.TokenNumUses == 0: + case te.NumUses == 0: + te.NumUses = role.TokenNumUses + case role.TokenNumUses < te.NumUses: + te.NumUses = role.TokenNumUses + } + + if role.PathSuffix != "" { + te.Path = fmt.Sprintf("%s/%s", te.Path, role.PathSuffix) + } + } + + // Attach the given display name if any + if data.DisplayName != "" { + full := "token-" + data.DisplayName + full = displayNameSanitize.ReplaceAllString(full, "-") + full = strings.TrimSuffix(full, "-") + te.DisplayName = full + } + + // Allow specifying the ID of the token if the client has root or sudo privileges + if data.ID != "" { + if !isSudo { + return logical.ErrorResponse("root or sudo privileges required to specify token id"), + logical.ErrInvalidRequest + } + if ns.ID != namespace.RootNamespaceID { + return logical.ErrorResponse("token IDs can only be manually specified in the root namespace"), + logical.ErrInvalidRequest + } + te.ID = data.ID + } + + resp := &logical.Response{} + + var addDefault bool + + // N.B.: The logic here uses various calculations as to whether default + // should be added. In the end we decided that if NoDefaultPolicy is set it + // should be stripped out regardless, *but*, the logic of when it should + // and shouldn't be added is kept because we want to do subset comparisons + // based on adding default when it's correct to do so. + switch { + case role != nil && (len(role.AllowedPolicies) > 0 || len(role.DisallowedPolicies) > 0): + // Holds the final set of policies as they get munged + var finalPolicies []string + + // We don't make use of the global one because roles with allowed or + // disallowed set do their own policy rules + var localAddDefault bool + + // If the request doesn't say not to add "default" and if "default" + // isn't in the disallowed list, add it. This is in line with the idea + // that roles, when allowed/disallowed are set, allow a subset of + // policies to be set disjoint from the parent token's policies.
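The policy gymnastics that follow reduce to plain set operations. A stdlib-only model of the subset rule (the real code routes through policyutil.SanitizePolicies and strutil.StrListSubset):

    package main

    import "fmt"

    // subset reports whether every requested policy appears in allowed.
    func subset(allowed, requested []string) bool {
        set := make(map[string]struct{}, len(allowed))
        for _, p := range allowed {
            set[p] = struct{}{}
        }
        for _, p := range requested {
            if _, ok := set[p]; !ok {
                return false
            }
        }
        return true
    }

    func main() {
        allowed := []string{"default", "dev", "ops"}
        fmt.Println(subset(allowed, []string{"dev"}))          // true: request granted
        fmt.Println(subset(allowed, []string{"dev", "admin"})) // false: request rejected
    }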
+ if !data.NoDefaultPolicy && !role.TokenNoDefaultPolicy && !strutil.StrListContains(role.DisallowedPolicies, "default") { + localAddDefault = true + } + + // Start with passed-in policies as a baseline, if they exist + if len(data.Policies) > 0 { + finalPolicies = policyutil.SanitizePolicies(data.Policies, localAddDefault) + } + + var sanitizedRolePolicies []string + + // First check allowed policies; if policies are specified they will be + // checked, otherwise if an allowed set exists that will be the set + // that is used + if len(role.AllowedPolicies) > 0 { + // Note that if "default" is already in allowed, and also in + // disallowed, this will still result in an error later since this + // doesn't strip out default + sanitizedRolePolicies = policyutil.SanitizePolicies(role.AllowedPolicies, localAddDefault) + + if len(finalPolicies) == 0 { + finalPolicies = sanitizedRolePolicies + } else { + if !strutil.StrListSubset(sanitizedRolePolicies, finalPolicies) { + return logical.ErrorResponse(fmt.Sprintf("token policies (%q) must be subset of the role's allowed policies (%q)", finalPolicies, sanitizedRolePolicies)), logical.ErrInvalidRequest + } + } + } else { + // Assign parent policies if none have been requested. As this is a + // role, add default unless explicitly disabled. + if len(finalPolicies) == 0 { + finalPolicies = policyutil.SanitizePolicies(parent.Policies, localAddDefault) + } + } + + if len(role.DisallowedPolicies) > 0 { + // We don't add the default here because we only want to disallow it if it's explicitly set + sanitizedRolePolicies = strutil.RemoveDuplicates(role.DisallowedPolicies, true) + + for _, finalPolicy := range finalPolicies { + if strutil.StrListContains(sanitizedRolePolicies, finalPolicy) { + return logical.ErrorResponse(fmt.Sprintf("token policy %q is disallowed by this role", finalPolicy)), logical.ErrInvalidRequest + } + } + } + + data.Policies = finalPolicies + + // We are creating a token from a parent namespace. We should only use the input + // policies. + case ns.ID != parent.NamespaceID: + addDefault = !data.NoDefaultPolicy + + // No policies specified, inherit parent + case len(data.Policies) == 0: + // Only inherit "default" if the parent already has it, so don't touch addDefault here + data.Policies = policyutil.SanitizePolicies(parent.Policies, policyutil.DoNotAddDefaultPolicy) + + // When a role is not in use or does not specify allowed/disallowed, only + // permit policies to be a subset unless the client has root or sudo + // privileges. Default is added in this case if the parent has it, unless + // the client specified for it not to be added. + case !isSudo: + // Sanitize passed-in and parent policies before comparison + sanitizedInputPolicies := policyutil.SanitizePolicies(data.Policies, policyutil.DoNotAddDefaultPolicy) + sanitizedParentPolicies := policyutil.SanitizePolicies(parent.Policies, policyutil.DoNotAddDefaultPolicy) + + if !strutil.StrListSubset(sanitizedParentPolicies, sanitizedInputPolicies) { + return logical.ErrorResponse("child policies must be subset of parent"), logical.ErrInvalidRequest + } + + // If the parent has default, and they haven't requested not to get it, + // add it. Note that if they have explicitly put "default" in + // data.Policies it will still be added because NoDefaultPolicy + // controls *automatic* adding. 
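That condition reads as a three-part predicate: the caller did not opt out, the role does not opt out, and the role does not explicitly disallow "default". Stated as a tiny function with truth-table examples (the names here are illustrative):

    package main

    import "fmt"

    func addDefaultPolicy(callerOptedOut, roleOptedOut bool, disallowed []string) bool {
        for _, p := range disallowed {
            if p == "default" {
                return false // explicitly disallowed by the role
            }
        }
        return !callerOptedOut && !roleOptedOut
    }

    func main() {
        fmt.Println(addDefaultPolicy(false, false, nil))                 // true
        fmt.Println(addDefaultPolicy(true, false, nil))                  // false: caller set no_default_policy
        fmt.Println(addDefaultPolicy(false, false, []string{"default"})) // false: role disallows it
    }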
+ if !data.NoDefaultPolicy && strutil.StrListContains(parent.Policies, "default") { + addDefault = true + } + + // Add default by default in this case unless requested not to + case isSudo: + addDefault = !data.NoDefaultPolicy + } + + te.Policies = policyutil.SanitizePolicies(data.Policies, addDefault) + + // Yes, this is a little inefficient to do it like this, but meh + if data.NoDefaultPolicy { + te.Policies = strutil.StrListDelete(te.Policies, "default") + } + + // Prevent internal policies from being assigned to tokens + for _, policy := range te.Policies { + if strutil.StrListContains(nonAssignablePolicies, policy) { + return logical.ErrorResponse(fmt.Sprintf("cannot assign policy %q", policy)), nil + } + } + + if strutil.StrListContains(te.Policies, "root") { + // Prevent attempts to create a root token without an actual root token as parent. + // This is to thwart privilege escalation by tokens having 'sudo' privileges. + if !strutil.StrListContains(parent.Policies, "root") { + return logical.ErrorResponse("root tokens may not be created without parent token being root"), logical.ErrInvalidRequest + } + + if te.Type == logical.TokenTypeBatch { + // Batch tokens cannot be revoked so we should never have root batch tokens + return logical.ErrorResponse("batch tokens cannot be root tokens"), nil + } + } + + // + // NOTE: Do not modify policies below this line. We need the checks above + // to be the last checks as they must look at the final policy set. + // + + switch { + case role != nil: + if role.Orphan { + te.Parent = "" + } + + if len(role.TokenBoundCIDRs) > 0 { + te.BoundCIDRs = role.TokenBoundCIDRs + } + + case data.NoParent: + // Only allow an orphan token if the client has sudo policy + if !isSudo { + return logical.ErrorResponse("root or sudo privileges required to create orphan token"), + logical.ErrInvalidRequest + } + + te.Parent = "" + + default: + // This comes from create-orphan, which can be properly ACLd + if orphan { + te.Parent = "" + } + } + + // At this point, it is clear whether the token is going to be an orphan or + // not. If explicitEntityID is set, the entity identifier will be overwritten. + // Otherwise, if the token is not going to be an orphan, inherit the parent's + // entity identifier into the child token. + switch { + case explicitEntityID != "": + // Overwrite the entity identifier + te.EntityID = explicitEntityID + case te.Parent != "": + te.EntityID = parent.EntityID + + // If the parent has bound CIDRs, copy those into the child. We don't + // do this if role is not nil because then we always use the role's + // bound CIDRs; roles allow escalation of privilege in proper + // circumstances.
+ if role == nil { + te.BoundCIDRs = parent.BoundCIDRs + } + } + + var explicitMaxTTLToUse time.Duration + if data.ExplicitMaxTTL != "" { + dur, err := parseutil.ParseDurationSecond(data.ExplicitMaxTTL) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + if dur < 0 { + return logical.ErrorResponse("explicit_max_ttl must be positive"), logical.ErrInvalidRequest + } + te.ExplicitMaxTTL = dur + explicitMaxTTLToUse = dur + } + + var periodToUse time.Duration + if data.Period != "" { + dur, err := parseutil.ParseDurationSecond(data.Period) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + switch { + case dur < 0: + return logical.ErrorResponse("period must be positive"), logical.ErrInvalidRequest + case dur == 0: + default: + if !isSudo { + return logical.ErrorResponse("root or sudo privileges required to create periodic token"), + logical.ErrInvalidRequest + } + te.Period = dur + periodToUse = dur + } + } + + // Parse the TTL/lease if any + if data.TTL != "" { + dur, err := parseutil.ParseDurationSecond(data.TTL) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + if dur < 0 { + return logical.ErrorResponse("ttl must be positive"), logical.ErrInvalidRequest + } + te.TTL = dur + } else if data.Lease != "" { + // This block is compatibility + dur, err := time.ParseDuration(data.Lease) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + if dur < 0 { + return logical.ErrorResponse("lease must be positive"), logical.ErrInvalidRequest + } + te.TTL = dur + } + + // Set the lesser period/explicit max TTL if defined both in arguments and + // in role. Batch tokens will error out if not set via role, but here we + // need to explicitly check + if role != nil && te.Type != logical.TokenTypeBatch { + if role.TokenExplicitMaxTTL != 0 { + switch { + case explicitMaxTTLToUse == 0: + explicitMaxTTLToUse = role.TokenExplicitMaxTTL + default: + if role.TokenExplicitMaxTTL < explicitMaxTTLToUse { + explicitMaxTTLToUse = role.TokenExplicitMaxTTL + } + resp.AddWarning(fmt.Sprintf("Explicit max TTL specified both during creation call and in role; using the lesser value of %d seconds", int64(explicitMaxTTLToUse.Seconds()))) + } + } + if role.TokenPeriod != 0 { + switch { + case periodToUse == 0: + periodToUse = role.TokenPeriod + default: + if role.TokenPeriod < periodToUse { + periodToUse = role.TokenPeriod + } + resp.AddWarning(fmt.Sprintf("Period specified both during creation call and in role; using the lesser value of %d seconds", int64(periodToUse.Seconds()))) + } + } + } + + sysView := ts.System().(extendedSystemView) + + // Only calculate a TTL if you are A) periodic, B) have a TTL, C) do not have a TTL and are not a root token + if periodToUse > 0 || te.TTL > 0 || (te.TTL == 0 && !strutil.StrListContains(te.Policies, "root")) { + ttl, warnings, err := framework.CalculateTTL(sysView, 0, te.TTL, periodToUse, 0, explicitMaxTTLToUse, time.Unix(te.CreationTime, 0)) + if err != nil { + return nil, err + } + for _, warning := range warnings { + resp.AddWarning(warning) + } + te.TTL = ttl + } + + // Root tokens are still bound by explicit max TTL + if te.TTL == 0 && explicitMaxTTLToUse > 0 { + te.TTL = explicitMaxTTLToUse + } + + // Don't advertise non-expiring root tokens as renewable, as attempts to + // renew them are denied. Don't CIDR-restrict these either. 
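framework.CalculateTTL also folds in mount and system defaults, but the heart of the arithmetic above is a clamp: periodic tokens renew to the period rather than the TTL, and an explicit max TTL bounds everything, including root tokens. A simplified stdlib-only model (mount and system maximums deliberately omitted):

    package main

    import (
        "fmt"
        "time"
    )

    func effectiveTTL(ttl, period, explicitMax time.Duration) time.Duration {
        out := ttl
        if period > 0 {
            out = period // periodic tokens use the period as their renewal window
        }
        if explicitMax > 0 && (out == 0 || out > explicitMax) {
            out = explicitMax // the explicit max always wins, even for root tokens
        }
        return out
    }

    func main() {
        fmt.Println(effectiveTTL(72*time.Hour, 0, 24*time.Hour)) // 24h0m0s
        fmt.Println(effectiveTTL(0, time.Hour, 0))               // 1h0m0s
        fmt.Println(effectiveTTL(0, 0, 8*time.Hour))             // 8h0m0s: a root token bound by explicit max
    }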
+ if te.TTL == 0 { + if parent.TTL != 0 { + return logical.ErrorResponse("expiring root tokens cannot create non-expiring root tokens"), logical.ErrInvalidRequest + } + renewable = false + te.BoundCIDRs = nil + } + + if te.ID != "" { + resp.AddWarning("Supplying a custom ID for the token uses the weaker SHA1 hashing instead of the more secure SHA2-256 HMAC for token obfuscation. SHA1 hashed tokens on the wire leads to less secure lookups.") + } + + // Create the token + if err := ts.create(ctx, &te); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + // Generate the response + resp.Auth = &logical.Auth{ + NumUses: te.NumUses, + DisplayName: te.DisplayName, + Policies: te.Policies, + Metadata: te.Meta, + LeaseOptions: logical.LeaseOptions{ + TTL: te.TTL, + Renewable: renewable, + }, + ClientToken: te.ID, + Accessor: te.Accessor, + EntityID: te.EntityID, + Period: periodToUse, + ExplicitMaxTTL: explicitMaxTTLToUse, + CreationPath: te.Path, + TokenType: te.Type, + Orphan: te.Parent == "", + } + + for _, p := range te.Policies { + policy, err := ts.core.policyStore.GetPolicy(ctx, p, PolicyTypeToken) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("could not look up policy %s", p)), nil + } + if policy == nil { + resp.AddWarning(fmt.Sprintf("Policy %q does not exist", p)) + } + } + + return resp, nil +} + +// handleRevokeSelf handles the auth/token/revoke-self path for revocation of tokens +// in a way that revokes all child tokens. Normally, using sys/revoke/leaseID will revoke +// the token and all children anyways, but that is only available when there is a lease. +func (ts *TokenStore) handleRevokeSelf(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return ts.revokeCommon(ctx, req, data, req.ClientToken) +} + +// handleRevokeTree handles the auth/token/revoke/id path for revocation of tokens +// in a way that revokes all child tokens. Normally, using sys/revoke/leaseID will revoke +// the token and all children anyways, but that is only available when there is a lease. +func (ts *TokenStore) handleRevokeTree(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + id := data.Get("token").(string) + if id == "" { + return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest + } + + if resp, err := ts.revokeCommon(ctx, req, data, id); resp != nil || err != nil { + return resp, err + } + + return nil, nil +} + +func (ts *TokenStore) revokeCommon(ctx context.Context, req *logical.Request, data *framework.FieldData, id string) (*logical.Response, error) { + te, err := ts.Lookup(ctx, id) + if err != nil { + return nil, err + } + if te == nil { + return nil, nil + } + + if te.Type == logical.TokenTypeBatch { + return logical.ErrorResponse("batch tokens cannot be revoked"), nil + } + + tokenNS, err := NamespaceByID(ctx, te.NamespaceID, ts.core) + if err != nil { + return nil, err + } + if tokenNS == nil { + return nil, namespace.ErrNoNamespace + } + + revokeCtx := namespace.ContextWithNamespace(ts.quitContext, tokenNS) + leaseID, err := ts.expiration.CreateOrFetchRevocationLeaseByToken(revokeCtx, te) + if err != nil { + return nil, err + } + + err = ts.expiration.Revoke(revokeCtx, leaseID) + if err != nil { + return nil, err + } + + return nil, nil +} + +// handleRevokeOrphan handles the auth/token/revoke-orphan/id path for revocation of tokens +// in a way that leaves child tokens orphaned. 
Normally, using sys/revoke/leaseID will revoke +// the token and all children. +func (ts *TokenStore) handleRevokeOrphan(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Parse the id + id := data.Get("token").(string) + if id == "" { + return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest + } + + // Check if the client token has sudo/root privileges for the requested path + isSudo := ts.System().(extendedSystemView).SudoPrivilege(ctx, req.MountPoint+req.Path, req.ClientToken) + + if !isSudo { + return logical.ErrorResponse("root or sudo privileges required to revoke and orphan"), + logical.ErrInvalidRequest + } + + // Do a lookup. Among other things, that will ensure that this is either + // running in the same namespace or a parent. + te, err := ts.Lookup(ctx, id) + if err != nil { + return nil, errwrap.Wrapf("error when looking up token to revoke: {{err}}", err) + } + if te == nil { + return logical.ErrorResponse("token to revoke not found"), logical.ErrInvalidRequest + } + + if te.Type == logical.TokenTypeBatch { + return logical.ErrorResponse("batch tokens cannot be revoked"), nil + } + + // Revoke and orphan + if err := ts.revokeOrphan(ctx, id); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + return nil, nil +} + +func (ts *TokenStore) handleLookupSelf(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + data.Raw["token"] = req.ClientToken + return ts.handleLookup(ctx, req, data) +} + +// handleLookup handles the auth/token/lookup/id path for querying information about +// a particular token. This can be used to see which policies are applicable. +func (ts *TokenStore) handleLookup(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + id := data.Get("token").(string) + if id == "" { + id = req.ClientToken + } + if id == "" { + return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest + } + + lock := locksutil.LockForKey(ts.tokenLocks, id) + lock.RLock() + defer lock.RUnlock() + + out, err := ts.lookupInternal(ctx, id, false, true) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + if out == nil { + return logical.ErrorResponse("bad token"), logical.ErrPermissionDenied + } + + // Generate a response. We purposely omit the parent reference otherwise + // you could escalate your privileges. 
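For orientation, this handler is what sits behind Vault's auth/token/lookup endpoint; a client exercises it roughly as below (the address and token values are placeholders):

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "strings"
    )

    func main() {
        body := strings.NewReader(`{"token": "s.example"}`)
        req, err := http.NewRequest("POST", "http://127.0.0.1:8200/v1/auth/token/lookup", body)
        if err != nil {
            panic(err)
        }
        req.Header.Set("X-Vault-Token", "s.placeholder") // the caller's own token
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err) // e.g. no Vault server at this address
        }
        defer resp.Body.Close()
        b, _ := ioutil.ReadAll(resp.Body)
        fmt.Println(resp.Status)
        fmt.Println(string(b)) // data contains id, accessor, policies, ttl, orphan, ...
    }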
+ resp := &logical.Response{ + Data: map[string]interface{}{ + "id": out.ID, + "accessor": out.Accessor, + "policies": out.Policies, + "path": out.Path, + "meta": out.Meta, + "display_name": out.DisplayName, + "num_uses": out.NumUses, + "orphan": false, + "creation_time": int64(out.CreationTime), + "creation_ttl": int64(out.TTL.Seconds()), + "expire_time": nil, + "ttl": int64(0), + "explicit_max_ttl": int64(out.ExplicitMaxTTL.Seconds()), + "entity_id": out.EntityID, + "type": out.Type.String(), + }, + } + + if out.Parent == "" { + resp.Data["orphan"] = true + } + + if out.Role != "" { + resp.Data["role"] = out.Role + } + + if out.Period != 0 { + resp.Data["period"] = int64(out.Period.Seconds()) + } + + if len(out.BoundCIDRs) > 0 { + resp.Data["bound_cidrs"] = out.BoundCIDRs + } + + tokenNS, err := NamespaceByID(ctx, out.NamespaceID, ts.core) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + if tokenNS == nil { + return nil, namespace.ErrNoNamespace + } + + if out.NamespaceID != namespace.RootNamespaceID { + resp.Data["namespace_path"] = tokenNS.Path + } + + // Fetch the last renewal time + leaseTimes, err := ts.expiration.FetchLeaseTimesByToken(ctx, out) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + if leaseTimes != nil { + if !leaseTimes.LastRenewalTime.IsZero() { + resp.Data["last_renewal_time"] = leaseTimes.LastRenewalTime.Unix() + resp.Data["last_renewal"] = leaseTimes.LastRenewalTime + } + if !leaseTimes.ExpireTime.IsZero() { + resp.Data["expire_time"] = leaseTimes.ExpireTime + resp.Data["ttl"] = leaseTimes.ttl() + } + renewable, _ := leaseTimes.renewable() + resp.Data["renewable"] = renewable + resp.Data["issue_time"] = leaseTimes.IssueTime + } + + if out.EntityID != "" { + _, identityPolicies, err := ts.core.fetchEntityAndDerivedPolicies(ctx, tokenNS, out.EntityID) + if err != nil { + return nil, err + } + if len(identityPolicies) != 0 { + resp.Data["identity_policies"] = identityPolicies[out.NamespaceID] + delete(identityPolicies, out.NamespaceID) + resp.Data["external_namespace_policies"] = identityPolicies + } + } + + return resp, nil +} + +func (ts *TokenStore) handleRenewSelf(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + data.Raw["token"] = req.ClientToken + return ts.handleRenew(ctx, req, data) +} + +// handleRenew handles the auth/token/renew/id path for renewal of tokens. +// This is used to prevent token expiration and revocation. 
+func (ts *TokenStore) handleRenew(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + id := data.Get("token").(string) + if id == "" { + return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest + } + incrementRaw := data.Get("increment").(int) + + // Convert the increment + increment := time.Duration(incrementRaw) * time.Second + + // Lookup the token + te, err := ts.Lookup(ctx, id) + if err != nil { + return nil, errwrap.Wrapf("error looking up token to renew: {{err}}", err) + } + if te == nil { + return logical.ErrorResponse("token not found"), logical.ErrInvalidRequest + } + + var resp *logical.Response + + if te.Type == logical.TokenTypeBatch { + return logical.ErrorResponse("batch tokens cannot be renewed"), nil + } + + // Renew the token and its children + resp, err = ts.expiration.RenewToken(ctx, req, te, increment) + + return resp, err +} + +func (ts *TokenStore) authRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if req.Auth == nil { + return nil, fmt.Errorf("request auth is nil") + } + + te, err := ts.Lookup(ctx, req.Auth.ClientToken) + if err != nil { + return nil, errwrap.Wrapf("error looking up token: {{err}}", err) + } + if te == nil { + return nil, fmt.Errorf("no token entry found during lookup") + } + + if te.Role == "" { + req.Auth.Period = te.Period + req.Auth.ExplicitMaxTTL = te.ExplicitMaxTTL + return &logical.Response{Auth: req.Auth}, nil + } + + role, err := ts.tokenStoreRole(ctx, te.Role) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error looking up role %q: {{err}}", te.Role), err) + } + if role == nil { + return nil, fmt.Errorf("original token role %q could not be found, not renewing", te.Role) + } + + req.Auth.Period = role.TokenPeriod + req.Auth.ExplicitMaxTTL = role.TokenExplicitMaxTTL + return &logical.Response{Auth: req.Auth}, nil +} + +func (ts *TokenStore) tokenStoreRole(ctx context.Context, name string) (*tsRoleEntry, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + entry, err := ts.rolesView(ns).Get(ctx, name) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result tsRoleEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + if result.TokenType == logical.TokenTypeDefault { + result.TokenType = logical.TokenTypeDefaultService + } + + // Token field upgrades. We preserve the original value for read + // compatibility. 
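The upgrade pattern described above, in isolation: when an older role is decoded, each legacy field backfills its token_-prefixed replacement, but only when the new field is unset, so roles written with the new fields are never clobbered. A sketch with one field (the struct and function names are illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    type roleEntry struct {
        Period      time.Duration // legacy field, kept for read compatibility
        TokenPeriod time.Duration // preferred field
    }

    func upgrade(r *roleEntry) {
        if r.Period > 0 && r.TokenPeriod == 0 {
            r.TokenPeriod = r.Period // backfill, never overwrite
        }
    }

    func main() {
        old := &roleEntry{Period: time.Hour}
        upgrade(old)
        fmt.Println(old.TokenPeriod) // 1h0m0s, carried over from the legacy field

        mixed := &roleEntry{Period: time.Hour, TokenPeriod: 30 * time.Minute}
        upgrade(mixed)
        fmt.Println(mixed.TokenPeriod) // 30m0s, the new field wins
    }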
+ if result.Period > 0 && result.TokenPeriod == 0 { + result.TokenPeriod = result.Period + } + if result.ExplicitMaxTTL > 0 && result.TokenExplicitMaxTTL == 0 { + result.TokenExplicitMaxTTL = result.ExplicitMaxTTL + } + if len(result.BoundCIDRs) > 0 && len(result.TokenBoundCIDRs) == 0 { + result.TokenBoundCIDRs = result.BoundCIDRs + } + + return &result, nil +} + +func (ts *TokenStore) tokenStoreRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + entries, err := ts.rolesView(ns).List(ctx, "") + if err != nil { + return nil, err + } + + ret := make([]string, len(entries)) + for i, entry := range entries { + ret[i] = strings.TrimPrefix(entry, rolesPrefix) + } + + return logical.ListResponse(ret), nil +} + +func (ts *TokenStore) tokenStoreRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + err = ts.rolesView(ns).Delete(ctx, data.Get("role_name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (ts *TokenStore) tokenStoreRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + role, err := ts.tokenStoreRole(ctx, data.Get("role_name").(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + // TODO (1.4): Remove "period" and "explicit_max_ttl" if they're zero + resp := &logical.Response{ + Data: map[string]interface{}{ + "period": int64(role.Period.Seconds()), + "token_period": int64(role.TokenPeriod.Seconds()), + "explicit_max_ttl": int64(role.ExplicitMaxTTL.Seconds()), + "token_explicit_max_ttl": int64(role.TokenExplicitMaxTTL.Seconds()), + "disallowed_policies": role.DisallowedPolicies, + "allowed_policies": role.AllowedPolicies, + "name": role.Name, + "orphan": role.Orphan, + "path_suffix": role.PathSuffix, + "renewable": role.Renewable, + "token_type": role.TokenType.String(), + "allowed_entity_aliases": role.AllowedEntityAliases, + }, + } + + if len(role.TokenBoundCIDRs) > 0 { + resp.Data["token_bound_cidrs"] = role.TokenBoundCIDRs + } + if len(role.BoundCIDRs) > 0 { + resp.Data["bound_cidrs"] = role.BoundCIDRs + } + if role.TokenNumUses > 0 { + resp.Data["token_num_uses"] = role.TokenNumUses + } + + return resp, nil +} + +func (ts *TokenStore) tokenStoreRoleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + name := data.Get("role_name").(string) + if name == "" { + return false, fmt.Errorf("role name cannot be empty") + } + role, err := ts.tokenStoreRole(ctx, name) + if err != nil { + return false, err + } + + return role != nil, nil +} + +func (ts *TokenStore) tokenStoreRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("role_name").(string) + if name == "" { + return logical.ErrorResponse("role name cannot be empty"), nil + } + entry, err := ts.tokenStoreRole(ctx, name) + if err != nil { + return nil, err + } + + // Due to the existence check, entry will only be nil if it's a create + // operation, so just create a new one + if entry == nil { + entry = &tsRoleEntry{ + Name: name, + } + } + + // First parse fields not duplicated by the token helper + { + orphanInt, ok := data.GetOk("orphan") + if ok { + entry.Orphan = orphanInt.(bool) + } else if 
req.Operation == logical.CreateOperation { + entry.Orphan = data.Get("orphan").(bool) + } + + renewableInt, ok := data.GetOk("renewable") + if ok { + entry.Renewable = renewableInt.(bool) + } else if req.Operation == logical.CreateOperation { + entry.Renewable = data.Get("renewable").(bool) + } + + pathSuffixInt, ok := data.GetOk("path_suffix") + if ok { + pathSuffix := pathSuffixInt.(string) + switch { + case pathSuffix != "": + matched := pathSuffixSanitize.MatchString(pathSuffix) + if !matched { + return logical.ErrorResponse(fmt.Sprintf( + "given role path suffix contains invalid characters; must match %s", + pathSuffixSanitize.String())), nil + } + } + entry.PathSuffix = pathSuffix + } else if req.Operation == logical.CreateOperation { + entry.PathSuffix = data.Get("path_suffix").(string) + } + + if strings.Contains(entry.PathSuffix, "..") { + return logical.ErrorResponse(fmt.Sprintf("error registering path suffix: %s", consts.ErrPathContainsParentReferences)), nil + } + + allowedPoliciesRaw, ok := data.GetOk("allowed_policies") + if ok { + entry.AllowedPolicies = policyutil.SanitizePolicies(allowedPoliciesRaw.([]string), policyutil.DoNotAddDefaultPolicy) + } else if req.Operation == logical.CreateOperation { + entry.AllowedPolicies = policyutil.SanitizePolicies(data.Get("allowed_policies").([]string), policyutil.DoNotAddDefaultPolicy) + } + + disallowedPoliciesRaw, ok := data.GetOk("disallowed_policies") + if ok { + entry.DisallowedPolicies = strutil.RemoveDuplicates(disallowedPoliciesRaw.([]string), true) + } else if req.Operation == logical.CreateOperation { + entry.DisallowedPolicies = strutil.RemoveDuplicates(data.Get("disallowed_policies").([]string), true) + } + } + + // We handle token type a bit differently than tokenutil does so we need to + // cache and handle it after + var tokenTypeStr *string + oldEntryTokenType := entry.TokenType + if tokenTypeRaw, ok := data.Raw["token_type"]; ok { + tokenTypeStr = new(string) + *tokenTypeStr = tokenTypeRaw.(string) + delete(data.Raw, "token_type") + entry.TokenType = logical.TokenTypeDefault + } + + // Next parse token fields from the helper + if err := entry.ParseTokenFields(req, data); err != nil { + return logical.ErrorResponse(errwrap.Wrapf("error parsing role fields: {{err}}", err).Error()), nil + } + + entry.TokenType = oldEntryTokenType + if entry.TokenType == logical.TokenTypeDefault { + entry.TokenType = logical.TokenTypeDefaultService + } + if tokenTypeStr != nil { + switch *tokenTypeStr { + case "service": + entry.TokenType = logical.TokenTypeService + case "batch": + entry.TokenType = logical.TokenTypeBatch + case "default-service": + entry.TokenType = logical.TokenTypeDefaultService + case "default-batch": + entry.TokenType = logical.TokenTypeDefaultBatch + default: + return logical.ErrorResponse(fmt.Sprintf("invalid 'token_type' value %q", *tokenTypeStr)), nil + } + } + + var resp *logical.Response + + // Now handle backwards compat. Prefer token_ fields over others if both + // are set. We set the original fields here so that on read of token role + // we can return the same values that were set. We clear out the Token* + // values because otherwise when we read the role back we'll read stale + // data since if they're not emptied they'll take precedence. 
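The precedence rule implemented below, stripped to its skeleton: prefer the token_-prefixed parameter, warn when the deprecated one is also supplied, and fall back to the deprecated one only when the new one is absent (the real code additionally zeroes the legacy struct field so stale data cannot shadow the new value). Names here are illustrative:

    package main

    import "fmt"

    func resolvePeriod(raw map[string]interface{}) (period int, warnings []string) {
        if v, ok := raw["token_period"]; ok {
            if _, also := raw["period"]; also {
                warnings = append(warnings, "Both 'token_period' and deprecated 'period' value supplied, ignoring the deprecated value")
            }
            return v.(int), warnings
        }
        if v, ok := raw["period"]; ok {
            return v.(int), warnings
        }
        return 0, nil
    }

    func main() {
        p, w := resolvePeriod(map[string]interface{}{"token_period": 3600, "period": 60})
        fmt.Println(p, w) // 3600 [Both 'token_period' and deprecated 'period' value supplied, ...]
    }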
+ periodRaw, ok := data.GetOk("token_period") + if !ok { + periodRaw, ok = data.GetOk("period") + if ok { + entry.Period = time.Second * time.Duration(periodRaw.(int)) + entry.TokenPeriod = entry.Period + } + } else { + _, ok = data.GetOk("period") + if ok { + if resp == nil { + resp = &logical.Response{} + } + resp.AddWarning("Both 'token_period' and deprecated 'period' value supplied, ignoring the deprecated value") + } + entry.Period = 0 + } + + boundCIDRsRaw, ok := data.GetOk("token_bound_cidrs") + if !ok { + boundCIDRsRaw, ok = data.GetOk("bound_cidrs") + if ok { + boundCIDRs, err := parseutil.ParseAddrs(boundCIDRsRaw.([]string)) + if err != nil { + return logical.ErrorResponse(errwrap.Wrapf("error parsing bound_cidrs: {{err}}", err).Error()), nil + } + entry.BoundCIDRs = boundCIDRs + entry.TokenBoundCIDRs = entry.BoundCIDRs + } + } else { + _, ok = data.GetOk("bound_cidrs") + if ok { + if resp == nil { + resp = &logical.Response{} + } + resp.AddWarning("Both 'token_bound_cidrs' and deprecated 'bound_cidrs' value supplied, ignoring the deprecated value") + } + entry.BoundCIDRs = nil + } + + finalExplicitMaxTTL := entry.TokenExplicitMaxTTL + explicitMaxTTLRaw, ok := data.GetOk("token_explicit_max_ttl") + if !ok { + explicitMaxTTLRaw, ok = data.GetOk("explicit_max_ttl") + if ok { + entry.ExplicitMaxTTL = time.Second * time.Duration(explicitMaxTTLRaw.(int)) + entry.TokenExplicitMaxTTL = entry.ExplicitMaxTTL + } + finalExplicitMaxTTL = entry.ExplicitMaxTTL + } else { + _, ok = data.GetOk("explicit_max_ttl") + if ok { + if resp == nil { + resp = &logical.Response{} + } + resp.AddWarning("Both 'token_explicit_max_ttl' and deprecated 'explicit_max_ttl' value supplied, ignoring the deprecated value") + } + entry.ExplicitMaxTTL = 0 + } + if finalExplicitMaxTTL != 0 { + sysView := ts.System() + + if sysView.MaxLeaseTTL() != time.Duration(0) && finalExplicitMaxTTL > sysView.MaxLeaseTTL() { + if resp == nil { + resp = &logical.Response{} + } + resp.AddWarning(fmt.Sprintf( + "Given explicit max TTL of %d is greater than system/mount allowed value of %d seconds; until this is fixed attempting to create tokens against this role will result in an error", + int64(finalExplicitMaxTTL.Seconds()), int64(sysView.MaxLeaseTTL().Seconds()))) + } + } + + // no legacy version without the token_ prefix to check for + tokenNumUses, ok := data.GetOk("token_num_uses") + if ok { + entry.TokenNumUses = tokenNumUses.(int) + } + + // Run validity checks on token type + if entry.TokenType == logical.TokenTypeBatch { + if !entry.Orphan { + return logical.ErrorResponse("'token_type' cannot be 'batch' when role is set to generate non-orphan tokens"), nil + } + if entry.Period != 0 || entry.TokenPeriod != 0 { + return logical.ErrorResponse("'token_type' cannot be 'batch' when role is set to generate periodic tokens"), nil + } + if entry.Renewable { + return logical.ErrorResponse("'token_type' cannot be 'batch' when role is set to generate renewable tokens"), nil + } + if entry.ExplicitMaxTTL != 0 || entry.TokenExplicitMaxTTL != 0 { + return logical.ErrorResponse("'token_type' cannot be 'batch' when role is set to generate tokens with an explicit max TTL"), nil + } + } + + allowedEntityAliasesRaw, ok := data.GetOk("allowed_entity_aliases") + if ok { + entry.AllowedEntityAliases = strutil.RemoveDuplicates(allowedEntityAliasesRaw.([]string), true) + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + // Store it + jsonEntry, err := logical.StorageEntryJSON(name, entry) + if err != nil { + 
return nil, err + } + if err := ts.rolesView(ns).Put(ctx, jsonEntry); err != nil { + return nil, err + } + + return resp, nil +} + +const ( + tokenTidyHelp = ` +This endpoint performs cleanup tasks that can be run if certain error +conditions have occurred. +` + tokenTidyDesc = ` +This endpoint performs cleanup tasks that can be run to clean up token and +lease entries after certain error conditions. Usually running this is not +necessary, and is only required if upgrade notes or support personnel suggest +it. +` + tokenBackendHelp = `The token credential backend is always enabled and builtin to Vault. +Client tokens are used to identify a client and to allow Vault to associate policies and ACLs +which are enforced on every request. This backend also allows for generating sub-tokens as well +as revocation of tokens. The tokens are renewable if associated with a lease.` + tokenCreateHelp = `The token create path is used to create new tokens.` + tokenCreateOrphanHelp = `The token create path is used to create new orphan tokens.` + tokenCreateRoleHelp = `This token create path is used to create new tokens adhering to the given role.` + tokenListRolesHelp = `This endpoint lists configured roles.` + tokenLookupAccessorHelp = `This endpoint will lookup a token associated with the given accessor and its properties. Response will not contain the token ID.` + tokenRenewAccessorHelp = `This endpoint will renew a token associated with the given accessor and its properties. Response will not contain the token ID.` + tokenLookupHelp = `This endpoint will lookup a token and its properties.` + tokenPathRolesHelp = `This endpoint allows creating, reading, and deleting roles.` + tokenRevokeAccessorHelp = `This endpoint will delete the token associated with the accessor and all of its child tokens.` + tokenRevokeHelp = `This endpoint will delete the given token and all of its child tokens.` + tokenRevokeSelfHelp = `This endpoint will delete the token used to call it and all of its child tokens.` + tokenRevokeOrphanHelp = `This endpoint will delete the token and orphan its child tokens.` + tokenRenewHelp = `This endpoint will renew the given token and prevent expiration.` + tokenRenewSelfHelp = `This endpoint will renew the token used to call it and prevent expiration.` + tokenAllowedPoliciesHelp = `If set, tokens can be created with any subset of the policies in this +list, rather than the normal semantics of tokens being a subset of the +calling token's policies. The parameter is a comma-delimited string of +policy names.` + tokenDisallowedPoliciesHelp = `If set, successful token creation via this role will require that +no policies in the given list are requested. The parameter is a comma-delimited string of policy names.` + tokenOrphanHelp = `If true, tokens created via this role +will be orphan tokens (have no parent)` + tokenPeriodHelp = `If set, tokens created via this role +will have no max lifetime; instead, their +renewal period will be fixed to this value. +This takes an integer number of seconds, +or a string duration (e.g. "24h").` + tokenPathSuffixHelp = `If set, tokens created via this role +will contain the given suffix as a part of +their path. This can be used to assist use +of the 'revoke-prefix' endpoint later on. +The given suffix must match the regular +expression.` + tokenExplicitMaxTTLHelp = `If set, tokens created via this role +carry an explicit maximum TTL. 
During renewal,
+the current maximum TTL values of the role
+and the mount are not checked for changes,
+and any updates to these values will have
+no effect on the token being renewed.`
+	tokenRenewableHelp = `Tokens created via this role will be
+renewable or not according to this value.
+Defaults to "true".`
+	tokenListAccessorsHelp = `List token accessors, which can then be
+used to iterate and discover their properties
+or revoke them. Because this can be used to
+cause a denial of service, this endpoint
+requires 'sudo' capability in addition to
+'list'.`
+)
diff --git a/vendor/github.com/hashicorp/vault/vault/token_store_util.go b/vendor/github.com/hashicorp/vault/vault/token_store_util.go
new file mode 100644
index 00000000..ca1f39a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/token_store_util.go
@@ -0,0 +1,27 @@
+// +build !enterprise
+
+package vault
+
+import (
+	"github.com/hashicorp/vault/helper/namespace"
+)
+
+func (ts *TokenStore) baseView(ns *namespace.Namespace) *BarrierView {
+	return ts.baseBarrierView
+}
+
+func (ts *TokenStore) idView(ns *namespace.Namespace) *BarrierView {
+	return ts.idBarrierView
+}
+
+func (ts *TokenStore) accessorView(ns *namespace.Namespace) *BarrierView {
+	return ts.accessorBarrierView
+}
+
+func (ts *TokenStore) parentView(ns *namespace.Namespace) *BarrierView {
+	return ts.parentBarrierView
+}
+
+func (ts *TokenStore) rolesView(ns *namespace.Namespace) *BarrierView {
+	return ts.rolesBarrierView
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/ui.go b/vendor/github.com/hashicorp/vault/vault/ui.go
new file mode 100644
index 00000000..5b6f7c49
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/ui.go
@@ -0,0 +1,218 @@
+package vault
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"net/http"
+	"strings"
+	"sync"
+
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/physical"
+)
+
+const (
+	uiConfigKey          = "config"
+	uiConfigPlaintextKey = "config_plaintext"
+)
+
+// UIConfig contains UI configuration.
This takes both a physical view and a barrier view +// because it is stored in both plaintext and encrypted to allow for getting the header +// values before the barrier is unsealed +type UIConfig struct { + l sync.RWMutex + physicalStorage physical.Backend + barrierStorage logical.Storage + + enabled bool + defaultHeaders http.Header +} + +// NewUIConfig creates a new UI config +func NewUIConfig(enabled bool, physicalStorage physical.Backend, barrierStorage logical.Storage) *UIConfig { + defaultHeaders := http.Header{} + defaultHeaders.Set("Content-Security-Policy", "default-src 'none'; connect-src 'self'; img-src 'self' data:; script-src 'self'; style-src 'unsafe-inline' 'self'; form-action 'none'; frame-ancestors 'none'") + defaultHeaders.Set("Service-Worker-Allowed", "/") + + return &UIConfig{ + physicalStorage: physicalStorage, + barrierStorage: barrierStorage, + enabled: enabled, + defaultHeaders: defaultHeaders, + } +} + +// Enabled returns if the UI is enabled +func (c *UIConfig) Enabled() bool { + c.l.RLock() + defer c.l.RUnlock() + return c.enabled +} + +// Headers returns the response headers that should be returned in the UI +func (c *UIConfig) Headers(ctx context.Context) (http.Header, error) { + c.l.RLock() + defer c.l.RUnlock() + + config, err := c.get(ctx) + if err != nil { + return nil, err + } + headers := make(http.Header) + if config != nil { + headers = config.Headers + } + + for k := range c.defaultHeaders { + if headers.Get(k) == "" { + v := c.defaultHeaders.Get(k) + headers.Set(k, v) + } + } + return headers, nil +} + +// HeaderKeys returns the list of the configured headers +func (c *UIConfig) HeaderKeys(ctx context.Context) ([]string, error) { + c.l.RLock() + defer c.l.RUnlock() + + config, err := c.get(ctx) + if err != nil { + return nil, err + } + if config == nil { + return nil, nil + } + var keys []string + for k := range config.Headers { + keys = append(keys, k) + } + return keys, nil +} + +// GetHeader retrieves the configured value for the given header +func (c *UIConfig) GetHeader(ctx context.Context, header string) (string, error) { + c.l.RLock() + defer c.l.RUnlock() + + config, err := c.get(ctx) + if err != nil { + return "", err + } + if config == nil { + return "", nil + } + + value := config.Headers.Get(header) + return value, nil +} + +// SetHeader sets the value for the given header +func (c *UIConfig) SetHeader(ctx context.Context, header, value string) error { + c.l.Lock() + defer c.l.Unlock() + + config, err := c.get(ctx) + if err != nil { + return err + } + if config == nil { + config = &uiConfigEntry{ + Headers: http.Header{}, + } + } + config.Headers.Set(header, value) + return c.save(ctx, config) +} + +// DeleteHeader deletes the header configuration for the given header +func (c *UIConfig) DeleteHeader(ctx context.Context, header string) error { + c.l.Lock() + defer c.l.Unlock() + + config, err := c.get(ctx) + if err != nil { + return err + } + if config == nil { + return nil + } + + config.Headers.Del(header) + return c.save(ctx, config) +} + +func (c *UIConfig) get(ctx context.Context) (*uiConfigEntry, error) { + // Read plaintext always to ensure in sync with barrier value + plaintextConfigRaw, err := c.physicalStorage.Get(ctx, uiConfigPlaintextKey) + if err != nil { + return nil, err + } + + configRaw, err := c.barrierStorage.Get(ctx, uiConfigKey) + if err == nil { + if configRaw == nil { + return nil, nil + } + config := new(uiConfigEntry) + if err := json.Unmarshal(configRaw.Value, config); err != nil { + return nil, err + } + // 
Check that plaintext value matches barrier value; if not, sync the values
+		if plaintextConfigRaw == nil || bytes.Compare(plaintextConfigRaw.Value, configRaw.Value) != 0 {
+			if err := c.save(ctx, config); err != nil {
+				return nil, err
+			}
+		}
+		return config, nil
+	}
+
+	// Respond with error if not sealed
+	if !strings.Contains(err.Error(), ErrBarrierSealed.Error()) {
+		return nil, err
+	}
+
+	// Respond with plaintext value
+	if configRaw == nil {
+		return nil, nil
+	}
+	config := new(uiConfigEntry)
+	if err := json.Unmarshal(plaintextConfigRaw.Value, config); err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+func (c *UIConfig) save(ctx context.Context, config *uiConfigEntry) error {
+	if len(config.Headers) == 0 {
+		if err := c.physicalStorage.Delete(ctx, uiConfigPlaintextKey); err != nil {
+			return err
+		}
+		return c.barrierStorage.Delete(ctx, uiConfigKey)
+	}
+
+	configRaw, err := json.Marshal(config)
+	if err != nil {
+		return err
+	}
+
+	entry := &physical.Entry{
+		Key:   uiConfigPlaintextKey,
+		Value: configRaw,
+	}
+	if err := c.physicalStorage.Put(ctx, entry); err != nil {
+		return err
+	}
+
+	barrEntry := &logical.StorageEntry{
+		Key:   uiConfigKey,
+		Value: configRaw,
+	}
+	return c.barrierStorage.Put(ctx, barrEntry)
+}
+
+type uiConfigEntry struct {
+	Headers http.Header `json:"headers"`
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/util.go b/vendor/github.com/hashicorp/vault/vault/util.go
new file mode 100644
index 00000000..9e03afd2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/util.go
@@ -0,0 +1,42 @@
+package vault
+
+import (
+	"crypto/rand"
+	"fmt"
+)
+
+// memzero is used to zero out a byte buffer. This specific format is optimized
+// by the compiler to use memclr to improve performance. See this code review:
+// https://codereview.appspot.com/137880043
+//
+// Use of memzero is not a guarantee against memory analysis as described in
+// the Vault threat model:
+// https://www.vaultproject.io/docs/internals/security.html. Vault does not
+// provide guarantees against memory analysis or raw memory dumping by
+// operators, however it does minimize this exposure by zeroing out buffers
+// that contain secrets as soon as they are no longer used. Starting with Go
+// 1.5, the garbage collector was changed to become a "generational copying
+// garbage collector." This change to the garbage collector makes it
+// impossible for Vault to guarantee a buffer with a secret has not been
+// copied during a garbage collection. It is therefore possible that secrets
+// may still exist in memory that have not been wiped despite a pending
+// memzero call. Over time any copied data with a secret will be reused and
+// the memory overwritten, thereby mitigating some of the risk from this
+// threat vector.
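A minimal usage sketch of the pattern documented above, assuming only the standard library; the `secret` buffer is illustrative:

```go
package main

import "fmt"

// memzero, as above: a simple indexed loop that the compiler lowers to memclr.
func memzero(b []byte) {
	for i := range b {
		b[i] = 0
	}
}

func main() {
	secret := []byte("s3cr3t")
	// Wipe the buffer as soon as it is no longer needed, typically via defer.
	defer memzero(secret)
	fmt.Printf("using %d secret bytes\n", len(secret))
}
```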
+func memzero(b []byte) { + if b == nil { + return + } + for i := range b { + b[i] = 0 + } +} + +// randbytes is used to create a buffer of size n filled with random bytes +func randbytes(n int) []byte { + buf := make([]byte, n) + if _, err := rand.Read(buf); err != nil { + panic(fmt.Sprintf("failed to generate %d random bytes: %v", n, err)) + } + return buf +} diff --git a/vendor/github.com/hashicorp/vault/vault/wrapping.go b/vendor/github.com/hashicorp/vault/vault/wrapping.go new file mode 100644 index 00000000..427c0522 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/wrapping.go @@ -0,0 +1,439 @@ +package vault + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + "gopkg.in/square/go-jose.v2" + squarejwt "gopkg.in/square/go-jose.v2/jwt" +) + +const ( + // The location of the key used to generate response-wrapping JWTs + coreWrappingJWTKeyPath = "core/wrapping/jwtkey" +) + +func (c *Core) ensureWrappingKey(ctx context.Context) error { + entry, err := c.barrier.Get(ctx, coreWrappingJWTKeyPath) + if err != nil { + return err + } + + var keyParams certutil.ClusterKeyParams + + if entry == nil { + key, err := ecdsa.GenerateKey(elliptic.P521(), c.secureRandomReader) + if err != nil { + return errwrap.Wrapf("failed to generate wrapping key: {{err}}", err) + } + keyParams.D = key.D + keyParams.X = key.X + keyParams.Y = key.Y + keyParams.Type = corePrivateKeyTypeP521 + val, err := jsonutil.EncodeJSON(keyParams) + if err != nil { + return errwrap.Wrapf("failed to encode wrapping key: {{err}}", err) + } + entry = &logical.StorageEntry{ + Key: coreWrappingJWTKeyPath, + Value: val, + } + if err = c.barrier.Put(ctx, entry); err != nil { + return errwrap.Wrapf("failed to store wrapping key: {{err}}", err) + } + } + + // Redundant if we just created it, but in this case serves as a check anyways + if err = jsonutil.DecodeJSON(entry.Value, &keyParams); err != nil { + return errwrap.Wrapf("failed to decode wrapping key parameters: {{err}}", err) + } + + c.wrappingJWTKey = &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: elliptic.P521(), + X: keyParams.X, + Y: keyParams.Y, + }, + D: keyParams.D, + } + + c.logger.Info("loaded wrapping token key") + + return nil +} + +func (c *Core) wrapInCubbyhole(ctx context.Context, req *logical.Request, resp *logical.Response, auth *logical.Auth) (*logical.Response, error) { + if c.perfStandby { + return forwardWrapRequest(ctx, c, req, resp, auth) + } + + // Before wrapping, obey special rules for listing: if no entries are + // found, 404. This prevents unwrapping only to find empty data. 
+	if req.Operation == logical.ListOperation {
+		if resp == nil || (len(resp.Data) == 0 && len(resp.Warnings) == 0) {
+			return nil, logical.ErrUnsupportedPath
+		}
+
+		keysRaw, ok := resp.Data["keys"]
+		if !ok || keysRaw == nil {
+			if len(resp.Data) > 0 || len(resp.Warnings) > 0 {
+				// We could be returning extra metadata on a list, or returning
+				// warnings with no data, so handle these cases
+				goto DONELISTHANDLING
+			}
+			return nil, logical.ErrUnsupportedPath
+		}
+
+		keys, ok := keysRaw.([]string)
+		if !ok {
+			return nil, logical.ErrUnsupportedPath
+		}
+		if len(keys) == 0 {
+			return nil, logical.ErrUnsupportedPath
+		}
+	}
+
+DONELISTHANDLING:
+	var err error
+	sealWrap := resp.WrapInfo.SealWrap
+
+	var ns *namespace.Namespace
+	// If we are creating a JWT wrapping token we always want it to live in
+	// the root namespace. These are only used for replication and plugin setup.
+	switch resp.WrapInfo.Format {
+	case "jwt":
+		ns = namespace.RootNamespace
+		ctx = namespace.ContextWithNamespace(ctx, ns)
+	default:
+		ns, err = namespace.FromContext(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// If we are wrapping, the first part (performed in this function) happens
+	// before auditing so that resp.WrapInfo.Token can contain the HMAC'd
+	// wrapping token ID in the audit logs, so that it can be determined from
+	// the audit logs whether the token was ever actually used.
+	creationTime := time.Now()
+	te := logical.TokenEntry{
+		Path:           req.Path,
+		Policies:       []string{"response-wrapping"},
+		CreationTime:   creationTime.Unix(),
+		TTL:            resp.WrapInfo.TTL,
+		NumUses:        1,
+		ExplicitMaxTTL: resp.WrapInfo.TTL,
+		NamespaceID:    ns.ID,
+	}
+
+	if err := c.tokenStore.create(ctx, &te); err != nil {
+		c.logger.Error("failed to create wrapping token", "error", err)
+		return nil, ErrInternalError
+	}
+
+	resp.WrapInfo.Token = te.ID
+	resp.WrapInfo.Accessor = te.Accessor
+	resp.WrapInfo.CreationTime = creationTime
+	// If this is not a rewrap, store the request path as creation_path
+	if req.Path != "sys/wrapping/rewrap" {
+		resp.WrapInfo.CreationPath = req.Path
+	}
+
+	if auth != nil && auth.EntityID != "" {
+		resp.WrapInfo.WrappedEntityID = auth.EntityID
+	}
+
+	// resp.Auth will only be non-nil if this response contains a token, so in
+	// that case put the accessor in the wrap info.
+ if resp.Auth != nil { + resp.WrapInfo.WrappedAccessor = resp.Auth.Accessor + } + + switch resp.WrapInfo.Format { + case "jwt": + // Create the JWT + claims := squarejwt.Claims{ + // Map the JWT ID to the token ID for ease of use + ID: te.ID, + // Set the issue time to the creation time + IssuedAt: squarejwt.NewNumericDate(creationTime), + // Set the expiration to the TTL + Expiry: squarejwt.NewNumericDate(creationTime.Add(resp.WrapInfo.TTL)), + // Set a reasonable not-before time; since unwrapping happens on this + // node we shouldn't have to worry much about drift + NotBefore: squarejwt.NewNumericDate(time.Now().Add(-5 * time.Second)), + } + type privateClaims struct { + Accessor string `json:"accessor"` + Type string `json:"type"` + Addr string `json:"addr"` + } + priClaims := &privateClaims{ + Type: "wrapping", + Addr: c.redirectAddr, + } + if resp.Auth != nil { + priClaims.Accessor = resp.Auth.Accessor + } + sig, err := jose.NewSigner( + jose.SigningKey{Algorithm: jose.ES512, Key: c.wrappingJWTKey}, + (&jose.SignerOptions{}).WithType("JWT")) + if err != nil { + c.tokenStore.revokeOrphan(ctx, te.ID) + c.logger.Error("failed to create JWT builder", "error", err) + return nil, ErrInternalError + } + ser, err := squarejwt.Signed(sig).Claims(claims).Claims(priClaims).CompactSerialize() + if err != nil { + c.tokenStore.revokeOrphan(ctx, te.ID) + c.logger.Error("failed to serialize JWT", "error", err) + return nil, ErrInternalError + } + resp.WrapInfo.Token = ser + if c.redirectAddr == "" { + resp.AddWarning("No redirect address set in Vault so none could be encoded in the token. You may need to supply Vault's API address when unwrapping the token.") + } + } + + cubbyReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "cubbyhole/response", + ClientToken: te.ID, + } + if sealWrap { + cubbyReq.WrapInfo = &logical.RequestWrapInfo{ + SealWrap: true, + } + } + cubbyReq.SetTokenEntry(&te) + + // During a rewrap, store the original response, don't wrap it again. + if req.Path == "sys/wrapping/rewrap" { + cubbyReq.Data = map[string]interface{}{ + "response": resp.Data["response"], + } + } else { + httpResponse := logical.LogicalResponseToHTTPResponse(resp) + + // Add the unique identifier of the original request to the response + httpResponse.RequestID = req.ID + + // Because of the way that JSON encodes (likely just in Go) we actually get + // mixed-up values for ints if we simply put this object in the response + // and encode the whole thing; so instead we marshal it first, then store + // the string response. This actually ends up making it easier on the + // client side, too, as it becomes a straight read-string-pass-to-unmarshal + // operation. 
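The int-mangling the comment above refers to is easy to reproduce with the standard library alone; this sketch (not Vault code) shows why the response is stored as a pre-marshaled string:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Decoding JSON numbers into interface{} yields float64, which is the
	// "mixed-up values for ints" problem described above.
	raw, _ := json.Marshal(map[string]interface{}{"ttl": 300})
	var decoded map[string]interface{}
	_ = json.Unmarshal(raw, &decoded)
	fmt.Printf("%T\n", decoded["ttl"]) // float64

	// Storing the marshaled response as an opaque string avoids this: the
	// client later unmarshals the exact original bytes itself.
	fmt.Println(string(raw)) // {"ttl":300}
}
```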
+ + marshaledResponse, err := json.Marshal(httpResponse) + if err != nil { + c.tokenStore.revokeOrphan(ctx, te.ID) + c.logger.Error("failed to marshal wrapped response", "error", err) + return nil, ErrInternalError + } + + cubbyReq.Data = map[string]interface{}{ + "response": string(marshaledResponse), + } + } + + cubbyResp, err := c.router.Route(ctx, cubbyReq) + if err != nil { + // Revoke since it's not yet being tracked for expiration + c.tokenStore.revokeOrphan(ctx, te.ID) + c.logger.Error("failed to store wrapped response information", "error", err) + return nil, ErrInternalError + } + if cubbyResp != nil && cubbyResp.IsError() { + c.tokenStore.revokeOrphan(ctx, te.ID) + c.logger.Error("failed to store wrapped response information", "error", cubbyResp.Data["error"]) + return cubbyResp, nil + } + + // Store info for lookup + cubbyReq.WrapInfo = nil + cubbyReq.Path = "cubbyhole/wrapinfo" + cubbyReq.Data = map[string]interface{}{ + "creation_ttl": resp.WrapInfo.TTL, + "creation_time": creationTime, + } + // Store creation_path if not a rewrap + if req.Path != "sys/wrapping/rewrap" { + cubbyReq.Data["creation_path"] = req.Path + } else { + cubbyReq.Data["creation_path"] = resp.WrapInfo.CreationPath + } + cubbyResp, err = c.router.Route(ctx, cubbyReq) + if err != nil { + // Revoke since it's not yet being tracked for expiration + c.tokenStore.revokeOrphan(ctx, te.ID) + c.logger.Error("failed to store wrapping information", "error", err) + return nil, ErrInternalError + } + if cubbyResp != nil && cubbyResp.IsError() { + c.tokenStore.revokeOrphan(ctx, te.ID) + c.logger.Error("failed to store wrapping information", "error", cubbyResp.Data["error"]) + return cubbyResp, nil + } + + wAuth := &logical.Auth{ + ClientToken: te.ID, + Policies: []string{"response-wrapping"}, + LeaseOptions: logical.LeaseOptions{ + TTL: te.TTL, + Renewable: false, + }, + } + + // Register the wrapped token with the expiration manager + if err := c.expiration.RegisterAuth(ctx, &te, wAuth); err != nil { + // Revoke since it's not yet being tracked for expiration + c.tokenStore.revokeOrphan(ctx, te.ID) + c.logger.Error("failed to register cubbyhole wrapping token lease", "request_path", req.Path, "error", err) + return nil, ErrInternalError + } + + return nil, nil +} + +// validateWrappingToken checks whether a token is a wrapping token. The passed +// in logical request will be updated if the wrapping token was provided within +// a JWT token. 
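A hypothetical distillation of the JWT-detection heuristic used further down in this function; `looksLikeWrappingJWT` is not a real Vault helper, just a sketch of the dot-count check:

```go
package main

import (
	"fmt"
	"strings"
)

// looksLikeWrappingJWT mirrors the check described below: JWTs have two
// dots, while namespaced Vault tokens also have two dots but start with a
// single type letter followed by a dot.
func looksLikeWrappingJWT(token string) bool {
	return strings.Count(token, ".") == 2 && len(token) > 1 && token[1] != '.'
}

func main() {
	fmt.Println(looksLikeWrappingJWT("eyJhbGci.eyJqdGki.c2lnbmF0dXJl")) // true: JWT shape
	fmt.Println(looksLikeWrappingJWT("s.abcdef.nsid1"))                 // false: namespaced token
}
```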
+func (c *Core) ValidateWrappingToken(ctx context.Context, req *logical.Request) (valid bool, err error) { + if req == nil { + return false, fmt.Errorf("invalid request") + } + + if c.Sealed() { + return false, consts.ErrSealed + } + + c.stateLock.RLock() + defer c.stateLock.RUnlock() + + if c.standby && !c.perfStandby { + return false, consts.ErrStandby + } + + defer func() { + // Perform audit logging before returning if there's an issue with checking + // the wrapping token + if err != nil || !valid { + // We log the Auth object like so here since the wrapping token can + // come from the header, which gets set as the ClientToken + auth := &logical.Auth{ + ClientToken: req.ClientToken, + Accessor: req.ClientTokenAccessor, + } + + logInput := &logical.LogInput{ + Auth: auth, + Request: req, + } + if err != nil { + logInput.OuterErr = errors.New("error validating wrapping token") + } + if !valid { + logInput.OuterErr = consts.ErrInvalidWrappingToken + } + if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { + c.logger.Error("failed to audit request", "path", req.Path, "error", err) + } + } + }() + + var token string + var thirdParty bool + + // Check if the wrapping token is coming from the request body, and if not + // assume that req.ClientToken is the wrapping token + if req.Data != nil && req.Data["token"] != nil { + thirdParty = true + if tokenStr, ok := req.Data["token"].(string); !ok { + return false, fmt.Errorf("could not decode token in request body") + } else if tokenStr == "" { + return false, fmt.Errorf("empty token in request body") + } else { + token = tokenStr + } + } else { + token = req.ClientToken + } + + // Check for it being a JWT. If it is, and it is valid, we extract the + // internal client token from it and use that during lookup. The second + // check is a quick check to verify that we don't consider a namespaced + // token to be a JWT -- namespaced tokens have two dots too, but Vault + // token types (for now at least) begin with a letter representing a type + // and then a dot. + if strings.Count(token, ".") == 2 && token[1] != '.' 
{ + // Implement the jose library way + parsedJWT, err := squarejwt.ParseSigned(token) + if err != nil { + return false, errwrap.Wrapf("wrapping token could not be parsed: {{err}}", err) + } + var claims squarejwt.Claims + var allClaims = make(map[string]interface{}) + if err = parsedJWT.Claims(&c.wrappingJWTKey.PublicKey, &claims, &allClaims); err != nil { + return false, errwrap.Wrapf("wrapping token signature could not be validated: {{err}}", err) + } + typeClaimRaw, ok := allClaims["type"] + if !ok { + return false, errors.New("could not validate type claim") + } + typeClaim, ok := typeClaimRaw.(string) + if !ok { + return false, errors.New("could not parse type claim") + } + if typeClaim != "wrapping" { + return false, errors.New("unexpected type claim") + } + if !thirdParty { + req.ClientToken = claims.ID + } else { + req.Data["token"] = claims.ID + } + + token = claims.ID + } + + if token == "" { + return false, fmt.Errorf("token is empty") + } + + te, err := c.tokenStore.Lookup(ctx, token) + if err != nil { + return false, err + } + if te == nil { + return false, nil + } + + if len(te.Policies) != 1 { + return false, nil + } + + if te.Policies[0] != responseWrappingPolicyName && te.Policies[0] != controlGroupPolicyName { + return false, nil + } + + if !thirdParty { + req.ClientTokenAccessor = te.Accessor + req.ClientTokenRemainingUses = te.NumUses + req.SetTokenEntry(te) + } + + return true, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/wrapping_util.go b/vendor/github.com/hashicorp/vault/vault/wrapping_util.go new file mode 100644 index 00000000..e40c5aac --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/wrapping_util.go @@ -0,0 +1,13 @@ +// +build !enterprise + +package vault + +import ( + "context" + + "github.com/hashicorp/vault/sdk/logical" +) + +func forwardWrapRequest(context.Context, *Core, *logical.Request, *logical.Response, *logical.Auth) (*logical.Response, error) { + return nil, nil +} diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/yamux/.gitignore new file mode 100644 index 00000000..83656241 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE new file mode 100644 index 00000000..f0e5c79e --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md new file mode 100644 index 00000000..d4db7fc9 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/README.md @@ -0,0 +1,86 @@ +# Yamux + +Yamux (Yet another Multiplexer) is a multiplexing library for Golang. +It relies on an underlying connection to provide reliability +and ordering, such as TCP or Unix domain sockets, and provides +stream-oriented multiplexing. It is inspired by SPDY but is not +interoperable with it. + +Yamux features include: + +* Bi-directional streams + * Streams can be opened by either client or server + * Useful for NAT traversal + * Server-side push support +* Flow control + * Avoid starvation + * Back-pressure to prevent overwhelming a receiver +* Keep Alives + * Enables persistent connections over a load balancer +* Efficient + * Enables thousands of logical streams with low overhead + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). + +## Specification + +The full specification for Yamux is provided in the `spec.md` file. +It can be used as a guide to implementors of interoperable libraries. + +## Usage + +Using Yamux is remarkably simple: + +```go + +func client() { + // Get a TCP connection + conn, err := net.Dial(...) 
+	if err != nil {
+		panic(err)
+	}
+
+	// Setup client side of yamux
+	session, err := yamux.Client(conn, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// Open a new stream
+	stream, err := session.Open()
+	if err != nil {
+		panic(err)
+	}
+
+	// Stream implements net.Conn
+	stream.Write([]byte("ping"))
+}
+
+func server() {
+	// Accept a TCP connection
+	conn, err := listener.Accept()
+	if err != nil {
+		panic(err)
+	}
+
+	// Setup server side of yamux
+	session, err := yamux.Server(conn, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// Accept a stream
+	stream, err := session.Accept()
+	if err != nil {
+		panic(err)
+	}
+
+	// Listen for a message
+	buf := make([]byte, 4)
+	stream.Read(buf)
+}
+
+```
+
diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go
new file mode 100644
index 00000000..be6ebca9
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/addr.go
@@ -0,0 +1,60 @@
+package yamux
+
+import (
+	"fmt"
+	"net"
+)
+
+// hasAddr is used to get the address from the underlying connection
+type hasAddr interface {
+	LocalAddr() net.Addr
+	RemoteAddr() net.Addr
+}
+
+// yamuxAddr is used when we cannot get the underlying address
+type yamuxAddr struct {
+	Addr string
+}
+
+func (*yamuxAddr) Network() string {
+	return "yamux"
+}
+
+func (y *yamuxAddr) String() string {
+	return fmt.Sprintf("yamux:%s", y.Addr)
+}
+
+// Addr is used to get the address of the listener.
+func (s *Session) Addr() net.Addr {
+	return s.LocalAddr()
+}
+
+// LocalAddr is used to get the local address of the
+// underlying connection.
+func (s *Session) LocalAddr() net.Addr {
+	addr, ok := s.conn.(hasAddr)
+	if !ok {
+		return &yamuxAddr{"local"}
+	}
+	return addr.LocalAddr()
+}
+
+// RemoteAddr is used to get the address of the remote end
+// of the underlying connection
+func (s *Session) RemoteAddr() net.Addr {
+	addr, ok := s.conn.(hasAddr)
+	if !ok {
+		return &yamuxAddr{"remote"}
+	}
+	return addr.RemoteAddr()
+}
+
+// LocalAddr returns the local address
+func (s *Stream) LocalAddr() net.Addr {
+	return s.session.LocalAddr()
+}
+
+// RemoteAddr returns the remote address
+func (s *Stream) RemoteAddr() net.Addr {
+	return s.session.RemoteAddr()
+}
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644
index 00000000..4f529382
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/const.go
@@ -0,0 +1,157 @@
+package yamux
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+var (
+	// ErrInvalidVersion means we received a frame with an
+	// invalid version
+	ErrInvalidVersion = fmt.Errorf("invalid protocol version")
+
+	// ErrInvalidMsgType means we received a frame with an
+	// invalid message type
+	ErrInvalidMsgType = fmt.Errorf("invalid msg type")
+
+	// ErrSessionShutdown is used if there is a shutdown during
+	// an operation
+	ErrSessionShutdown = fmt.Errorf("session shutdown")
+
+	// ErrStreamsExhausted is returned if we have no more
+	// stream ids to issue
+	ErrStreamsExhausted = fmt.Errorf("streams exhausted")
+
+	// ErrDuplicateStream is used if a duplicate stream is
+	// opened inbound
+	ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
+
+	// ErrRecvWindowExceeded indicates the window was exceeded
+	ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
+
+	// ErrTimeout is used when we reach an IO deadline
+	ErrTimeout = fmt.Errorf("i/o deadline reached")
+
+	// ErrStreamClosed is returned when using a closed stream
+	ErrStreamClosed = fmt.Errorf("stream closed")
+
+	//
ErrUnexpectedFlag is set when we get an unexpected flag + ErrUnexpectedFlag = fmt.Errorf("unexpected flag") + + // ErrRemoteGoAway is used when we get a go away from the other side + ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections") + + // ErrConnectionReset is sent if a stream is reset. This can happen + // if the backlog is exceeded, or if there was a remote GoAway. + ErrConnectionReset = fmt.Errorf("connection reset") + + // ErrConnectionWriteTimeout indicates that we hit the "safety valve" + // timeout writing to the underlying stream connection. + ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout") + + // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close + ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout") +) + +const ( + // protoVersion is the only version we support + protoVersion uint8 = 0 +) + +const ( + // Data is used for data frames. They are followed + // by length bytes worth of payload. + typeData uint8 = iota + + // WindowUpdate is used to change the window of + // a given stream. The length indicates the delta + // update to the window. + typeWindowUpdate + + // Ping is sent as a keep-alive or to measure + // the RTT. The StreamID and Length value are echoed + // back in the response. + typePing + + // GoAway is sent to terminate a session. The StreamID + // should be 0 and the length is an error code. + typeGoAway +) + +const ( + // SYN is sent to signal a new stream. May + // be sent with a data payload + flagSYN uint16 = 1 << iota + + // ACK is sent to acknowledge a new stream. May + // be sent with a data payload + flagACK + + // FIN is sent to half-close the given stream. + // May be sent with a data payload. + flagFIN + + // RST is used to hard close a given stream. 
+	flagRST
+)
+
+const (
+	// initialStreamWindow is the initial stream window size
+	initialStreamWindow uint32 = 256 * 1024
+)
+
+const (
+	// goAwayNormal is sent on a normal termination
+	goAwayNormal uint32 = iota
+
+	// goAwayProtoErr sent on a protocol error
+	goAwayProtoErr
+
+	// goAwayInternalErr sent on an internal error
+	goAwayInternalErr
+)
+
+const (
+	sizeOfVersion  = 1
+	sizeOfType     = 1
+	sizeOfFlags    = 2
+	sizeOfStreamID = 4
+	sizeOfLength   = 4
+	headerSize     = sizeOfVersion + sizeOfType + sizeOfFlags +
+		sizeOfStreamID + sizeOfLength
+)
+
+type header []byte
+
+func (h header) Version() uint8 {
+	return h[0]
+}
+
+func (h header) MsgType() uint8 {
+	return h[1]
+}
+
+func (h header) Flags() uint16 {
+	return binary.BigEndian.Uint16(h[2:4])
+}
+
+func (h header) StreamID() uint32 {
+	return binary.BigEndian.Uint32(h[4:8])
+}
+
+func (h header) Length() uint32 {
+	return binary.BigEndian.Uint32(h[8:12])
+}
+
+func (h header) String() string {
+	return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
+		h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
+}
+
+func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
+	h[0] = protoVersion
+	h[1] = msgType
+	binary.BigEndian.PutUint16(h[2:4], flags)
+	binary.BigEndian.PutUint32(h[4:8], streamID)
+	binary.BigEndian.PutUint32(h[8:12], length)
+}
diff --git a/vendor/github.com/hashicorp/yamux/go.mod b/vendor/github.com/hashicorp/yamux/go.mod
new file mode 100644
index 00000000..672a0e58
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/yamux
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644
index 00000000..18a078c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/mux.go
@@ -0,0 +1,98 @@
+package yamux
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"time"
+)
+
+// Config is used to tune the Yamux session
+type Config struct {
+	// AcceptBacklog is used to limit how many streams may be
+	// waiting for an accept.
+	AcceptBacklog int
+
+	// EnableKeepAlive is used to send periodic keep-alive
+	// messages using a ping.
+	EnableKeepAlive bool
+
+	// KeepAliveInterval is how often to perform the keep alive
+	KeepAliveInterval time.Duration
+
+	// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+	// which we will suspect a problem with the underlying connection and
+	// close it. This is only applied to writes, where there's generally
+	// an expectation that things will move along quickly.
+	ConnectionWriteTimeout time.Duration
+
+	// MaxStreamWindowSize is used to control the maximum
+	// window size that we allow for a stream.
+	MaxStreamWindowSize uint32
+
+	// LogOutput is used to control the log destination. Either Logger or
+	// LogOutput can be set, not both.
+	LogOutput io.Writer
+
+	// Logger is used to pass in the logger to be used. Either Logger or
+	// LogOutput can be set, not both.
+	Logger *log.Logger
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+	return &Config{
+		AcceptBacklog:          256,
+		EnableKeepAlive:        true,
+		KeepAliveInterval:      30 * time.Second,
+		ConnectionWriteTimeout: 10 * time.Second,
+		MaxStreamWindowSize:    initialStreamWindow,
+		LogOutput:              os.Stderr,
+	}
+}
+
+// VerifyConfig is used to verify the sanity of configuration
+func VerifyConfig(config *Config) error {
+	if config.AcceptBacklog <= 0 {
+		return fmt.Errorf("backlog must be positive")
+	}
+	if config.KeepAliveInterval == 0 {
+		return fmt.Errorf("keep-alive interval must be positive")
+	}
+	if config.MaxStreamWindowSize < initialStreamWindow {
+		return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+	}
+	if config.LogOutput != nil && config.Logger != nil {
+		return fmt.Errorf("both Logger and LogOutput may not be set, select one")
+	} else if config.LogOutput == nil && config.Logger == nil {
+		return fmt.Errorf("one of Logger or LogOutput must be set, select one")
+	}
+	return nil
+}
+
+// Server is used to initialize a new server-side connection.
+// There must be at most one server-side connection. If a nil config is
+// provided, DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+	if config == nil {
+		config = DefaultConfig()
+	}
+	if err := VerifyConfig(config); err != nil {
+		return nil, err
+	}
+	return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+	if config == nil {
+		config = DefaultConfig()
+	}
+
+	if err := VerifyConfig(config); err != nil {
+		return nil, err
+	}
+	return newSession(config, conn, true), nil
+}
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 00000000..a80ddec3
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,653 @@
+package yamux
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"math"
+	"net"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+	// remoteGoAway indicates the remote side does
+	// not want further connections. Must be first for alignment.
+	remoteGoAway int32
+
+	// localGoAway indicates that we should stop
+	// accepting further connections. Must be first for alignment.
+	localGoAway int32
+
+	// nextStreamID is the next stream we should
+	// send. This depends on whether we are a client or a server.
+	nextStreamID uint32
+
+	// config holds our configuration
+	config *Config
+
+	// logger is used for our logs
+	logger *log.Logger
+
+	// conn is the underlying connection
+	conn io.ReadWriteCloser
+
+	// bufRead is a buffered reader
+	bufRead *bufio.Reader
+
+	// pings is used to track inflight pings
+	pings    map[uint32]chan struct{}
+	pingID   uint32
+	pingLock sync.Mutex
+
+	// streams maps a stream id to a stream, and inflight has an entry
+	// for any outgoing stream that has not yet been established. Both are
+	// protected by streamLock.
+	streams    map[uint32]*Stream
+	inflight   map[uint32]struct{}
+	streamLock sync.Mutex
+
+	// synCh acts like a semaphore. It is sized to the AcceptBacklog which
+	// is assumed to be symmetric between the client and server.
This allows + // the client to avoid exceeding the backlog and instead blocks the open. + synCh chan struct{} + + // acceptCh is used to pass ready streams to the client + acceptCh chan *Stream + + // sendCh is used to mark a stream as ready to send, + // or to send a header out directly. + sendCh chan sendReady + + // recvDoneCh is closed when recv() exits to avoid a race + // between stream registration and stream shutdown + recvDoneCh chan struct{} + + // shutdown is used to safely close a session + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// sendReady is used to either mark a stream as ready +// or to directly send a header +type sendReady struct { + Hdr []byte + Body io.Reader + Err chan error +} + +// newSession is used to construct a new session +func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + logger := config.Logger + if logger == nil { + logger = log.New(config.LogOutput, "", log.LstdFlags) + } + + s := &Session{ + config: config, + logger: logger, + conn: conn, + bufRead: bufio.NewReader(conn), + pings: make(map[uint32]chan struct{}), + streams: make(map[uint32]*Stream), + inflight: make(map[uint32]struct{}), + synCh: make(chan struct{}, config.AcceptBacklog), + acceptCh: make(chan *Stream, config.AcceptBacklog), + sendCh: make(chan sendReady, 64), + recvDoneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + } + if client { + s.nextStreamID = 1 + } else { + s.nextStreamID = 2 + } + go s.recv() + go s.send() + if config.EnableKeepAlive { + go s.keepalive() + } + return s +} + +// IsClosed does a safe check to see if we have shutdown +func (s *Session) IsClosed() bool { + select { + case <-s.shutdownCh: + return true + default: + return false + } +} + +// CloseChan returns a read-only channel which is closed as +// soon as the session is closed. +func (s *Session) CloseChan() <-chan struct{} { + return s.shutdownCh +} + +// NumStreams returns the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") + } + return nil, err + } + return stream, nil +} + +// Accept is used to block until the next available stream +// is ready to be accepted. 
+func (s *Session) Accept() (net.Conn, error) {
+	conn, err := s.AcceptStream()
+	if err != nil {
+		return nil, err
+	}
+	return conn, err
+}
+
+// AcceptStream is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) AcceptStream() (*Stream, error) {
+	select {
+	case stream := <-s.acceptCh:
+		if err := stream.sendWindowUpdate(); err != nil {
+			return nil, err
+		}
+		return stream, nil
+	case <-s.shutdownCh:
+		return nil, s.shutdownErr
+	}
+}
+
+// Close is used to close the session and all streams.
+// Attempts to send a GoAway before closing the connection.
+func (s *Session) Close() error {
+	s.shutdownLock.Lock()
+	defer s.shutdownLock.Unlock()
+
+	if s.shutdown {
+		return nil
+	}
+	s.shutdown = true
+	if s.shutdownErr == nil {
+		s.shutdownErr = ErrSessionShutdown
+	}
+	close(s.shutdownCh)
+	s.conn.Close()
+	<-s.recvDoneCh
+
+	s.streamLock.Lock()
+	defer s.streamLock.Unlock()
+	for _, stream := range s.streams {
+		stream.forceClose()
+	}
+	return nil
+}
+
+// exitErr is used to handle an error that is causing the
+// session to terminate.
+func (s *Session) exitErr(err error) {
+	s.shutdownLock.Lock()
+	if s.shutdownErr == nil {
+		s.shutdownErr = err
+	}
+	s.shutdownLock.Unlock()
+	s.Close()
+}
+
+// GoAway can be used to prevent accepting further
+// connections. It does not close the underlying conn.
+func (s *Session) GoAway() error {
+	return s.waitForSend(s.goAway(goAwayNormal), nil)
+}
+
+// goAway is used to send a goAway message
+func (s *Session) goAway(reason uint32) header {
+	atomic.SwapInt32(&s.localGoAway, 1)
+	hdr := header(make([]byte, headerSize))
+	hdr.encode(typeGoAway, 0, 0, reason)
+	return hdr
+}
+
+// Ping is used to measure the RTT response time
+func (s *Session) Ping() (time.Duration, error) {
+	// Get a channel for the ping
+	ch := make(chan struct{})
+
+	// Get a new ping id, mark as pending
+	s.pingLock.Lock()
+	id := s.pingID
+	s.pingID++
+	s.pings[id] = ch
+	s.pingLock.Unlock()
+
+	// Send the ping request
+	hdr := header(make([]byte, headerSize))
+	hdr.encode(typePing, flagSYN, 0, id)
+	if err := s.waitForSend(hdr, nil); err != nil {
+		return 0, err
+	}
+
+	// Wait for a response
+	start := time.Now()
+	select {
+	case <-ch:
+	case <-time.After(s.config.ConnectionWriteTimeout):
+		s.pingLock.Lock()
+		delete(s.pings, id) // Ignore it if a response comes later.
+		s.pingLock.Unlock()
+		return 0, ErrTimeout
+	case <-s.shutdownCh:
+		return 0, ErrSessionShutdown
+	}
+
+	// Compute the RTT
+	return time.Now().Sub(start), nil
+}
+
+// keepalive is a long running goroutine that periodically does
+// a ping to keep the connection alive.
+func (s *Session) keepalive() {
+	for {
+		select {
+		case <-time.After(s.config.KeepAliveInterval):
+			_, err := s.Ping()
+			if err != nil {
+				if err != ErrSessionShutdown {
+					s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
+					s.exitErr(ErrKeepAliveTimeout)
+				}
+				return
+			}
+		case <-s.shutdownCh:
+			return
+		}
+	}
+}
+
+// waitForSend waits to send a header, checking for a potential shutdown
+func (s *Session) waitForSend(hdr header, body io.Reader) error {
+	errCh := make(chan error, 1)
+	return s.waitForSendErr(hdr, body, errCh)
+}
+
+// waitForSendErr waits to send a header with optional data, checking for a
+// potential shutdown. Since there's the expectation that sends can happen
+// in a timely manner, we enforce the connection write timeout here.
+func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + select { + case s.sendCh <- ready: + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } + + select { + case err := <-errCh: + return err + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// sendNoWait does a send without waiting. Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. +func (s *Session) sendNoWait(hdr header) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + select { + case s.sendCh <- sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + for { + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + sent := 0 + for sent < len(ready.Hdr) { + n, err := s.conn.Write(ready.Hdr[sent:]) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + sent += n + } + } + + // Send data from a body if given + if ready.Body != nil { + _, err := io.Copy(s.conn, ready.Body) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + mt := hdr.MsgType() + if mt < typeData || mt > typeGoAway { + return ErrInvalidMsgType + } + + if err := handlers[mt](s, hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := 
hdr.Flags()
+	if flags&flagSYN == flagSYN {
+		if err := s.incomingStream(id); err != nil {
+			return err
+		}
+	}
+
+	// Get the stream
+	s.streamLock.Lock()
+	stream := s.streams[id]
+	s.streamLock.Unlock()
+
+	// If we do not have a stream, likely we sent a RST
+	if stream == nil {
+		// Drain any data on the wire
+		if hdr.MsgType() == typeData && hdr.Length() > 0 {
+			s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
+			if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
+				s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
+				return nil
+			}
+		} else {
+			s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
+		}
+		return nil
+	}
+
+	// Check if this is a window update
+	if hdr.MsgType() == typeWindowUpdate {
+		if err := stream.incrSendWindow(hdr, flags); err != nil {
+			if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+				s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+			}
+			return err
+		}
+		return nil
+	}
+
+	// Read the new data
+	if err := stream.readData(hdr, flags, s.bufRead); err != nil {
+		if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+			s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+		}
+		return err
+	}
+	return nil
+}
+
+// handlePing is invoked for a typePing frame
+func (s *Session) handlePing(hdr header) error {
+	flags := hdr.Flags()
+	pingID := hdr.Length()
+
+	// Check if this is a query, respond back in a separate context so we
+	// don't interfere with the receiving thread blocking for the write.
+	if flags&flagSYN == flagSYN {
+		go func() {
+			hdr := header(make([]byte, headerSize))
+			hdr.encode(typePing, flagACK, 0, pingID)
+			if err := s.sendNoWait(hdr); err != nil {
+				s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
+			}
+		}()
+		return nil
+	}
+
+	// Handle a response
+	s.pingLock.Lock()
+	ch := s.pings[pingID]
+	if ch != nil {
+		delete(s.pings, pingID)
+		close(ch)
+	}
+	s.pingLock.Unlock()
+	return nil
+}
+
+// handleGoAway is invoked for a typeGoAway frame
+func (s *Session) handleGoAway(hdr header) error {
+	code := hdr.Length()
+	switch code {
+	case goAwayNormal:
+		atomic.SwapInt32(&s.remoteGoAway, 1)
+	case goAwayProtoErr:
+		s.logger.Printf("[ERR] yamux: received protocol error go away")
+		return fmt.Errorf("yamux protocol error")
+	case goAwayInternalErr:
+		s.logger.Printf("[ERR] yamux: received internal error go away")
+		return fmt.Errorf("remote yamux internal error")
+	default:
+		s.logger.Printf("[ERR] yamux: received unexpected go away")
+		return fmt.Errorf("unexpected go away received")
+	}
+	return nil
+}
+
+// incomingStream is used to create a new incoming stream
+func (s *Session) incomingStream(id uint32) error {
+	// Reject immediately if we are doing a go away
+	if atomic.LoadInt32(&s.localGoAway) == 1 {
+		hdr := header(make([]byte, headerSize))
+		hdr.encode(typeWindowUpdate, flagRST, id, 0)
+		return s.sendNoWait(hdr)
+	}
+
+	// Allocate a new stream
+	stream := newStream(s, id, streamSYNReceived)
+
+	s.streamLock.Lock()
+	defer s.streamLock.Unlock()
+
+	// Check if stream already exists
+	if _, ok := s.streams[id]; ok {
+		s.logger.Printf("[ERR] yamux: duplicate stream declared")
+		if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+			s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+		}
+		return ErrDuplicateStream
+	}
+
+	// Register the stream
+	s.streams[id] = stream
+
+	// Check if we've exceeded the backlog
+	select {
+	case s.acceptCh <- stream:
+		return nil
+	default:
+		// Backlog exceeded! RST the stream
+		s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
+		delete(s.streams, id)
+		stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
+		return s.sendNoWait(stream.sendHdr)
+	}
+}
+
+// closeStream is used to close a stream once both sides have
+// issued a close. If there was an in-flight SYN and the stream
+// was not yet established, then this will give the credit back.
+func (s *Session) closeStream(id uint32) {
+	s.streamLock.Lock()
+	if _, ok := s.inflight[id]; ok {
+		select {
+		case <-s.synCh:
+		default:
+			s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
+		}
+	}
+	delete(s.streams, id)
+	s.streamLock.Unlock()
+}
+
+// establishStream is used to mark a stream that was in the
+// SYN Sent state as established.
+func (s *Session) establishStream(id uint32) {
+	s.streamLock.Lock()
+	if _, ok := s.inflight[id]; ok {
+		delete(s.inflight, id)
+	} else {
+		s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
+	}
+	select {
+	case <-s.synCh:
+	default:
+		s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
+	}
+	s.streamLock.Unlock()
+}
diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md
new file mode 100644
index 00000000..183d797b
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/spec.md
@@ -0,0 +1,140 @@
+# Specification
+
+We use this document to detail the internal specification of Yamux.
+It serves both as a guide for implementing Yamux and as a reference
+for building alternative interoperable libraries.
+
+# Framing
+
+Yamux uses a streaming connection underneath, but imposes a message
+framing so that it can be shared between many logical streams. Each
+frame contains a header like:
+
+* Version (8 bits)
+* Type (8 bits)
+* Flags (16 bits)
+* StreamID (32 bits)
+* Length (32 bits)
+
+This means that each header has a 12 byte overhead.
+All fields are encoded in network order (big endian).
+Each field is described below:
+
+## Version Field
+
+The version field is used for future backward compatibility. At the
+current time, the field is always set to 0, to indicate the initial
+version.
+
+## Type Field
+
+The type field is used to switch the frame message type. The following
+message types are supported:
+
+* 0x0 Data - Used to transmit data. May transmit zero length payloads
+  depending on the flags.
+
+* 0x1 Window Update - Used to update the sender's receive window size.
+  This is used to implement per-stream flow control.
+
+* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat
+  and do keep-alives over TCP.
+
+* 0x3 Go Away - Used to close a session.
+
+## Flag Field
+
+The flags field is used to provide additional information related
+to the message type. The following flags are supported:
+
+* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
+  window update message. Also sent with a ping to indicate outbound.
+
+* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
+  or window update message. Also sent with a ping to indicate response.
+
+* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
+  message or window update.
+
+* 0x8 RST - Reset a stream immediately. May be sent with a data or
+  window update message.
+
+## StreamID Field
+
+The StreamID field is used to identify the logical stream the frame
+is addressing. The client side should use odd IDs, and the server side
+even IDs. This prevents any collisions. Additionally, the 0 ID is
+reserved to represent the session.
+
+Both Ping and Go Away messages should always use the 0 StreamID.
+
+## Length Field
+
+The meaning of the length field depends on the message type:
+
+* Data - provides the length of bytes following the header
+* Window update - provides a delta update to the window size
+* Ping - Contains an opaque value, echoed back
+* Go Away - Contains an error code
+
+# Message Flow
+
+There is no explicit connection setup, as Yamux relies on an underlying
+transport to be provided. However, there is a distinction between client
+and server side of the connection.
+
+## Opening a stream
+
+To open a stream, an initial data or window update frame is sent
+with a new StreamID. The SYN flag should be set to signal a new stream.
+
+The receiver must then reply with either a data or window update frame
+with the StreamID along with the ACK flag to accept the stream or with
+the RST flag to reject the stream.
+
+Because we are relying on the reliable stream underneath, a connection
+can begin sending data once the SYN flag is sent. The corresponding
+ACK does not need to be received. This is particularly well suited
+for an RPC system where a client wants to open a stream and immediately
+fire a request without waiting for the RTT of the ACK.
+
+This does introduce the possibility of a connection being rejected
+after data has been sent already. This is a slight semantic difference
+from TCP, where the connection cannot be refused after it is opened.
+Clients should be prepared to handle this by checking for an error
+that indicates a RST was received.
+
+## Closing a stream
+
+To close a stream, either side sends a data or window update frame
+along with the FIN flag. This does a half-close indicating the sender
+will send no further data.
+
+Once both sides have performed a half-close, the stream is fully closed.
+
+Alternatively, if an error occurs, the RST flag can be used to
+hard close a stream immediately.
+
+## Flow Control
+
+Yamux initially starts each stream with a 256KB window size.
+There is no window size for the session.
+
+To prevent the streams from stalling, window update frames should be
+sent regularly. Yamux can be configured to provide a larger limit for
+window sizes. Both sides assume the initial 256KB window, but can
+immediately send a window update as part of the SYN/ACK indicating a
+larger window.
+
+Both sides should track only the number of bytes sent in Data frames,
+as only those count against the window size.
+
+## Session termination
+
+When a session is being terminated, the Go Away message should
+be sent. The Length should be set to one of the following to
+provide an error code:
+
+* 0x0 Normal termination
+* 0x1 Protocol error
+* 0x2 Internal error
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
new file mode 100644
index 00000000..aa239197
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/stream.go
@@ -0,0 +1,470 @@
+package yamux
+
+import (
+	"bytes"
+	"io"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type streamState int
+
+const (
+	streamInit streamState = iota
+	streamSYNSent
+	streamSYNReceived
+	streamEstablished
+	streamLocalClose
+	streamRemoteClose
+	streamClosed
+	streamReset
+)
+
+// Stream is used to represent a logical stream
+// within a session.
+type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline atomic.Value // time.Time + writeDeadline atomic.Value // time.Time +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + } + s.readDeadline.Store(time.Time{}) + s.writeDeadline.Store(time.Time{}) + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update potentially + err = s.sendWindowUpdate() + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + readDeadline := s.readDeadline.Load().(time.Time) + if !readDeadline.IsZero() { + delay := readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. 
+func (s *Stream) write(b []byte) (n int, err error) {
+	var flags uint16
+	var max uint32
+	var body io.Reader
+START:
+	s.stateLock.Lock()
+	switch s.state {
+	case streamLocalClose:
+		fallthrough
+	case streamClosed:
+		s.stateLock.Unlock()
+		return 0, ErrStreamClosed
+	case streamReset:
+		s.stateLock.Unlock()
+		return 0, ErrConnectionReset
+	}
+	s.stateLock.Unlock()
+
+	// If there is no room in the send window, block
+	window := atomic.LoadUint32(&s.sendWindow)
+	if window == 0 {
+		goto WAIT
+	}
+
+	// Determine the flags if any
+	flags = s.sendFlags()
+
+	// Send up to our send window
+	max = min(window, uint32(len(b)))
+	body = bytes.NewReader(b[:max])
+
+	// Send the header
+	s.sendHdr.encode(typeData, flags, s.id, max)
+	if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
+		return 0, err
+	}
+
+	// Reduce our send window
+	atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
+
+	// Return the number of bytes sent
+	return int(max), err
+
+WAIT:
+	var timeout <-chan time.Time
+	writeDeadline := s.writeDeadline.Load().(time.Time)
+	if !writeDeadline.IsZero() {
+		delay := writeDeadline.Sub(time.Now())
+		timeout = time.After(delay)
+	}
+	select {
+	case <-s.sendNotifyCh:
+		goto START
+	case <-timeout:
+		return 0, ErrTimeout
+	}
+	return 0, nil
+}
+
+// sendFlags determines any flags that are appropriate
+// based on the current stream state
+func (s *Stream) sendFlags() uint16 {
+	s.stateLock.Lock()
+	defer s.stateLock.Unlock()
+	var flags uint16
+	switch s.state {
+	case streamInit:
+		flags |= flagSYN
+		s.state = streamSYNSent
+	case streamSYNReceived:
+		flags |= flagACK
+		s.state = streamEstablished
+	}
+	return flags
+}
+
+// sendWindowUpdate potentially sends a window update enabling
+// further writes to take place. Must be invoked with the lock.
+func (s *Stream) sendWindowUpdate() error {
+	s.controlHdrLock.Lock()
+	defer s.controlHdrLock.Unlock()
+
+	// Determine the delta update
+	max := s.session.config.MaxStreamWindowSize
+	var bufLen uint32
+	s.recvLock.Lock()
+	if s.recvBuf != nil {
+		bufLen = uint32(s.recvBuf.Len())
+	}
+	delta := (max - bufLen) - s.recvWindow
+
+	// Determine the flags if any
+	flags := s.sendFlags()
+
+	// Check if we can omit the update
+	if delta < (max/2) && flags == 0 {
+		s.recvLock.Unlock()
+		return nil
+	}
+
+	// Update our window
+	s.recvWindow += delta
+	s.recvLock.Unlock()
+
+	// Send the header
+	s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
+	if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+		return err
+	}
+	return nil
+}
+
+// sendClose is used to send a FIN
+func (s *Stream) sendClose() error {
+	s.controlHdrLock.Lock()
+	defer s.controlHdrLock.Unlock()
+
+	flags := s.sendFlags()
+	flags |= flagFIN
+	s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
+	if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Close is used to close the stream
+func (s *Stream) Close() error {
+	closeStream := false
+	s.stateLock.Lock()
+	switch s.state {
+	// Opened means we need to signal a close
+	case streamSYNSent:
+		fallthrough
+	case streamSYNReceived:
+		fallthrough
+	case streamEstablished:
+		s.state = streamLocalClose
+		goto SEND_CLOSE
+
+	case streamLocalClose:
+	case streamRemoteClose:
+		s.state = streamClosed
+		closeStream = true
+		goto SEND_CLOSE
+
+	case streamClosed:
+	case streamReset:
+	default:
+		panic("unhandled state")
+	}
+	s.stateLock.Unlock()
+	return nil
+SEND_CLOSE:
+	s.stateLock.Unlock()
+	s.sendClose()
+	s.notifyWaiting()
+	if
closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. Lock must be held +func (s *Stream) processFlags(flags uint16) error { + // Close the stream without holding the state lock + closeStream := false + defer func() { + if closeStream { + s.session.closeStream(s.id) + } + }() + + s.stateLock.Lock() + defer s.stateLock.Unlock() + if flags&flagACK == flagACK { + if s.state == streamSYNSent { + s.state = streamEstablished + } + s.session.establishStream(s.id) + } + if flags&flagFIN == flagFIN { + switch s.state { + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamRemoteClose + s.notifyWaiting() + case streamLocalClose: + s.state = streamClosed + closeStream = true + s.notifyWaiting() + default: + s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) + return ErrUnexpectedFlag + } + } + if flags&flagRST == flagRST { + s.state = streamReset + closeStream = true + s.notifyWaiting() + } + return nil +} + +// notifyWaiting notifies all the waiting channels +func (s *Stream) notifyWaiting() { + asyncNotify(s.recvNotifyCh) + asyncNotify(s.sendNotifyCh) +} + +// incrSendWindow updates the size of our send window +func (s *Stream) incrSendWindow(hdr header, flags uint16) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Increase window, unblock a sender + atomic.AddUint32(&s.sendWindow, hdr.Length()) + asyncNotify(s.sendNotifyCh) + return nil +} + +// readData is used to handle a data frame +func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Check that our recv window is not exceeded + length := hdr.Length() + if length == 0 { + return nil + } + + // Wrap in a limited reader + conn = &io.LimitedReader{R: conn, N: int64(length)} + + // Copy into buffer + s.recvLock.Lock() + + if length > s.recvWindow { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + return ErrRecvWindowExceeded + } + + if s.recvBuf == nil { + // Allocate the receive buffer just-in-time to fit the full data frame. + // This way we can read in the whole packet without further allocations. + s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) + } + if _, err := io.Copy(s.recvBuf, conn); err != nil { + s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) + s.recvLock.Unlock() + return err + } + + // Decrement the receive window + s.recvWindow -= length + s.recvLock.Unlock() + + // Unblock any readers + asyncNotify(s.recvNotifyCh) + return nil +} + +// SetDeadline sets the read and write deadlines +func (s *Stream) SetDeadline(t time.Time) error { + if err := s.SetReadDeadline(t); err != nil { + return err + } + if err := s.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +// SetReadDeadline sets the deadline for future Read calls. 
+func (s *Stream) SetReadDeadline(t time.Time) error { + s.readDeadline.Store(t) + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls +func (s *Stream) SetWriteDeadline(t time.Time) error { + s.writeDeadline.Store(t) + return nil +} + +// Shrink is used to compact the amount of buffers utilized +// This is useful when using Yamux in a connection pool to reduce +// the idle memory utilization. +func (s *Stream) Shrink() { + s.recvLock.Lock() + if s.recvBuf != nil && s.recvBuf.Len() == 0 { + s.recvBuf = nil + } + s.recvLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go new file mode 100644 index 00000000..8a73e924 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/util.go @@ -0,0 +1,43 @@ +package yamux + +import ( + "sync" + "time" +) + +var ( + timerPool = &sync.Pool{ + New: func() interface{} { + timer := time.NewTimer(time.Hour * 1e6) + timer.Stop() + return timer + }, + } +) + +// asyncSendErr is used to try an async send of an error +func asyncSendErr(ch chan error, err error) { + if ch == nil { + return + } + select { + case ch <- err: + default: + } +} + +// asyncNotify is used to signal a waiting goroutine +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// min computes the minimum of two values +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/jefferai/isbadcipher/.gitattributes b/vendor/github.com/jefferai/isbadcipher/.gitattributes new file mode 100644 index 00000000..d2f212e5 --- /dev/null +++ b/vendor/github.com/jefferai/isbadcipher/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/github.com/jefferai/isbadcipher/.gitignore b/vendor/github.com/jefferai/isbadcipher/.gitignore new file mode 100644 index 00000000..8339fd61 --- /dev/null +++ b/vendor/github.com/jefferai/isbadcipher/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/github.com/jefferai/isbadcipher/AUTHORS b/vendor/github.com/jefferai/isbadcipher/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/github.com/jefferai/isbadcipher/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/jefferai/isbadcipher/CONTRIBUTORS b/vendor/github.com/jefferai/isbadcipher/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/github.com/jefferai/isbadcipher/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/jefferai/isbadcipher/LICENSE b/vendor/github.com/jefferai/isbadcipher/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/jefferai/isbadcipher/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/jefferai/isbadcipher/PATENTS b/vendor/github.com/jefferai/isbadcipher/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/github.com/jefferai/isbadcipher/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/jefferai/isbadcipher/README.md b/vendor/github.com/jefferai/isbadcipher/README.md new file mode 100644 index 00000000..f39febc1 --- /dev/null +++ b/vendor/github.com/jefferai/isbadcipher/README.md @@ -0,0 +1,5 @@ +# IsBadCipher + +This repository forks the Go supplemental networking library in order to +publicly expose a function that allows testing of a cipher spec against the +blacklisted set in the HTTP/2 spec. 
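A minimal usage sketch of the exported helper follows (an editorial illustration, not part of the vendored package: the `main` wrapper and the printed results are assumptions; `IsBadCipher` is the function defined in ciphers.go below, and the cipher constants come from the standard library's `crypto/tls`):

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/jefferai/isbadcipher"
)

func main() {
	// TLS_RSA_WITH_RC4_128_SHA (0x0005) appears in the RFC 7540 Appendix A
	// blacklist: no ephemeral key exchange, and RC4 is a stream cipher.
	fmt.Println(isbadcipher.IsBadCipher(tls.TLS_RSA_WITH_RC4_128_SHA)) // true

	// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 (0xC02F) offers an ephemeral
	// key exchange with an AEAD cipher, so it is not blacklisted.
	fmt.Println(isbadcipher.IsBadCipher(tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256)) // false
}
```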
diff --git a/vendor/github.com/jefferai/isbadcipher/ciphers.go b/vendor/github.com/jefferai/isbadcipher/ciphers.go new file mode 100644 index 00000000..1d6d857a --- /dev/null +++ b/vendor/github.com/jefferai/isbadcipher/ciphers.go @@ -0,0 +1,641 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package isbadcipher + +// A list of the possible cipher suite ids. Taken from +// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt + +const ( + cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 + cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 + cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 + cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 + cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 + cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 + cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 + cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 + cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B + // Reserved uint16 = 0x001C-1D + cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F + cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 + cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 + cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B + cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C + cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D + cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E + cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 + cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 + 
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A + cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 + // Reserved uint16 = 0x0047-4F + // Reserved uint16 = 0x0050-58 + // Reserved uint16 = 0x0059-5C + // Unassigned uint16 = 0x005D-5F + // Reserved uint16 = 0x0060-66 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D + // Unassigned uint16 = 0x006E-83 + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 + cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B + cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C + cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 + cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D + cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E + cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 + cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 + cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 + 
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 + cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA + cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF + cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 + cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 + cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 + cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 + cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 + cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 + // Unassigned uint16 = 0x00C6-FE + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF + // Unassigned uint16 = 0x01-55,* + cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 + // Unassigned uint16 = 0x5601 - 0xC000 + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A + cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 + cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 + 
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020 + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E + cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F + cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 + 
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 + cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 + cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 + cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B + cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C + cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 + 
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B + cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C + cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D + cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E + cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F + cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 + cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 + cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 + cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 + cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 + cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 + cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 + cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 + cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 + cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 + cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA + cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF + // Unassigned uint16 = 0xC0B0-FF + // Unassigned uint16 = 0xC1-CB,* + // Unassigned uint16 = 0xCC00-A7 + cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 + cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 + cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA + cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB + cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC + cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD + cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE +) + +// IsBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +// References: +// https://tools.ietf.org/html/rfc7540#appendix-A +// Reject cipher suites from Appendix A. 
+// "This list includes those cipher suites that do not +// offer an ephemeral key exchange and those that are +// based on the TLS null, stream or block cipher type" +func IsBadCipher(cipher uint16) bool { + switch cipher { + case cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8: + return true + default: + return false + } +}
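IsBadCipher is the concrete form of the Appendix A blacklist described above: every suite lacking an ephemeral key exchange, or built on the TLS null, stream, or block cipher types, returns true. A minimal sketch of the intended call pattern, assuming IsBadCipher is in scope; the surrounding helper is illustrative, not part of the vendored file:

import (
	"crypto/tls"
	"fmt"
)

// rejectBannedSuite would run after the TLS handshake completes, before
// speaking HTTP/2 on the connection.
func rejectBannedSuite(cs tls.ConnectionState) error {
	if IsBadCipher(cs.CipherSuite) {
		return fmt.Errorf("cipher suite 0x%04x is banned by RFC 7540 appendix A", cs.CipherSuite)
	}
	return nil
}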
diff --git a/vendor/github.com/jefferai/jsonx/LICENSE b/vendor/github.com/jefferai/jsonx/LICENSE new file mode 100644 index 00000000..a612ad98 --- /dev/null +++ b/vendor/github.com/jefferai/jsonx/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4.
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/jefferai/jsonx/README.md b/vendor/github.com/jefferai/jsonx/README.md new file mode 100644 index 00000000..a7bb5bac --- /dev/null +++ b/vendor/github.com/jefferai/jsonx/README.md @@ -0,0 +1,12 @@ +JSONx +======== + +[![GoDoc](https://godoc.org/github.com/jefferai/jsonx?status.svg)](https://godoc.org/github.com/jefferai/jsonx) + +A Go (Golang) library to transform an object or existing JSON bytes into +[JSONx](https://www.ibm.com/support/knowledgecenter/SS9H2Y_7.5.0/com.ibm.dp.doc/json_jsonxconversionrules.html). +Because sometimes your luck runs out. + +This follows the "standard" except for the handling of special and escaped +characters. Names and values are properly XML-escaped but there is no special +handling of values already escaped in JSON if they are valid in XML. diff --git a/vendor/github.com/jefferai/jsonx/go.mod b/vendor/github.com/jefferai/jsonx/go.mod new file mode 100644 index 00000000..eaf7062a --- /dev/null +++ b/vendor/github.com/jefferai/jsonx/go.mod @@ -0,0 +1,3 @@ +module github.com/jefferai/jsonx + +require github.com/Jeffail/gabs v1.1.1 diff --git a/vendor/github.com/jefferai/jsonx/go.sum b/vendor/github.com/jefferai/jsonx/go.sum new file mode 100644 index 00000000..4169e3d0 --- /dev/null +++ b/vendor/github.com/jefferai/jsonx/go.sum @@ -0,0 +1,2 @@ +github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E= +github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= diff --git a/vendor/github.com/jefferai/jsonx/jsonx.go b/vendor/github.com/jefferai/jsonx/jsonx.go new file mode 100644 index 00000000..93d24a9b --- /dev/null +++ b/vendor/github.com/jefferai/jsonx/jsonx.go @@ -0,0 +1,132 @@ +package jsonx + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "sort" + + "github.com/Jeffail/gabs" +) + +const ( + XMLHeader = `<?xml version="1.0" encoding="UTF-8"?>` + Header = `<json:object xsi:schemaLocation="http://www.datapower.com/schemas/json jsonx.xsd" xmlns:json="http://www.ibm.com/xmlns/prod/2009/jsonx" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">` + Footer = `</json:object>` +) + +// namedContainer wraps a gabs.Container to carry name information with it +type namedContainer struct { + name string + *gabs.Container +} + +// Marshal marshals the input data into JSONx. +func Marshal(input interface{}) (string, error) { + jsonBytes, err := json.Marshal(input) + if err != nil { + return "", err + } + xmlBytes, err := EncodeJSONBytes(jsonBytes) + if err != nil { + return "", err + } + return fmt.Sprintf("%s%s%s%s", XMLHeader, Header, string(xmlBytes), Footer), nil +}
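Marshal wraps one encoded value in the JSONx header and footer, so a single call yields a complete document. A minimal usage sketch (the map literal and the elided namespace attributes are illustrative; object keys are emitted in sorted order, which sortAndTransformObject below guarantees):

package main

import (
	"fmt"
	"log"

	"github.com/jefferai/jsonx"
)

func main() {
	doc, err := jsonx.Marshal(map[string]interface{}{"name": "daytona", "count": 3})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(doc)
	// <?xml version="1.0" encoding="UTF-8"?><json:object ...><json:number
	// name="count">3</json:number><json:string name="name">daytona</json:string>
	// </json:object>
}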
+ +// EncodeJSONBytes encodes JSON-formatted bytes into JSONx. It is designed to +// be used for multiple entries so does not prepend the JSONx header tag or +// append the JSONx footer tag. You can use jsonx.Header and jsonx.Footer to +// easily add these when necessary. +func EncodeJSONBytes(input []byte) ([]byte, error) { + o := bytes.NewBuffer(nil) + reader := bytes.NewReader(input) + dec := json.NewDecoder(reader) + dec.UseNumber() + + cont, err := gabs.ParseJSONDecoder(dec) + if err != nil { + return nil, err + } + + if err := sortAndTransformObject(o, &namedContainer{Container: cont}); err != nil { + return nil, err + } + + return o.Bytes(), nil +} + +func transformContainer(o *bytes.Buffer, cont *namedContainer) error { + var printName string + + if cont.name != "" { + escapedNameBuf := bytes.NewBuffer(nil) + err := xml.EscapeText(escapedNameBuf, []byte(cont.name)) + if err != nil { + return err + } + printName = fmt.Sprintf(" name=\"%s\"", escapedNameBuf.String()) + } + + data := cont.Data() + switch data.(type) { + case nil: + o.WriteString(fmt.Sprintf("<json:null%s />", printName)) + + case bool: + o.WriteString(fmt.Sprintf("<json:boolean%s>%t</json:boolean>", printName, data)) + + case json.Number: + o.WriteString(fmt.Sprintf("<json:number%s>%v</json:number>", printName, data)) + + case string: + o.WriteString(fmt.Sprintf("<json:string%s>%v</json:string>", printName, data)) + + case []interface{}: + o.WriteString(fmt.Sprintf("<json:array%s>", printName)) + arrayChildren, err := cont.Children() + if err != nil { + return err + } + for _, child := range arrayChildren { + if err := transformContainer(o, &namedContainer{Container: child}); err != nil { + return err + } + } + o.WriteString("</json:array>") + + case map[string]interface{}: + o.WriteString(fmt.Sprintf("<json:object%s>", printName)) + + if err := sortAndTransformObject(o, cont); err != nil { + return err + } + + o.WriteString("</json:object>") + } + + return nil +} + +// sortAndTransformObject sorts object keys to make the output predictable so +// the package can be tested; logic is here to prevent code duplication +func sortAndTransformObject(o *bytes.Buffer, cont *namedContainer) error { + objectChildren, err := cont.ChildrenMap() + if err != nil { + return err + } + + sortedNames := make([]string, 0, len(objectChildren)) + for name := range objectChildren { + sortedNames = append(sortedNames, name) + } + sort.Strings(sortedNames) + for _, name := range sortedNames { + if err := transformContainer(o, &namedContainer{name: name, Container: objectChildren[name]}); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/keybase/go-crypto/AUTHORS b/vendor/github.com/keybase/go-crypto/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/keybase/go-crypto/CONTRIBUTORS b/vendor/github.com/keybase/go-crypto/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/keybase/go-crypto/LICENSE b/vendor/github.com/keybase/go-crypto/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/keybase/go-crypto/PATENTS b/vendor/github.com/keybase/go-crypto/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go b/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go new file mode 100644 index 00000000..77fb8b9a --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go @@ -0,0 +1,134 @@ +// Package brainpool implements Brainpool elliptic curves. 
+// The rcurve implementation is from github.com/ebfe/brainpool. +// Note that these curves are implemented with naive, non-constant-time operations +// and are likely not suitable for environments where timing attacks are a concern. +package brainpool + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +var ( + once sync.Once + p256t1, p384t1, p512t1 *elliptic.CurveParams + p256r1, p384r1, p512r1 *rcurve +) + +func initAll() { + initP256t1() + initP384t1() + initP512t1() + initP256r1() + initP384r1() + initP512r1() +} + +func initP256t1() { + p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"} + p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16) + p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16) + p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16) + p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16) + p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16) + p256t1.BitSize = 256 +} + +func initP256r1() { + twisted := p256t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP256r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16) + params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16) + z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16) + p256r1 = newrcurve(twisted, params, z) +} + +func initP384t1() { + p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"} + p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16) + p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16) + p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16) + p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16) + p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16) + p384t1.BitSize = 384 +} + +func initP384r1() { + twisted := p384t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP384r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16) + params.Gy, _ = new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) + z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) + p384r1 = newrcurve(twisted, params, z) +} + +func initP512t1() { + p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} + p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) + p512t1.N, _ =
new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) + p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) + p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) + p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) + p512t1.BitSize = 512 +} + +func initP512r1() { + twisted := p512t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP512r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) + params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) + z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) + p512r1 = newrcurve(twisted, params, z) +} + +// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) +func P256t1() elliptic.Curve { + once.Do(initAll) + return p256t1 +} + +// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) +func P256r1() elliptic.Curve { + once.Do(initAll) + return p256r1 +} + +// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) +func P384t1() elliptic.Curve { + once.Do(initAll) + return p384t1 +} + +// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) +func P384r1() elliptic.Curve { + once.Do(initAll) + return p384r1 +} + +// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) +func P512t1() elliptic.Curve { + once.Do(initAll) + return p512t1 +} + +// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) +func P512r1() elliptic.Curve { + once.Do(initAll) + return p512r1 +} diff --git a/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go b/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go new file mode 100644 index 00000000..7e291d6a --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go @@ -0,0 +1,83 @@ +package brainpool + +import ( + "crypto/elliptic" + "math/big" +) + +var _ elliptic.Curve = (*rcurve)(nil) + +type rcurve struct { + twisted elliptic.Curve + params *elliptic.CurveParams + z *big.Int + zinv *big.Int + z2 *big.Int + z3 *big.Int + zinv2 *big.Int + zinv3 *big.Int +} + +var ( + two = big.NewInt(2) + three = big.NewInt(3) +) + +func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { + zinv := new(big.Int).ModInverse(z, params.P) + return &rcurve{ + twisted: twisted, + params: params, + z: z, + zinv: zinv, + z2: new(big.Int).Exp(z, two, params.P), + z3: new(big.Int).Exp(z, three, params.P), + zinv2: new(big.Int).Exp(zinv, two, params.P), + zinv3: new(big.Int).Exp(zinv, three, params.P), + } +} + +func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { + var tx, ty big.Int + tx.Mul(x, curve.z2) + 
tx.Mod(&tx, curve.params.P) + ty.Mul(y, curve.z3) + ty.Mod(&ty, curve.params.P) + return &tx, &ty +} + +func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { + var x, y big.Int + x.Mul(tx, curve.zinv2) + x.Mod(&x, curve.params.P) + y.Mul(ty, curve.zinv3) + y.Mod(&y, curve.params.P) + return &x, &y +} + +func (curve *rcurve) Params() *elliptic.CurveParams { + return curve.params +} + +func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { + return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) +} + +func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + tx2, ty2 := curve.toTwisted(x2, y2) + return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) +} + +func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) +} + +func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) +} + +func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) +} diff --git a/vendor/github.com/keybase/go-crypto/cast5/cast5.go b/vendor/github.com/keybase/go-crypto/cast5/cast5.go new file mode 100644 index 00000000..e0207352 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/cast5/cast5.go @@ -0,0 +1,526 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common +// OpenPGP cipher. +package cast5 // import "github.com/keybase/go-crypto/cast5" + +import "errors" + +const BlockSize = 8 +const KeySize = 16 + +type Cipher struct { + masking [16]uint32 + rotate [16]uint8 +} + +func NewCipher(key []byte) (c *Cipher, err error) { + if len(key) != KeySize { + return nil, errors.New("CAST5: keys must be 16 bytes") + } + + c = new(Cipher) + c.keySchedule(key) + return +} + +func (c *Cipher) BlockSize() int { + return BlockSize +} + +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | 
uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +type keyScheduleA [4][7]uint8 +type keyScheduleB [4][5]uint8 + +// keyScheduleRound contains the magic values for a round of the key schedule. +// The keyScheduleA deals with the lines like: +// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] +// Conceptually, both x and z are in the same array, x first. The first +// element describes which word of this array gets written to and the +// second, which word gets read. So, for the line above, it's "4, 0", because +// it's writing to the first word of z, which, being after x, is word 4, and +// reading from the first word of x: word 0. +// +// Next are the indexes into the S-boxes. Now the array is treated as bytes. So +// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear +// that it's z that we're indexing. +// +// keyScheduleB deals with lines like: +// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] +// "K1" is ignored because key words are always written in order. So the five +// elements are the S-box indexes. They use the same form as in keyScheduleA, +// above. 
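The byte-indexing convention this comment describes can be restated as a tiny standalone helper. A sketch, not part of the vendored file, and byteAt is a hypothetical name: it shows how keySchedule below recovers byte i of the 32 bytes packed big-endian into its [8]uint32 working array (x in words 0-3, z in words 4-7):

func byteAt(t [8]uint32, i uint8) uint8 {
	// Word i>>2 holds byte i; within a word, byte 0 is the most significant,
	// so byte i&3 sits at bit offset 24-8*(i&3). The uint8 conversion keeps
	// only the low 8 bits, matching the &0xff masks in keySchedule.
	return uint8(t[i>>2] >> (24 - 8*(i&3)))
}

Indices written "16 + n" in the tables therefore select byte n of z, the second half of the same array.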
+ +type keyScheduleRound struct{} +type keySchedule []keyScheduleRound + +var schedule = []struct { + a keyScheduleA + b keyScheduleB +}{ + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, + {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, + {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, + {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {3, 2, 0xc, 0xd, 8}, + {1, 0, 0xe, 0xf, 0xd}, + {7, 6, 8, 9, 3}, + {5, 4, 0xa, 0xb, 7}, + }, + }, + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, + {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, + {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, + {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {8, 9, 7, 6, 3}, + {0xa, 0xb, 5, 4, 7}, + {0xc, 0xd, 3, 2, 8}, + {0xe, 0xf, 1, 0, 0xd}, + }, + }, +} + +func (c *Cipher) keySchedule(in []byte) { + var t [8]uint32 + var k [32]uint32 + + for i := 0; i < 4; i++ { + j := i * 4 + t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) + } + + x := []byte{6, 7, 4, 5} + ki := 0 + + for half := 0; half < 2; half++ { + for _, round := range schedule { + for j := 0; j < 4; j++ { + var a [7]uint8 + copy(a[:], round.a[j][:]) + w := t[a[1]] + w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] + w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] + w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] + w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] + w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] + t[a[0]] = w + } + + for j := 0; j < 4; j++ { + var b [5]uint8 + copy(b[:], round.b[j][:]) + w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] + w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] + w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] + w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] + w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] + k[ki] = w + ki++ + } + } + } + + for i := 0; i < 16; i++ { + c.masking[i] = k[i] + c.rotate[i] = uint8(k[16+i] & 0x1f) + } +} + +// These are the three 'f' functions. See RFC 2144, section 2.2. 
+func f1(d, m uint32, r uint8) uint32 { + t := m + d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] +} + +func f2(d, m uint32, r uint8) uint32 { + t := m ^ d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] +} + +func f3(d, m uint32, r uint8) uint32 { + t := m - d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] +} + +var sBox = [8][256]uint32{ + { + 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, + 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, + 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, + 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, + 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, + 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, + 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, + 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, + 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, + 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, + 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, + 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, + 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, + 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, + 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, + 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, + 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, + 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, + 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, + 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, + 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, + 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, + 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, + 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, + 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, + 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, + 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, + 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, + 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, + 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, + 0xbd91e046, 0x9a56456e, 
0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, + 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, + }, + { + 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, + 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, + 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, + 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, + 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, + 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, + 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, + 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, + 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, + 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, + 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, + 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, + 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, + 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, + 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, + 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, + 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, + 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, + 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, + 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, + 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, + 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, + 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, + 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, + 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, + 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, + 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, + 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, + 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, + 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, + 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, + 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, + }, + { + 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, + 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, + 0x11107d9f, 0x07647db9, 
0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, + 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, + 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, + 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, + 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, + 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, + 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, + 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, + 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, + 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, + 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, + 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, + 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, + 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, + 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, + 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, + 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, + 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, + 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, + 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, + 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, + 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, + 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, + 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, + 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, + 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, + 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, + 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, + 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, + 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, + }, + { + 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, + 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, + 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, + 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, + 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, + 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, + 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 
0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, + 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, + 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, + 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, + 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, + 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, + 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, + 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, + 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, + 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, + 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, + 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, + 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, + 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, + 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, + 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, + 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, + 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, + 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, + 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, + 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, + 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, + 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, + 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, + 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, + 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, + }, + { + 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, + 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, + 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, + 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, + 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, + 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, + 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, + 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, + 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, + 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, + 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 
0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, + 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, + 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, + 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, + 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, + 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, + 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, + 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, + 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, + 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, + 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, + 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, + 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, + 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, + 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, + 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, + 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, + 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, + 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, + 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, + 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, + 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, + }, + { + 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, + 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, + 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, + 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, + 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, + 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, + 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, + 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, + 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, + 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, + 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, + 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, + 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, + 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, + 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 
0xa8dc8af0, 0x7345c106, 0xf41e232f, + 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, + 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, + 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, + 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, + 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, + 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, + 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, + 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, + 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, + 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, + 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, + 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, + 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, + 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, + 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, + 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, + 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, + }, + { + 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, + 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, + 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, + 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, + 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, + 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, + 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, + 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, + 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, + 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, + 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, + 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, + 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, + 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, + 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, + 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, + 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, + 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, + 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 
0x488dcf25, 0x36c9d566, + 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, + 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, + 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, + 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, + 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, + 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, + 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, + 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, + 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, + 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, + 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, + 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, + 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, + }, + { + 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, + 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, + 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, + 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, + 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, + 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, + 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, + 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, + 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, + 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, + 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, + 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, + 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, + 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, + 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, + 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, + 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, + 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, + 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, + 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, + 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, + 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, + 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 
0x2eda7fa4, + 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, + 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, + 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, + 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, + 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, + 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, + 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, + 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, + 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, + }, +} diff --git a/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.h b/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.h new file mode 100644 index 00000000..b3f74162 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.h @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.s new file mode 100644 index 00000000..ee7b4bd5 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. + +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/github.com/keybase/go-crypto/curve25519/cswap_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/cswap_amd64.s new file mode 100644 index 00000000..cd793a5b --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/curve25519/cswap_amd64.s @@ -0,0 +1,65 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
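+
+// cswap below swaps the two field-element pairs in inout iff v == 1, in
+// constant time: the SUBQ $1/NOTQ sequence stretches v into an all-zero or
+// all-one mask (broadcast to 128 bits by PSHUFD), which then gates an
+// xor-swap. The generic Go fallback, feCSwap in curve25519.go, uses the same
+// branch-free identity, sketched here on plain words (names illustrative):
+//
+//	mask := -v              // v in {0, 1}: all-zero or all-one bits
+//	t := mask & (x ^ y)
+//	x, y = x^t, y^t         // (x, y) swapped iff v == 1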
+ +// +build amd64,!gccgo,!appengine + +// func cswap(inout *[4][5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + SUBQ $1, SI + NOTQ SI + MOVQ SI, X15 + PSHUFD $0x44, X15, X15 + + MOVOU 0(DI), X0 + MOVOU 16(DI), X2 + MOVOU 32(DI), X4 + MOVOU 48(DI), X6 + MOVOU 64(DI), X8 + MOVOU 80(DI), X1 + MOVOU 96(DI), X3 + MOVOU 112(DI), X5 + MOVOU 128(DI), X7 + MOVOU 144(DI), X9 + + MOVO X1, X10 + MOVO X3, X11 + MOVO X5, X12 + MOVO X7, X13 + MOVO X9, X14 + + PXOR X0, X10 + PXOR X2, X11 + PXOR X4, X12 + PXOR X6, X13 + PXOR X8, X14 + PAND X15, X10 + PAND X15, X11 + PAND X15, X12 + PAND X15, X13 + PAND X15, X14 + PXOR X10, X0 + PXOR X10, X1 + PXOR X11, X2 + PXOR X11, X3 + PXOR X12, X4 + PXOR X12, X5 + PXOR X13, X6 + PXOR X13, X7 + PXOR X14, X8 + PXOR X14, X9 + + MOVOU X0, 0(DI) + MOVOU X2, 16(DI) + MOVOU X4, 32(DI) + MOVOU X6, 48(DI) + MOVOU X8, 64(DI) + MOVOU X1, 80(DI) + MOVOU X3, 96(DI) + MOVOU X5, 112(DI) + MOVOU X7, 128(DI) + MOVOU X9, 144(DI) + RET diff --git a/vendor/github.com/keybase/go-crypto/curve25519/curve25519.go b/vendor/github.com/keybase/go-crypto/curve25519/curve25519.go new file mode 100644 index 00000000..cb8fbc57 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/curve25519/curve25519.go @@ -0,0 +1,834 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We have an implementation in amd64 assembly so this code is only run on +// non-amd64 platforms. The amd64 assembly does not support gccgo. +// +build !amd64 gccgo appengine + +package curve25519 + +import ( + "encoding/binary" +) + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + b = -b + for i := range f { + t := b & (f[i] ^ g[i]) + f[i] ^= t + g[i] ^= t + } +} + +// load3 reads a 24-bit, little-endian value from in. +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +// load4 reads a 32-bit, little-endian value from in. 
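+// Together, load3 and load4 let feFromBytes (below) unpack 32 little-endian
+// bytes into ten limbs of alternating 26 and 25 bits (radix 2^25.5). For
+// example, h1 = load3(src[4:]) << 6 reads bits 32..55 of the input; limb 1
+// carries weight 2^26, so the pre-shift by 6 restores their place value:
+// (load3(src[4:]) << 6) * 2^26 = load3(src[4:]) * 2^32.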
+func load4(in []byte) int64 { + return int64(binary.LittleEndian.Uint32(in)) +} + +func feFromBytes(dst *fieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := load3(src[29:]) << 2 + + var carry [10]int64 + carry[9] = (h9 + 1<<24) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + 1<<24) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + 1<<24) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + 1<<24) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + 1<<24) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + 1<<25) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + 1<<25) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + 1<<25) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + 1<<25) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + 1<<25) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + dst[0] = int32(h0) + dst[1] = int32(h1) + dst[2] = int32(h2) + dst[3] = int32(h3) + dst[4] = int32(h4) + dst[5] = int32(h5) + dst[6] = int32(h6) + dst[7] = int32(h7) + dst[8] = int32(h8) + dst[9] = int32(h9) +} + +// feToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<2^255. +// +// Write x=r+19(2^-255)r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func feToBytes(s *[32]byte, h *fieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. + + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9].
+ + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
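+//
+// The _19 and _38 suffixes below come from reducing modulo p = 2^255-19:
+// limb k carries weight 2^ceil(25.5*k), and any product whose combined
+// weight reaches 2^255 folds back through 2^255 = 19 (mod p). Worked
+// example: f1*g9 has weight 2^26 * 2^230 = 2^256 = 2*19 = 38 (mod p),
+// which is why the h0 column uses f1g9_38 = (2*f1) * (19*g9).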
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
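+// That is Fermat's little theorem: for p = 2^255-19, z^(p-2) = z^-1 (mod p).
+// The unrolled chain below evaluates z^(2^255-21) with 254 squarings and 11
+// multiplications; the degenerate "for i = 1; i < 1" loops are artifacts of
+// the mechanical translation of ref10's addition chain, not bugs.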
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/github.com/keybase/go-crypto/curve25519/curve_impl.go b/vendor/github.com/keybase/go-crypto/curve25519/curve_impl.go new file mode 100644 index 00000000..a3d3a3d9 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/curve25519/curve_impl.go @@ -0,0 +1,124 @@ +package curve25519 + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +var cv25519 cv25519Curve + +type cv25519Curve struct { + *elliptic.CurveParams +} + +func copyReverse(dst []byte, src []byte) { + // Curve 25519 multiplication functions expect scalars in reverse + // order than PGP. To keep the curve25519Curve type consistent + // with other curves, we reverse it here. + for i, j := 0, len(src)-1; j >= 0 && i < len(dst); i, j = i+1, j-1 { + dst[i] = src[j] + } +} + +func copyTruncate(dst []byte, src []byte) { + lenDst, lenSrc := len(dst), len(src) + if lenDst == lenSrc { + copy(dst, src) + } else if lenDst > lenSrc { + copy(dst[lenDst-lenSrc:lenDst], src) + } else if lenDst < lenSrc { + copy(dst, src[:lenDst]) + } +} + +func (cv25519Curve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { + // Assume y1 is 0 with cv25519. 
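+ // Curve25519 works on x-coordinates only, so y1 really is ignored:
+ // the scalar is byte-reversed into the little-endian form scalarMult
+ // expects, and a zero y is returned only to satisfy elliptic.Curve.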
+ var dst [32]byte + var x1Bytes [32]byte + var scalarBytes [32]byte + + copyTruncate(x1Bytes[:], x1.Bytes()) + copyReverse(scalarBytes[:], scalar) + + scalarMult(&dst, &scalarBytes, &x1Bytes) + + x = new(big.Int).SetBytes(dst[:]) + y = new(big.Int) + return x, y +} + +func (cv25519Curve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { + var dst [32]byte + var scalarBytes [32]byte + copyReverse(scalarBytes[:], scalar[:32]) + scalarMult(&dst, &scalarBytes, &basePoint) + x = new(big.Int).SetBytes(dst[:]) + y = new(big.Int) + return x, y +} + +func (cv25519Curve) IsOnCurve(bigX, bigY *big.Int) bool { + return bigY.Sign() == 0 // bigY == 0 ? +} + +// More information about 0x40 point format: +// https://tools.ietf.org/html/draft-koch-eddsa-for-openpgp-00#section-3 +// In addition to uncompressed point format described here: +// https://tools.ietf.org/html/rfc6637#section-6 + +func (cv25519Curve) MarshalType40(x, y *big.Int) []byte { + byteLen := 32 + + ret := make([]byte, 1+byteLen) + ret[0] = 0x40 + + xBytes := x.Bytes() + copyTruncate(ret[1:], xBytes) + return ret +} + +func (cv25519Curve) UnmarshalType40(data []byte) (x, y *big.Int) { + if len(data) != 1+32 { + return nil, nil + } + if data[0] != 0x40 { + return nil, nil + } + x = new(big.Int).SetBytes(data[1:]) + // Any x is a valid curve point. + return x, new(big.Int) +} + +// ToCurve25519 casts given elliptic.Curve type to Curve25519 type, or +// returns nil, false if cast was unsuccessful. +func ToCurve25519(cv elliptic.Curve) (cv25519Curve, bool) { + cv2, ok := cv.(cv25519Curve) + return cv2, ok +} + +func initCv25519() { + cv25519.CurveParams = &elliptic.CurveParams{Name: "Curve 25519"} + // Some code relies on these parameters being available for + // checking Curve coordinate length. They should not be used + // directly for any calculations. + cv25519.P, _ = new(big.Int).SetString("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed", 16) + cv25519.N, _ = new(big.Int).SetString("1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed", 16) + cv25519.Gx, _ = new(big.Int).SetString("9", 16) + cv25519.Gy, _ = new(big.Int).SetString("20ae19a1b8a086b4e01edd2c7748d14c923d4d7e6d7c61b229e9c5a27eced3d9", 16) + cv25519.BitSize = 256 +} + +var initonce sync.Once + +// Cv25519 returns a Curve which (partially) implements Cv25519. Only +// ScalarMult and ScalarBaseMult are valid for this curve. Add and +// Double should not be used. +func Cv25519() elliptic.Curve { + initonce.Do(initCv25519) + return cv25519 +} + +func (curve cv25519Curve) Params() *elliptic.CurveParams { + return curve.CurveParams +} diff --git a/vendor/github.com/keybase/go-crypto/curve25519/doc.go b/vendor/github.com/keybase/go-crypto/curve25519/doc.go new file mode 100644 index 00000000..78bd9fc0 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html +package curve25519 // import "github.com/keybase/go-crypto/curve25519" + +// basePoint is the x coordinate of the generator of the curve. 
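+// A minimal Diffie-Hellman sketch over this package's API (variable names
+// illustrative; priv must be filled from a CSPRNG such as crypto/rand, and
+// clamping is handled inside scalarMult):
+//
+//	var priv, pub, shared, peerPub [32]byte
+//	// fill priv randomly; obtain peerPub from the peer
+//	ScalarBaseMult(&pub, &priv)          // pub = priv * basePoint
+//	ScalarMult(&shared, &priv, &peerPub) // shared = priv * peerPub
+//
+// ScalarBaseMult multiplies by the base point defined next.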
+var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. +func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/vendor/github.com/keybase/go-crypto/curve25519/freeze_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/freeze_amd64.s new file mode 100644 index 00000000..39081610 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/curve25519/freeze_amd64.s @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET diff --git a/vendor/github.com/keybase/go-crypto/curve25519/ladderstep_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 00000000..9e9040b2 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1377 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
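+
+// ladderstep below is the assembly counterpart of the loop body of the Go
+// scalarMult in curve25519.go: one combined differential double-and-add of
+// the Montgomery ladder over (x2, z2, x3, z3), but carried out on five-limb
+// radix-2^51 field elements, reduced with REDMASK51 from const_amd64.h.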
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 
24(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + 
SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ 
DX,R15 + MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + 
MOVQ DX,R13 + MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + 
ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET diff --git a/vendor/github.com/keybase/go-crypto/curve25519/mont25519_amd64.go b/vendor/github.com/keybase/go-crypto/curve25519/mont25519_amd64.go new file 
mode 100644
index 00000000..5822bd53
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/curve25519/mont25519_amd64.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package curve25519
+
+// These functions are implemented in the .s files. The names of the functions
+// in the rest of the file are also taken from the SUPERCOP sources to help
+// people following along.
+
+//go:noescape
+
+func cswap(inout *[5]uint64, v uint64)
+
+//go:noescape
+
+func ladderstep(inout *[5][5]uint64)
+
+//go:noescape
+
+func freeze(inout *[5]uint64)
+
+//go:noescape
+
+func mul(dest, a, b *[5]uint64)
+
+//go:noescape
+
+func square(out, in *[5]uint64)
+
+// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
+func mladder(xr, zr *[5]uint64, s *[32]byte) {
+	var work [5][5]uint64
+
+	work[0] = *xr
+	setint(&work[1], 1)
+	setint(&work[2], 0)
+	work[3] = *xr
+	setint(&work[4], 1)
+
+	j := uint(6)
+	var prevbit byte
+
+	for i := 31; i >= 0; i-- {
+		for j < 8 {
+			bit := ((*s)[i] >> j) & 1
+			swap := bit ^ prevbit
+			prevbit = bit
+			cswap(&work[1], uint64(swap))
+			ladderstep(&work)
+			j--
+		}
+		j = 7
+	}
+
+	*xr = work[1]
+	*zr = work[2]
+}
+
+func scalarMult(out, in, base *[32]byte) {
+	var e [32]byte
+	copy(e[:], (*in)[:])
+	e[0] &= 248
+	e[31] &= 127
+	e[31] |= 64
+
+	var t, z [5]uint64
+	unpack(&t, base)
+	mladder(&t, &z, &e)
+	invert(&z, &z)
+	mul(&t, &t, &z)
+	pack(out, &t)
+}
+
+func setint(r *[5]uint64, v uint64) {
+	r[0] = v
+	r[1] = 0
+	r[2] = 0
+	r[3] = 0
+	r[4] = 0
+}
+
+// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
+// order.
+func unpack(r *[5]uint64, x *[32]byte) {
+	r[0] = uint64(x[0]) |
+		uint64(x[1])<<8 |
+		uint64(x[2])<<16 |
+		uint64(x[3])<<24 |
+		uint64(x[4])<<32 |
+		uint64(x[5])<<40 |
+		uint64(x[6]&7)<<48
+
+	r[1] = uint64(x[6])>>3 |
+		uint64(x[7])<<5 |
+		uint64(x[8])<<13 |
+		uint64(x[9])<<21 |
+		uint64(x[10])<<29 |
+		uint64(x[11])<<37 |
+		uint64(x[12]&63)<<45
+
+	r[2] = uint64(x[12])>>6 |
+		uint64(x[13])<<2 |
+		uint64(x[14])<<10 |
+		uint64(x[15])<<18 |
+		uint64(x[16])<<26 |
+		uint64(x[17])<<34 |
+		uint64(x[18])<<42 |
+		uint64(x[19]&1)<<50
+
+	r[3] = uint64(x[19])>>1 |
+		uint64(x[20])<<7 |
+		uint64(x[21])<<15 |
+		uint64(x[22])<<23 |
+		uint64(x[23])<<31 |
+		uint64(x[24])<<39 |
+		uint64(x[25]&15)<<47
+
+	r[4] = uint64(x[25])>>4 |
+		uint64(x[26])<<4 |
+		uint64(x[27])<<12 |
+		uint64(x[28])<<20 |
+		uint64(x[29])<<28 |
+		uint64(x[30])<<36 |
+		uint64(x[31]&127)<<44
+}
+
+// pack sets out = x where out is the usual, little-endian form of the 5,
+// 51-bit limbs in x.
+func pack(out *[32]byte, x *[5]uint64) {
+	t := *x
+	freeze(&t)
+
+	out[0] = byte(t[0])
+	out[1] = byte(t[0] >> 8)
+	out[2] = byte(t[0] >> 16)
+	out[3] = byte(t[0] >> 24)
+	out[4] = byte(t[0] >> 32)
+	out[5] = byte(t[0] >> 40)
+	out[6] = byte(t[0] >> 48)
+
+	out[6] ^= byte(t[1]<<3) & 0xf8
+	out[7] = byte(t[1] >> 5)
+	out[8] = byte(t[1] >> 13)
+	out[9] = byte(t[1] >> 21)
+	out[10] = byte(t[1] >> 29)
+	out[11] = byte(t[1] >> 37)
+	out[12] = byte(t[1] >> 45)
+
+	out[12] ^= byte(t[2]<<6) & 0xc0
+	out[13] = byte(t[2] >> 2)
+	out[14] = byte(t[2] >> 10)
+	out[15] = byte(t[2] >> 18)
+	out[16] = byte(t[2] >> 26)
+	out[17] = byte(t[2] >> 34)
+	out[18] = byte(t[2] >> 42)
+	out[19] = byte(t[2] >> 50)
+
+	out[19] ^= byte(t[3]<<1) & 0xfe
+	out[20] = byte(t[3] >> 7)
+	out[21] = byte(t[3] >> 15)
+	out[22] = byte(t[3] >> 23)
+	out[23] = byte(t[3] >> 31)
+	out[24] = byte(t[3] >> 39)
+	out[25] = byte(t[3] >> 47)
+
+	out[25] ^= byte(t[4]<<4) & 0xf0
+	out[26] = byte(t[4] >> 4)
+	out[27] = byte(t[4] >> 12)
+	out[28] = byte(t[4] >> 20)
+	out[29] = byte(t[4] >> 28)
+	out[30] = byte(t[4] >> 36)
+	out[31] = byte(t[4] >> 44)
+}
+
+// invert calculates r = x^-1 mod p using Fermat's little theorem.
+func invert(r *[5]uint64, x *[5]uint64) {
+	var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
+
+	square(&z2, x)        /* 2 */
+	square(&t, &z2)       /* 4 */
+	square(&t, &t)        /* 8 */
+	mul(&z9, &t, x)       /* 9 */
+	mul(&z11, &z9, &z2)   /* 11 */
+	square(&t, &z11)      /* 22 */
+	mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
+
+	square(&t, &z2_5_0)      /* 2^6 - 2^1 */
+	for i := 1; i < 5; i++ { /* 2^20 - 2^10 */
+		square(&t, &t)
+	}
+	mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
+
+	square(&t, &z2_10_0)      /* 2^11 - 2^1 */
+	for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
+		square(&t, &t)
+	}
+	mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
+
+	square(&t, &z2_20_0)      /* 2^21 - 2^1 */
+	for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
+
+	square(&t, &t)            /* 2^41 - 2^1 */
+	for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
+		square(&t, &t)
+	}
+	mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
+
+	square(&t, &z2_50_0)      /* 2^51 - 2^1 */
+	for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
+		square(&t, &t)
+	}
+	mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
+
+	square(&t, &z2_100_0)      /* 2^101 - 2^1 */
+	for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
+
+	square(&t, &t)            /* 2^201 - 2^1 */
+	for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
+
+	square(&t, &t) /* 2^251 - 2^1 */
+	square(&t, &t) /* 2^252 - 2^2 */
+	square(&t, &t) /* 2^253 - 2^3 */
+
+	square(&t, &t) /* 2^254 - 2^4 */
+
+	square(&t, &t)   /* 2^255 - 2^5 */
+	mul(r, &t, &z11) /* 2^255 - 21 */
+}
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/mul_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/mul_amd64.s
new file mode 100644
index 00000000..5ce80a2e
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/curve25519/mul_amd64.s
@@ -0,0 +1,169 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
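The addition chain in invert above ends at the exponent 2^255 - 21, which is exactly p - 2 for p = 2^255 - 19, so by Fermat's little theorem the result is x^-1 mod p. A small math/big cross-check of that identity, independent of the 51-bit limb machinery (illustrative, not part of the vendored source):

// Illustrative only: verify that the exponent invert's addition chain
// reaches, 2^255 - 21 = p - 2, really yields the modular inverse.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19)) // p = 2^255 - 19

	exp := new(big.Int).Sub(p, big.NewInt(2)) // p - 2 = 2^255 - 21

	x := big.NewInt(123456789)
	inv := new(big.Int).Exp(x, exp, p) // x^(p-2) mod p

	check := new(big.Int).Mul(x, inv)
	check.Mod(check, p)
	fmt.Println(check) // prints 1, so inv is x^-1 mod p
}

Exp with a non-nil modulus uses modular exponentiation, so the 255-bit exponent is cheap here; the assembly's fixed square-and-multiply chain exists to make the same computation constant-time.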
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R9:R8 + ANDQ SI,R8 + SHLQ $13,R11:R10 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BP:BX + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/github.com/keybase/go-crypto/curve25519/square_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/square_amd64.s new file mode 100644 index 00000000..12f73734 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/curve25519/square_amd64.s @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
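Both mul above and square below reduce their double-width products with the same trick: since p = 2^255 - 19, anything carried out past bit 255 folds back into the low limb multiplied by 19. That is what the IMUL3Q $19 multipliers implement, square's IMUL3Q $38 is 2*19 for cross terms that occur twice, and REDMASK51 is the 51-bit limb mask. A tiny check of the underlying congruence (illustrative, not part of the vendored source):

// Illustrative only: the congruence behind the reduction constants in
// mul/square. With p = 2^255 - 19, a value at bit 255 is congruent to
// 19 times the same value at bit 0.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19)) // p = 2^255 - 19

	hi := new(big.Int).Lsh(big.NewInt(1), 255)
	hi.Mod(hi, p)
	fmt.Println(hi) // 19: folding bit 255 back in multiplies it by 19

	// The 51-bit limb mask the assembly calls REDMASK51:
	redmask51 := (uint64(1) << 51) - 1
	fmt.Printf("%#x\n", redmask51) // 0x7ffffffffffff
}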
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,R8:CX + ANDQ SI,CX + SHLQ $13,R10:R9 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R12:R11 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R14:R13 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,BX:R15 + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/github.com/keybase/go-crypto/ed25519/ed25519.go b/vendor/github.com/keybase/go-crypto/ed25519/ed25519.go new file mode 100644 index 00000000..5ba434b8 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/ed25519/ed25519.go @@ -0,0 +1,217 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seed”. +package ed25519 + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +import ( + "bytes" + "crypto" + cryptorand "crypto/rand" + "crypto/sha512" + "errors" + "io" + "strconv" + + "github.com/keybase/go-crypto/ed25519/internal/edwards25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. 
+	SignatureSize = 64
+	// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+	SeedSize = 32
+)
+
+// PublicKey is the type of Ed25519 public keys.
+type PublicKey []byte
+
+// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
+type PrivateKey []byte
+
+// Public returns the PublicKey corresponding to priv.
+func (priv PrivateKey) Public() crypto.PublicKey {
+	publicKey := make([]byte, PublicKeySize)
+	copy(publicKey, priv[32:])
+	return PublicKey(publicKey)
+}
+
+// Seed returns the private key seed corresponding to priv. It is provided for
+// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
+// in this package.
+func (priv PrivateKey) Seed() []byte {
+	seed := make([]byte, SeedSize)
+	copy(seed, priv[:32])
+	return seed
+}
+
+// Sign signs the given message with priv.
+// Ed25519 performs two passes over messages to be signed and therefore cannot
+// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
+// indicate the message hasn't been hashed. This can be achieved by passing
+// crypto.Hash(0) as the value for opts.
+func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
+	if opts.HashFunc() != crypto.Hash(0) {
+		return nil, errors.New("ed25519: cannot sign hashed message")
+	}
+
+	return Sign(priv, message), nil
+}
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+// If rand is nil, crypto/rand.Reader will be used.
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
+	if rand == nil {
+		rand = cryptorand.Reader
+	}
+
+	seed := make([]byte, SeedSize)
+	if _, err := io.ReadFull(rand, seed); err != nil {
+		return nil, nil, err
+	}
+
+	privateKey := NewKeyFromSeed(seed)
+	publicKey := make([]byte, PublicKeySize)
+	copy(publicKey, privateKey[32:])
+
+	return publicKey, privateKey, nil
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will panic if
+// len(seed) is not SeedSize. This function is provided for interoperability
+// with RFC 8032. RFC 8032's private keys correspond to seeds in this
+// package.
+func NewKeyFromSeed(seed []byte) PrivateKey {
+	if l := len(seed); l != SeedSize {
+		panic("ed25519: bad seed length: " + strconv.Itoa(l))
+	}
+
+	digest := sha512.Sum512(seed)
+	digest[0] &= 248
+	digest[31] &= 127
+	digest[31] |= 64
+
+	var A edwards25519.ExtendedGroupElement
+	var hBytes [32]byte
+	copy(hBytes[:], digest[:])
+	edwards25519.GeScalarMultBase(&A, &hBytes)
+	var publicKeyBytes [32]byte
+	A.ToBytes(&publicKeyBytes)
+
+	privateKey := make([]byte, PrivateKeySize)
+	copy(privateKey, seed)
+	copy(privateKey[32:], publicKeyBytes[:])
+
+	return privateKey
+}
+
+// Sign signs the message with privateKey and returns a signature. It will
+// panic if len(privateKey) is not PrivateKeySize.
+func Sign(privateKey PrivateKey, message []byte) []byte {
+	if l := len(privateKey); l != PrivateKeySize {
+		panic("ed25519: bad private key length: " + strconv.Itoa(l))
+	}
+
+	h := sha512.New()
+	h.Write(privateKey[:32])
+
+	var digest1, messageDigest, hramDigest [64]byte
+	var expandedSecretKey [32]byte
+	h.Sum(digest1[:0])
+	copy(expandedSecretKey[:], digest1[:])
+	expandedSecretKey[0] &= 248
+	expandedSecretKey[31] &= 63
+	expandedSecretKey[31] |= 64
+
+	h.Reset()
+	h.Write(digest1[32:])
+	h.Write(message)
+	h.Sum(messageDigest[:0])
+
+	var messageDigestReduced [32]byte
+	edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
+	var R edwards25519.ExtendedGroupElement
+	edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
+
+	var encodedR [32]byte
+	R.ToBytes(&encodedR)
+
+	h.Reset()
+	h.Write(encodedR[:])
+	h.Write(privateKey[32:])
+	h.Write(message)
+	h.Sum(hramDigest[:0])
+	var hramDigestReduced [32]byte
+	edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
+
+	var s [32]byte
+	edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
+
+	signature := make([]byte, SignatureSize)
+	copy(signature[:], encodedR[:])
+	copy(signature[32:], s[:])
+
+	return signature
+}
+
+// Verify reports whether sig is a valid signature of message by publicKey. It
+// will panic if len(publicKey) is not PublicKeySize.
+func Verify(publicKey PublicKey, message, sig []byte) bool {
+	if l := len(publicKey); l != PublicKeySize {
+		panic("ed25519: bad public key length: " + strconv.Itoa(l))
+	}
+
+	if len(sig) != SignatureSize || sig[63]&224 != 0 {
+		return false
+	}
+
+	var A edwards25519.ExtendedGroupElement
+	var publicKeyBytes [32]byte
+	copy(publicKeyBytes[:], publicKey)
+	if !A.FromBytes(&publicKeyBytes) {
+		return false
+	}
+	edwards25519.FeNeg(&A.X, &A.X)
+	edwards25519.FeNeg(&A.T, &A.T)
+
+	h := sha512.New()
+	h.Write(sig[:32])
+	h.Write(publicKey[:])
+	h.Write(message)
+	var digest [64]byte
+	h.Sum(digest[:0])
+
+	var hReduced [32]byte
+	edwards25519.ScReduce(&hReduced, &digest)
+
+	var R edwards25519.ProjectiveGroupElement
+	var s [32]byte
+	copy(s[:], sig[32:])
+
+	// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
+	// the range [0, order) in order to prevent signature malleability.
+	if !edwards25519.ScMinimal(&s) {
+		return false
+	}
+
+	edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
+
+	var checkR [32]byte
+	R.ToBytes(&checkR)
+	return bytes.Equal(sig[:32], checkR[:])
+}
diff --git a/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/const.go b/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/const.go
new file mode 100644
index 00000000..e39f086c
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/const.go
@@ -0,0 +1,1422 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+// These values are from the public domain, “ref10” implementation of ed25519
+// from SUPERCOP.
+
+// d is a constant in the Edwards curve equation.
+var d = FieldElement{
+	-10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116,
+}
+
+// d2 is 2*d.
+var d2 = FieldElement{
+	-21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199,
+}
+
+// SqrtM1 is the square-root of -1 in the field.
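Before the edwards25519 constant tables that make up the rest of this file: the ed25519 package vendored just above exposes the standard GenerateKey/Sign/Verify surface, mirroring the standard library's crypto/ed25519. A minimal round trip through that API (illustrative only, not part of the vendored source):

// Illustrative only: exercise the vendored ed25519 API shown above.
package main

import (
	"fmt"

	"github.com/keybase/go-crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(nil) // nil reader => crypto/rand
	if err != nil {
		panic(err)
	}

	msg := []byte("hello, vault")
	sig := ed25519.Sign(priv, msg)

	fmt.Println("verified:", ed25519.Verify(pub, msg, sig)) // true
	sig[0] ^= 1
	fmt.Println("tampered:", ed25519.Verify(pub, msg, sig)) // false
}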
+var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +// A is a constant in the Montgomery-form of curve25519. +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// bi contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. +var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +// base contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
+var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { + FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, + }, + { + FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + { + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, + }, + { + FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, + FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, + }, + { + FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + 
FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, 
-25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + { + FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, 
-239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, 
-16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 
13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + }, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + 
FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, + }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, 
-8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, 
-6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + 
FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, 
-13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 
9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + { + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, 
+ { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, 
-11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, 
-29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + 
FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff --git a/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/edwards25519.go new file mode 100644 index 00000000..fd03c252 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/edwards25519.go @@ -0,0 +1,1793 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package edwards25519 + +import "encoding/binary" + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// FieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type FieldElement [10]int32 + +var zero FieldElement + +func FeZero(fe *FieldElement) { + copy(fe[:], zero[:]) +} + +func FeOne(fe *FieldElement) { + FeZero(fe) + fe[0] = 1 +}
+ +func FeAdd(dst, a, b *FieldElement) { + dst[0] = a[0] + b[0] + dst[1] = a[1] + b[1] + dst[2] = a[2] + b[2] + dst[3] = a[3] + b[3] + dst[4] = a[4] + b[4] + dst[5] = a[5] + b[5] + dst[6] = a[6] + b[6] + dst[7] = a[7] + b[7] + dst[8] = a[8] + b[8] + dst[9] = a[9] + b[9] +} + +func FeSub(dst, a, b *FieldElement) { + dst[0] = a[0] - b[0] + dst[1] = a[1] - b[1] + dst[2] = a[2] - b[2] + dst[3] = a[3] - b[3] + dst[4] = a[4] - b[4] + dst[5] = a[5] - b[5] + dst[6] = a[6] - b[6] + dst[7] = a[7] - b[7] + dst[8] = a[8] - b[8] + dst[9] = a[9] - b[9] +} + +func FeCopy(dst, src *FieldElement) { + copy(dst[:], src[:]) +}
+ +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func FeCMove(f, g *FieldElement, b int32) { + b = -b + f[0] ^= b & (f[0] ^ g[0]) + f[1] ^= b & (f[1] ^ g[1]) + f[2] ^= b & (f[2] ^ g[2]) + f[3] ^= b & (f[3] ^ g[3]) + f[4] ^= b & (f[4] ^ g[4]) + f[5] ^= b & (f[5] ^ g[5]) + f[6] ^= b & (f[6] ^ g[6]) + f[7] ^= b & (f[7] ^ g[7]) + f[8] ^= b & (f[8] ^ g[8]) + f[9] ^= b & (f[9] ^ g[9]) +}
+ +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +}
+ +func FeFromBytes(dst *FieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 8388607) << 2 + + FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +}
+ +// FeToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. +// +// Write x=r+19(2^-255)r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func FeToBytes(s *[32]byte, h *FieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +func FeIsNegative(f *FieldElement) byte { + var s [32]byte + FeToBytes(&s, f) + return s[0] & 1 +} + +func FeIsNonZero(f *FieldElement) int32 { + var s [32]byte + FeToBytes(&s, f) + var x uint8 + for _, b := range s { + x |= b + } + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + return int32(x & 1) +} + +// FeNeg sets h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeNeg(h, f *FieldElement) { + h[0] = -f[0] + h[1] = -f[1] + h[2] = -f[2] + h[3] = -f[3] + h[4] = -f[4] + h[5] = -f[5] + h[6] = -f[6] + h[7] = -f[7] + h[8] = -f[8] + h[9] = -f[9] +} + +func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 + + /* + |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + c1 = (h1 + (1 << 24)) >> 25 + h2 += c1 + h1 -= c1 << 25 + c5 = (h5 + (1 << 24)) >> 25 + h6 += c5 + h5 -= c5 << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + c2 = (h2 + (1 << 25)) >> 26 + h3 += c2 + h2 -= c2 << 26 + c6 = (h6 + (1 << 25)) >> 26 + h7 += c6 + h6 -= c6 << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + c3 = (h3 + (1 << 24)) >> 25 + h4 += c3 + h3 -= c3 << 25 + c7 = (h7 + (1 << 24)) >> 25 + h8 += c7 + h7 -= c7 << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 1.52*2^33 */ + + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + c8 = (h8 + (1 << 25)) >> 26 + h9 += c8 + h8 -= c8 << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + c9 = (h9 + (1 << 24)) >> 25 + h0 += c9 * 19 + h9 -= c9 << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs, can squeeze carries into int32. 
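
The g*19 precomputations described above rest on the identity 2^255 ≡ 19 (mod 2^255 - 19): the limb weights are 2^26, 2^51, ..., 2^230, so any partial product that lands at weight 2^255 or higher folds back into the low limbs scaled by 19. A standalone editorial sketch (not part of the vendored file) confirming the identity with math/big:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the field modulus used throughout edwards25519.go.
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))
	// The overflow weight 2^255 reduces to exactly 19 mod p, which is why a
	// product term spilling past limb 9 can be folded in as g[i]*19.
	fmt.Println(new(big.Int).Mod(new(big.Int).Lsh(one, 255), p)) // prints 19
}
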
+func FeMul(h, f, g *FieldElement) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + + f1_2 := int64(2 * f[1]) + f3_2 := int64(2 * f[3]) + f5_2 := int64(2 * f[5]) + f7_2 := int64(2 * f[7]) + f9_2 := int64(2 * f[9]) + + g0 := int64(g[0]) + g1 := int64(g[1]) + g2 := int64(g[2]) + g3 := int64(g[3]) + g4 := int64(g[4]) + g5 := int64(g[5]) + g6 := int64(g[6]) + g7 := int64(g[7]) + g8 := int64(g[8]) + g9 := int64(g[9]) + + g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ + g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ + g3_19 := int64(19 * g[3]) + g4_19 := int64(19 * g[4]) + g5_19 := int64(19 * g[5]) + g6_19 := int64(19 * g[6]) + g7_19 := int64(19 * g[7]) + g8_19 := int64(19 * g[8]) + g9_19 := int64(19 * g[9]) + + h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 + h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 + h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 + h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 + h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 + h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 + h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 + h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 + h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 + h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + f0_2 := int64(2 * f[0]) + f1_2 := int64(2 * f[1]) + f2_2 := int64(2 * f[2]) + f3_2 := int64(2 * f[3]) + f4_2 := int64(2 * f[4]) + f5_2 := int64(2 * f[5]) + f6_2 := int64(2 * f[6]) + f7_2 := int64(2 * f[7]) + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + + h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 + h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 + h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 + h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 + h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 + h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 + h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 + h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 + h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 + h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 + + return +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
+func FeSquare(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. +func FeSquare2(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. 
+// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) != (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +}
+ +func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +}
+ +func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +}
+ +func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +}
+ +func slide(r *[256]int8, a *[32]byte) { + for i := range r { + r[i] = int8(1 & (a[i>>3] >> uint(i&7))) + } + + for i := range r { + if r[i] != 0 { + for b := 1; b <= 6 && i+b < 256; b++ { + if r[i+b] != 0 { + if r[i]+(r[i+b]<<uint(b)) <= 15 { + r[i] += r[i+b] << uint(b) + r[i+b] = 0 + } else if r[i]-(r[i+b]<<uint(b)) >= -15 { + r[i] -= r[i+b] << uint(b) + for k := i + b; k < 256; k++ { + if r[k] == 0 { + r[k] = 1 + break + } + r[k] = 0 + } + } else { + break + } + } + } + } + } +}
+ +// GeDoubleScalarMultVartime sets r = a*A + b*B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. +func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { + var aSlide, bSlide [256]int8 + var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A + var t CompletedGroupElement + var u, A2 ExtendedGroupElement + var i int + + slide(&aSlide, a) + slide(&bSlide, b) + + A.ToCached(&Ai[0]) + A.Double(&t) + t.ToExtended(&A2) + + for i := 0; i < 7; i++ { + geAdd(&t, &A2, &Ai[i]) + t.ToExtended(&u) + u.ToCached(&Ai[i+1]) + } + + r.Zero() + + for i = 255; i >= 0; i-- { + if aSlide[i] != 0 || bSlide[i] != 0 { + break + } + } + + for ; i >= 0; i-- { + r.Double(&t) + + if aSlide[i] > 0 { + t.ToExtended(&u) + geAdd(&t, &u, &Ai[aSlide[i]/2]) + } else if aSlide[i] < 0 { + t.ToExtended(&u) + geSub(&t, &u, &Ai[(-aSlide[i])/2]) + } + + if bSlide[i] > 0 { + t.ToExtended(&u) + geMixedAdd(&t, &u, &bi[bSlide[i]/2]) + } else if bSlide[i] < 0 { + t.ToExtended(&u) + geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) + } + + t.ToProjective(r) + } +}
+ +// equal returns 1 if b == c and 0 otherwise, assuming that b and c are +// non-negative. +func equal(b, c int32) int32 { + x := uint32(b ^ c) + x-- + return int32(x >> 31) +} + +// negative returns 1 if b < 0 and 0 otherwise.
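
GeDoubleScalarMultVartime is the workhorse of Ed25519 signature verification: the equation R == [s]B - [k]A is checked by computing [k](-A) + [s]B and comparing it against the R half of the signature. Below is a condensed editorial sketch of that flow, modeled on how golang.org/x/crypto/ed25519 uses this same internal API; it is not part of the vendored code, is written as if it lived alongside this file (with "bytes" and "crypto/sha512" imported), and relies on ScReduce and ScMinimal, which are defined further down in this file.

// verifySketch reports whether sig (R || s, 64 bytes) is a valid Ed25519
// signature of message under publicKey (32 bytes).
func verifySketch(publicKey, message, sig []byte) bool {
	if len(publicKey) != 32 || len(sig) != 64 || sig[63]&224 != 0 {
		return false
	}

	var A ExtendedGroupElement
	var pk [32]byte
	copy(pk[:], publicKey)
	if !A.FromBytes(&pk) {
		return false
	}
	// Negate A so that GeDoubleScalarMultVartime yields [k](-A) + [s]B.
	FeNeg(&A.X, &A.X)
	FeNeg(&A.T, &A.T)

	h := sha512.New()
	h.Write(sig[:32])
	h.Write(publicKey)
	h.Write(message)
	var digest [64]byte
	h.Sum(digest[:0])

	var k [32]byte
	ScReduce(&k, &digest) // k = H(R || A || M) mod l

	var s [32]byte
	copy(s[:], sig[32:])
	if !ScMinimal(&s) { // reject non-canonical s (signature malleability)
		return false
	}

	var R ProjectiveGroupElement
	GeDoubleScalarMultVartime(&R, &k, &A, &s)

	var checkR [32]byte
	R.ToBytes(&checkR)
	return bytes.Equal(sig[:32], checkR[:])
}
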
+func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
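
The contract stated above, s = (ab + c) mod l, is easy to sanity-check against math/big. An editorial test-style sketch follows (not part of the diff; it assumes it sits in this package with math/big imported, and fromLE is a hypothetical helper for the little-endian scalar encoding):

// fromLE decodes the 32-byte little-endian scalar encoding into a big.Int.
func fromLE(b *[32]byte) *big.Int {
	var be [32]byte
	for i, v := range b {
		be[31-i] = v
	}
	return new(big.Int).SetBytes(be[:])
}

// checkScMulAdd reports whether ScMulAdd agrees with big.Int arithmetic.
func checkScMulAdd(a, b, c *[32]byte) bool {
	// l = 2^252 + 27742317777372353535851937790883648493, written in decimal.
	l, _ := new(big.Int).SetString(
		"7237005577332262213973186563042994240857116359379907606001950938285454250989", 10)

	var s [32]byte
	ScMulAdd(&s, a, b, c)

	want := new(big.Int).Mul(fromLE(a), fromLE(b))
	want.Add(want, fromLE(c))
	want.Mod(want, l)
	return fromLE(&s).Cmp(want) == 0
}
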
+func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 
<< 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 
+= s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] 
= byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 
+ s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 
3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} + +// order is the order of Curve25519 in little-endian form. +var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} + +// ScMinimal returns true if the given scalar is less than the order of the +// curve. +func ScMinimal(scalar *[32]byte) bool { + for i := 3; ; i-- { + v := binary.LittleEndian.Uint64(scalar[i*8:]) + if v > order[i] { + return false + } else if v < order[i] { + break + } else if i == 0 { + return false + } + } + + return true +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go b/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go new file mode 100644 index 00000000..28717403 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go @@ -0,0 +1,302 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +package armor // import "github.com/keybase/go-crypto/openpgp/armor" + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "strings" + "unicode" + + "github.com/keybase/go-crypto/openpgp/errors" +) + +// A Block represents an OpenPGP armored structure. +// +// The encoded form is: +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// where Headers is a possibly empty sequence of Key: Value lines. +// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc *uint32 +} + +// ourIsSpace checks if a rune is either space according to unicode +// package, or ZeroWidthSpace (which is not a space according to +// unicode module). Used to trim lines during header reading. 
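
The crc24 function defined a few declarations above produces the 24-bit checksum that openpgpReader later compares at EOF; in the armored text it appears as the final "=" line: '=' followed by four base64 characters encoding the three CRC bytes. A standalone editorial sketch of producing that footer line:

package main

import (
	"encoding/base64"
	"fmt"
)

const (
	crc24Init = 0xb704ce
	crc24Poly = 0x1864cfb
)

// crc24 is the RFC 4880 section 6.1 checksum, copied from the vendored code.
func crc24(crc uint32, d []byte) uint32 {
	for _, b := range d {
		crc ^= uint32(b) << 16
		for i := 0; i < 8; i++ {
			crc <<= 1
			if crc&0x1000000 != 0 {
				crc ^= crc24Poly
			}
		}
	}
	return crc
}

func main() {
	sum := crc24(crc24Init, []byte("hello")) & 0xffffff
	footer := []byte{byte(sum >> 16), byte(sum >> 8), byte(sum)}
	// Prints "=" followed by four base64 characters, as on the line that
	// precedes "-----END ...-----" in an armored block.
	fmt.Println("=" + base64.StdEncoding.EncodeToString(footer))
}
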
+func ourIsSpace(r rune) bool { + return r == '\u200b' || unicode.IsSpace(r) +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + + // Entry-level cleanup, just trim spaces. + line = bytes.TrimFunc(line, ourIsSpace) + + lineWithChecksum := false + foldedChecksum := false + if !isPrefix && len(line) >= 5 && line[len(line)-5] == '=' && line[len(line)-4] != '=' { + // This is the checksum line. The checksum should appear on a separate + // line, but some bundles don't have a newline between the main payload + // and the checksum, and we try to support that. + + // `=` is not a base64 character with the exception of padding, and the + // padding can only be 2 characters long at most ("=="), so we can + // safely assume that 5 characters starting with `=` at the end of the + // line can't be a valid ending of a base64 stream. In other words, `=` + // at position len-5 in a base64 stream can never be a valid part of + // that stream. + + // A checksum can never appear if isPrefix is true - that is, when + // ReadLine returned a non-final part of some line because it was + // longer than its buffer. + + if l.crc != nil { + // Error out early if there are multiple checksums. + return 0, ArmorCorrupt + } + + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[len(line)-4:]) + if err != nil { + return 0, fmt.Errorf("error decoding CRC: %s", err.Error()) + } else if m != 3 { + return 0, fmt.Errorf("error decoding CRC: wrong size CRC") + } + + crc := uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + l.crc = &crc + + line = line[:len(line)-5] + + lineWithChecksum = true + + // If we've found a checksum but there is still data left, we don't + // want to enter the "looking for armor end" loop, we still need to + // return the leftover data to the reader. + foldedChecksum = len(line) > 0 + + // At this point, `line` contains leftover data or "" (if the checksum + // was on a separate line). + } + + expectArmorEnd := false + if l.crc != nil && !foldedChecksum { + // "looking for armor end" loop + + // We have a checksum, and we are now reading what comes afterwards. + // Skip all empty lines until we see something, and we expect it to be + // the ArmorEnd at this point. + + // This loop is not entered if there is more data *before* the CRC + // suffix (if the CRC is not on a separate line). + for { + if len(strings.TrimSpace(string(line))) > 0 { + break + } + lineWithChecksum = false + line, _, err = l.in.ReadLine() + if err == io.EOF { + break + } + if err != nil { + return + } + } + expectArmorEnd = true + } + + if bytes.HasPrefix(line, armorEnd) { + if lineWithChecksum { + // ArmorEnd and checksum on the same line? + return 0, ArmorCorrupt + } + l.eof = true + return 0, io.EOF + } else if expectArmorEnd { + // We wanted armorEnd but didn't see one. + return 0, ArmorCorrupt + } + + // Clean up the line, removing whitespace, to pass it further (to the + // base64 decoder). This is done after the tests for the CRC and for + // armorEnd. Keys that have whitespace in the CRC will have the CRC + // treated as part of the payload and will probably fail in base64 + // reading.
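+ // Illustrative example: a payload line "QUJD REVG" becomes "QUJDREVG" + // here, so the base64 decoder below never sees stray whitespace.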
+ line = bytes.Map(func(r rune) rune { + if ourIsSpace(r) { + return -1 + } + return r + }, line) + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF { + if r.lReader.crc != nil && *r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. +func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimFunc(line, ourIsSpace) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(": ")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + p.Header[lastKey] = string(line[i+2:]) + } + + p.lReader.in = r + p.oReader.currentCRC = crc24Init + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go b/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go new file mode 100644 index 00000000..075a1978 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go @@ -0,0 +1,160 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +// writeSlices writes its arguments to the given Writer. +func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. 
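+// +// A minimal usage sketch (hypothetical): +// +// var buf bytes.Buffer +// lb := newLineBreaker(&buf, 64) +// lb.Write(data) // emitted as 64-byte lines joined by '\n' +// lb.Close() // flushes the final partial line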
+type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. +// +// It's built into a stack of io.Writers: +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine, []byte{'\n'}) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go b/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go new file mode 100644 index 00000000..e601e389 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import "hash" + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. 
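+// +// For example (hypothetical), hashing "a\nb" and "a\r\nb" yields the same +// digest, because a bare '\n' is expanded to "\r\n" before hashing: +// +// h := NewCanonicalTextHash(sha256.New()) +// io.WriteString(h, "a\nb") +// sum := h.Sum(nil)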
+func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + start := 0 + + for i, c := range buf { + switch cth.s { + case 0: + if c == '\r' { + cth.s = 1 + } else if c == '\n' { + cth.h.Write(buf[start:i]) + cth.h.Write(newline) + start = i + 1 + } + case 1: + cth.s = 0 + } + } + + cth.h.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/keybase/go-crypto/openpgp/ecdh/ecdh.go new file mode 100644 index 00000000..1a87b275 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/ecdh/ecdh.go @@ -0,0 +1,316 @@ +package ecdh + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/elliptic" + "encoding/binary" + "errors" + "github.com/keybase/go-crypto/curve25519" + "io" + "math/big" +) + +type PublicKey struct { + elliptic.Curve + X, Y *big.Int +} + +type PrivateKey struct { + PublicKey + X *big.Int +} + +// KDF implements the Key Derivation Function described in +// https://tools.ietf.org/html/rfc6637#section-7 +func (e *PublicKey) KDF(S []byte, kdfParams []byte, hash crypto.Hash) []byte { + sLen := (e.Curve.Params().P.BitLen() + 7) / 8 + buf := new(bytes.Buffer) + buf.Write([]byte{0, 0, 0, 1}) + if sLen > len(S) { + // zero-pad S. If we got an invalid S (bigger than the curve's + // P), we are going to produce an invalid key. Garbage in, + // garbage out. + buf.Write(make([]byte, sLen-len(S))) + } + buf.Write(S) + buf.Write(kdfParams) + + hashw := hash.New() + + hashw.Write(buf.Bytes()) + key := hashw.Sum(nil) + + return key +} + +// AESKeyUnwrap implements RFC 3394 Key Unwrapping. See +// http://tools.ietf.org/html/rfc3394#section-2.2.1 +// Note: The second described algorithm ("index-based") is implemented +// here. +func AESKeyUnwrap(key, cipherText []byte) ([]byte, error) { + if len(cipherText)%8 != 0 { + return nil, errors.New("cipherText must be a multiple of 64 bits") + } + + cipher, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + nblocks := len(cipherText)/8 - 1 + + // 1) Initialize variables. + // - Set A = C[0] + var A [aes.BlockSize]byte + copy(A[:8], cipherText[:8]) + + // For i = 1 to n + // Set R[i] = C[i] + R := make([]byte, len(cipherText)-8) + copy(R, cipherText[8:]) + + // 2) Compute intermediate values. + for j := 5; j >= 0; j-- { + for i := nblocks - 1; i >= 0; i-- { + // B = AES-1(K, (A ^ t) | R[i]) where t = n*j+i+1 + // A = MSB(64, B) + t := uint64(nblocks*j + i + 1) + At := binary.BigEndian.Uint64(A[:8]) ^ t + binary.BigEndian.PutUint64(A[:8], At) + + copy(A[8:], R[i*8:i*8+8]) + cipher.Decrypt(A[:], A[:]) + + // R[i] = LSB(B, 64) + copy(R[i*8:i*8+8], A[8:]) + } + } + + // 3) Output results. + // If A is an appropriate initial value (see 2.2.3), + for i := 0; i < 8; i++ { + if A[i] != 0xA6 { + return nil, errors.New("Failed to unwrap key (A is not IV)") + } + } + + return R, nil +} + +// AESKeyWrap implements RFC 3394 Key Wrapping. See +// https://tools.ietf.org/html/rfc3394#section-2.2.2 +// Note: The second described algorithm ("index-based") is implemented +// here.
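+// +// Round-trip sketch (hypothetical): for a 16-, 24- or 32-byte KEK and a +// plaintext whose length is a multiple of 8, +// +// wrapped, _ := AESKeyWrap(kek, plain) // len(wrapped) == len(plain)+8 +// plain2, _ := AESKeyUnwrap(kek, wrapped) // plain2 equals plain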
+func AESKeyWrap(key, plainText []byte) ([]byte, error) { + if len(plainText)%8 != 0 { + return nil, errors.New("plainText must be a multiple of 64 bits") + } + + cipher, err := aes.NewCipher(key) // NewCipher checks key size + if err != nil { + return nil, err + } + + nblocks := len(plainText) / 8 + + // 1) Initialize variables. + var A [aes.BlockSize]byte + // Section 2.2.3.1 -- Initial Value + // http://tools.ietf.org/html/rfc3394#section-2.2.3.1 + for i := 0; i < 8; i++ { + A[i] = 0xA6 + } + + // For i = 1 to n + // Set R[i] = P[i] + R := make([]byte, len(plainText)) + copy(R, plainText) + + // 2) Calculate intermediate values. + for j := 0; j <= 5; j++ { + for i := 0; i < nblocks; i++ { + // B = AES(K, A | R[i]) + copy(A[8:], R[i*8:i*8+8]) + cipher.Encrypt(A[:], A[:]) + + // (Assume B = A) + // A = MSB(64, B) ^ t where t = (n*j)+i+1 + t := uint64(j*nblocks + i + 1) + At := binary.BigEndian.Uint64(A[:8]) ^ t + binary.BigEndian.PutUint64(A[:8], At) + + // R[i] = LSB(64, B) + copy(R[i*8:i*8+8], A[8:]) + } + } + + // 3) Output results. + // Set C[0] = A + // For i = 1 to n + // C[i] = R[i] + return append(A[:8], R...), nil +} + +// PadBuffer pads the byte buffer buf to a length that is a multiple of +// blockLen. Bytes appended to the buffer have the value of the number of +// padded bytes. E.g. if the buffer is 3 bytes short of being 40 bytes +// total, the appended bytes will be [03, 03, 03]. +func PadBuffer(buf []byte, blockLen int) []byte { + padding := blockLen - (len(buf) % blockLen) + if padding == 0 { + return buf + } + + padBuf := make([]byte, padding) + for i := 0; i < padding; i++ { + padBuf[i] = byte(padding) + } + + return append(buf, padBuf...) +} + +// UnpadBuffer verifies that the buffer contains proper padding and +// returns the buffer without the padding, or nil if the padding was +// invalid. +func UnpadBuffer(buf []byte, dataLen int) []byte { + padding := len(buf) - dataLen + outBuf := buf[:dataLen] + + for i := dataLen; i < len(buf); i++ { + if buf[i] != byte(padding) { + // Invalid padding - bail out + return nil + } + } + + return outBuf +} + +func (e *PublicKey) Encrypt(random io.Reader, kdfParams []byte, plain []byte, hash crypto.Hash, kdfKeySize int) (Vx *big.Int, Vy *big.Int, C []byte, err error) { + // Vx, Vy - encryption key + + // Note for Curve 25519 - the curve25519 library already does key + // clamping in scalarMult, so we can use generic random scalar + // generation from elliptic. + priv, Vx, Vy, err := elliptic.GenerateKey(e.Curve, random) + if err != nil { + return nil, nil, nil, err + } + + // Sx, Sy - shared secret + Sx, _ := e.Curve.ScalarMult(e.X, e.Y, priv) + + // Encrypt the payload with the KDF-ed S as the encryption key. Pass + // the ciphertext along with V to the recipient. The recipient can + // generate S using V and their private key, and then KDF(S), on + // their own, to get the encryption key and decrypt the ciphertext, + // revealing the encryption key for symmetric encryption later.
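+ // Concretely: pad the payload to an 8-byte boundary (RFC 3394 operates + // on 64-bit blocks), derive the wrapping key as KDF(Sx, kdfParams), and + // keep only kdfKeySize bytes of the digest, since the hash output may be + // longer than the AES key size we need.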
+ + plain = PadBuffer(plain, 8) + key := e.KDF(Sx.Bytes(), kdfParams, hash) + + // Take only as many bytes from key as the key length (the hash + // result might be bigger). + encrypted, err := AESKeyWrap(key[:kdfKeySize], plain) + + return Vx, Vy, encrypted, err +} + +func (e *PrivateKey) DecryptShared(X, Y *big.Int) []byte { + Sx, _ := e.Curve.ScalarMult(X, Y, e.X.Bytes()) + return Sx.Bytes() +} + +func countBits(buffer []byte) int { + var headerLen int + switch buffer[0] { + case 0x4: + headerLen = 3 + case 0x40: + headerLen = 7 + default: + // Unexpected header - but we can still count the bits. + val := buffer[0] + headerLen = 0 + for val > 0 { + val = val / 2 + headerLen++ + } + } + + return headerLen + (len(buffer)-1)*8 +} + +// elliptic.Marshal and elliptic.Unmarshal only marshal uncompressed +// 0x4 MPI types. These functions will check if the curve is cv25519, +// and if so, use the 0x40 compressed type to (un)marshal. Otherwise, +// elliptic.(Un)marshal will be called. + +// Marshal encodes a point into either the 0x4 uncompressed point form, or +// the 0x40 compressed point form for Curve 25519. +func Marshal(curve elliptic.Curve, x, y *big.Int) (buf []byte, bitSize int) { + // NOTE: Read more about MPI encoding in the RFC: + // https://tools.ietf.org/html/rfc4880#section-3.2 + + // We are required to encode the size in bits, counting from the most- + // significant non-zero bit. So assuming that the buffer never + // starts with 0x00, we only need to count bits in the first byte + // - and in the current implementation it will always be 0x4 or 0x40. + + cv, ok := curve25519.ToCurve25519(curve) + if ok { + buf = cv.MarshalType40(x, y) + } else { + buf = elliptic.Marshal(curve, x, y) + } + + return buf, countBits(buf) +} + +// Unmarshal converts a point, serialized by Marshal, into an x, y pair. +// For 0x40 compressed points (for Curve 25519), y will always be 0. +// It is an error if the point is not on the curve; on error, x = nil. +func Unmarshal(curve elliptic.Curve, data []byte) (x, y *big.Int) { + cv, ok := curve25519.ToCurve25519(curve) + if ok { + return cv.UnmarshalType40(data) + } + + return elliptic.Unmarshal(curve, data) +} + +func GenerateKey(curve elliptic.Curve, random io.Reader) (priv *PrivateKey, err error) { + var privBytes []byte + var Vx, Vy *big.Int + + if _, ok := curve25519.ToCurve25519(curve); ok { + privBytes = make([]byte, 32) + _, err = io.ReadFull(random, privBytes) + if err != nil { + return nil, err + } + + // NOTE: PGP expects scalars in the reverse order of the Curve 25519 + // Go library. That's why this clamping is backwards compared + // to curve25519.go. + privBytes[31] &= 248 + privBytes[0] &= 127 + privBytes[0] |= 64 + + Vx, Vy = curve.ScalarBaseMult(privBytes) + } else { + privBytes, Vx, Vy, err = elliptic.GenerateKey(curve, random) + if err != nil { + return nil, err + } + } + + priv = &PrivateKey{} + priv.X = new(big.Int).SetBytes(privBytes) + priv.PublicKey.Curve = curve + priv.PublicKey.X = Vx + priv.PublicKey.Y = Vy + return priv, nil +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 00000000..15dafc55 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,122 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, +// n. 4, 1985, pp. 469-472. +// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +package elgamal // import "github.com/keybase/go-crypto/openpgp/elgamal" + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. +func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { + pLen := (pub.P.BitLen() + 7) / 8 + if len(msg) > pLen-11 { + err = errors.New("elgamal: message too long") + return + } + + // EM = 0x02 || PS || 0x00 || M + em := make([]byte, pLen-1) + em[0] = 2 + ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] + err = nonZeroRandomBytes(ps, random) + if err != nil { + return + } + em[len(em)-len(msg)-1] = 0 + copy(mm, msg) + + m := new(big.Int).SetBytes(em) + + k, err := rand.Int(random, pub.P) + if err != nil { + return + } + + c1 = new(big.Int).Exp(pub.G, k, pub.P) + s := new(big.Int).Exp(pub.Y, k, pub.P) + c2 = s.Mul(s, m) + c2.Mod(c2, pub.P) + + return +} + +// Decrypt takes two integers, resulting from an ElGamal encryption, and +// returns the plaintext of the message. An error can result only if the +// ciphertext is invalid. Users should keep in mind that this is a padding +// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can +// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// Bleichenbacher, Advances in Cryptology (Crypto '98), +func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { + s := new(big.Int).Exp(c1, priv.X, priv.P) + s.ModInverse(s, priv.P) + s.Mul(s, c2) + s.Mod(s, priv.P) + em := s.Bytes() + + firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) + + // The remainder of the plaintext must be a string of non-zero random + // octets, followed by a 0, followed by the message. + // lookingForIndex: 1 iff we are still looking for the zero. + // index: the offset of the first zero byte. + var lookingForIndex, index int + lookingForIndex = 1 + + for i := 1; i < len(em); i++ { + equals0 := subtle.ConstantTimeByteEq(em[i], 0) + index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) + } + + if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { + return nil, errors.New("elgamal: decryption error") + } + return em[index+1:], nil +} + +// nonZeroRandomBytes fills the given slice with non-zero random octets. 
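+// The padding string PS in "0x02 || PS || 0x00 || M" must not contain zero +// octets, since otherwise the 0x00 delimiter that marks the start of the +// message would be ambiguous; any zero byte is therefore re-drawn until it +// is non-zero.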
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { + _, err = io.ReadFull(rand, s) + if err != nil { + return + } + + for i := 0; i < len(s); i++ { + for s[i] == 0 { + _, err = io.ReadFull(rand, s[i:i+1]) + if err != nil { + return + } + } + } + + return +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go b/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go new file mode 100644 index 00000000..855fa89c --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go @@ -0,0 +1,80 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors contains common error types for the OpenPGP packages. +package errors // import "github.com/keybase/go-crypto/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. +type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} + +// DeprecatedKeyError indicates that the key was read and verified +// properly, but uses a deprecated algorithm and can't be used. +type DeprecatedKeyError string + +func (d DeprecatedKeyError) Error() string { + return "openpgp: key is deprecated: " + string(d) +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/keys.go b/vendor/github.com/keybase/go-crypto/openpgp/keys.go new file mode 100644 index 00000000..b30315c4 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/keys.go @@ -0,0 +1,934 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto/hmac" + "encoding/binary" + "io" + "time" + + "github.com/keybase/go-crypto/openpgp/armor" + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/rsa" +) + +// PublicKeyType is the armor type for a PGP public key. 
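+// Both constants are meant for the blockType argument of armor.Encode; a +// sketch (hypothetical): +// +// w, _ := armor.Encode(out, openpgp.PublicKeyType, nil) +// entity.Serialize(w) +// w.Close()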
+var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + // Revocations that are signed by designated revokers. Reading keys + // will not verify these revocations, because it won't have access to + // issuers' public keys, API consumers should do this instead (or + // not, and just assume that the key is probably revoked). + UnverifiedRevocations []*packet.Signature + Subkeys []Subkey + BadSubkeys []BadSubkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Signatures []*packet.Signature + Revocation *packet.Signature +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature + Revocation *packet.Signature +} + +// BadSubkey is one that failed reconstruction, but we'll keep it around for +// informational purposes. +type BadSubkey struct { + Subkey + Err error +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. +type Key struct { + Entity *Entity + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + SelfSignature *packet.Signature + KeyFlags packet.KeyFlagBits +} + +// A KeyRing provides access to public and private keys. +type KeyRing interface { + + // KeysById returns the set of keys that have the given key id. + // fp can be optionally supplied, which is the full key fingerprint. + // If it's provided, then it must match. This comes up in the case + // of GPG subpacket 33. + KeysById(id uint64, fp []byte) []Key + + // KeysByIdAndUsage returns the set of keys with the given id + // that also meet the key usage given by requiredUsage. + // The requiredUsage is expressed as the bitwise-OR of + // packet.KeyFlag* values. + // fp can be optionally supplied, which is the full key fingerprint. + // If it's provided, then it must match. This comes up in the case + // of GPG subpacket 33. + KeysByIdUsage(id uint64, fp []byte, requiredUsage byte) []Key + + // DecryptionKeys returns all private keys that are valid for + // decryption. + DecryptionKeys() []Key +} + +// primaryIdentity returns the Identity marked as primary or the first identity +// if none are so marked. +func (e *Entity) primaryIdentity() *Identity { + var firstIdentity *Identity + for _, ident := range e.Identities { + if firstIdentity == nil { + firstIdentity = ident + } + if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + return ident + } + } + return firstIdentity +} + +// encryptionKey returns the best candidate Key for encrypting a message to the +// given Entity. +func (e *Entity) encryptionKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + // Iterate the keys to find the newest, non-revoked key that can + // encrypt. 
+ var maxTime time.Time + for i, subkey := range e.Subkeys { + + // NOTE(maxtaco) + // If there is a Flags subpacket, then we have to follow it, and only + // use keys that are marked for Encryption of Communication. If there + // isn't a Flags subpacket, and this is an Encrypt-Only key (right now only ElGamal + // suffices), then we implicitly use it. The check for primary below is a little + // more open-ended, but for now, let's be strict and potentially open up + // if we see bugs in the wild. + // + // One more note: old DSA/ElGamal keys tend not to have the Flags subpacket, + // so this sort of thing is pretty important for encrypting to older keys. + // + if ((subkey.Sig.FlagsValid && subkey.Sig.FlagEncryptCommunications) || + (!subkey.Sig.FlagsValid && subkey.PublicKey.PubKeyAlgo == packet.PubKeyAlgoElGamal)) && + subkey.PublicKey.PubKeyAlgo.CanEncrypt() && + !subkey.Sig.KeyExpired(now) && + subkey.Revocation == nil && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { + candidateSubkey = i + maxTime = subkey.Sig.CreationTime + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Sig.GetKeyFlags()}, true + } + + // If we don't have any candidate subkeys for encryption and + // the primary key doesn't have any usage metadata then we + // assume that the primary key is ok. Or, if the primary key is + // marked as ok to encrypt to, then we can obviously use it. + // + // NOTE(maxtaco) - see note above, how this policy is a little too open-ended + // for my liking, but leave it for now. + i := e.primaryIdentity() + if (!i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications) && + e.PrimaryKey.PubKeyAlgo.CanEncrypt() && + !i.SelfSignature.KeyExpired(now) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, i.SelfSignature.GetKeyFlags()}, true + } + + // This Entity appears to be signing only. + return Key{}, false +} + +// signingKey return the best candidate Key for signing a message with this +// Entity. +func (e *Entity) signingKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + // Iterate the keys to find the newest, non-revoked key that can + // sign. + var maxTime time.Time + for i, subkey := range e.Subkeys { + if (!subkey.Sig.FlagsValid || subkey.Sig.FlagSign) && + subkey.PrivateKey.PrivateKey != nil && + subkey.PublicKey.PubKeyAlgo.CanSign() && + !subkey.Sig.KeyExpired(now) && + subkey.Revocation == nil && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { + candidateSubkey = i + maxTime = subkey.Sig.CreationTime + break + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Sig.GetKeyFlags()}, true + } + + // If we have no candidate subkey then we assume that it's ok to sign + // with the primary key. + i := e.primaryIdentity() + if (!i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign) && + e.PrimaryKey.PubKeyAlgo.CanSign() && + !i.SelfSignature.KeyExpired(now) && + e.PrivateKey.PrivateKey != nil { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, i.SelfSignature.GetKeyFlags()}, true + } + + return Key{}, false +} + +// An EntityList contains one or more Entities. 
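+// +// A typical lookup sketch (hypothetical): +// +// el, _ := openpgp.ReadArmoredKeyRing(f) +// keys := el.KeysByIdUsage(id, nil, packet.KeyFlagSign)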
+type EntityList []*Entity + +func keyMatchesIdAndFingerprint(key *packet.PublicKey, id uint64, fp []byte) bool { + if key.KeyId != id { + return false + } + if fp == nil { + return true + } + return hmac.Equal(fp, key.Fingerprint[:]) +} + +// KeysById returns the set of keys that have the given key id. +// fp can be optionally supplied, which is the full key fingerprint. +// If it's provided, then it must match. This comes up in the case +// of GPG subpacket 33. +func (el EntityList) KeysById(id uint64, fp []byte) (keys []Key) { + for _, e := range el { + if keyMatchesIdAndFingerprint(e.PrimaryKey, id, fp) { + var selfSig *packet.Signature + for _, ident := range e.Identities { + if selfSig == nil { + selfSig = ident.SelfSignature + } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + selfSig = ident.SelfSignature + break + } + } + + var keyFlags packet.KeyFlagBits + for _, ident := range e.Identities { + keyFlags.Merge(ident.SelfSignature.GetKeyFlags()) + } + + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, keyFlags}) + } + + for _, subKey := range e.Subkeys { + if keyMatchesIdAndFingerprint(subKey.PublicKey, id, fp) { + + // If there's both a revocation and a sig, then take the + // revocation. Otherwise, we can proceed with the sig. + sig := subKey.Revocation + if sig == nil { + sig = subKey.Sig + } + + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, sig, sig.GetKeyFlags()}) + } + } + } + return +} + +// KeysByIdUsage returns the set of keys with the given id that also meet +// the key usage given by requiredUsage. The requiredUsage is expressed as +// the bitwise-OR of packet.KeyFlag* values. +// fp can be optionally supplied, which is the full key fingerprint. +// If it's provided, then it must match. This comes up in the case +// of GPG subpacket 33. +func (el EntityList) KeysByIdUsage(id uint64, fp []byte, requiredUsage byte) (keys []Key) { + for _, key := range el.KeysById(id, fp) { + if len(key.Entity.Revocations) > 0 { + continue + } + + if key.SelfSignature.RevocationReason != nil { + continue + } + + if requiredUsage != 0 { + var usage byte + + switch { + case key.KeyFlags.Valid: + usage = key.KeyFlags.BitField + + case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoElGamal: + // We also need to handle the case where, although the sig's + // flags aren't valid, the key is implicitly usable for + // encryption by virtue of being ElGamal. See also the comment + // in encryptionKey() above. + usage |= packet.KeyFlagEncryptCommunications + usage |= packet.KeyFlagEncryptStorage + + case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoDSA || + key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoECDSA || + key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoEdDSA: + usage |= packet.KeyFlagSign + + // For a primary RSA key without any key flags, be as permissive + // as possible. + case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoRSA && + keyMatchesIdAndFingerprint(key.Entity.PrimaryKey, id, fp): + usage = (packet.KeyFlagCertify | packet.KeyFlagSign | + packet.KeyFlagEncryptCommunications | packet.KeyFlagEncryptStorage) + } + + if usage&requiredUsage != requiredUsage { + continue + } + } + + keys = append(keys, key) + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption.
+func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && subKey.PrivateKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Sig.GetKeyFlags()}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } + + panic("unreachable") +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } else { + e.PrimaryKey = &e.PrivateKey.PublicKey + } + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var current *Identity + var revocations []*packet.Signature + + designatedRevokers := make(map[uint64]bool) +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + switch pkt := p.(type) { + case *packet.UserId: + + // Make a new Identity object, that we might wind up throwing away. + // We'll only add it if we get a valid self-signature over this + // userID. 
+ current = new(Identity) + current.Name = pkt.Id + current.UserId = pkt + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + // These revocations won't revoke UIDs (see + // SigTypeIdentityRevocation). Handle these first, + // because the key might have a revocation coming from + // another key (designated revoker). + revocations = append(revocations, pkt) + continue + } + + // These are signatures by other people on this key. Let's just ignore them + // from the beginning, since they shouldn't affect our key decoding one way + // or the other. + if pkt.IssuerKeyId != nil && *pkt.IssuerKeyId != e.PrimaryKey.KeyId { + continue + } + + // If this is a signature made by the keyholder, and the signature has stubbed out + // critical packets, then *now* we need to bail out. + if e := pkt.StubbedOutCriticalError; e != nil { + return nil, e + } + + // Next, handle the case of a self-signature. According to RFC 4880, + // Section 5.2.3.3, if there are several self-signatures, + // we should take the newer one. If they were both created + // at the same time, but one of them has keyflags specified and the + // other doesn't, keep the one with the keyflags. We have actually + // seen this in the wild (see the 'Yield' test in read_test.go). + // If there is a tie, and both have the same value for FlagsValid, + // then "last writer wins." + // + // HOWEVER! We have seen yet more keys in the wild (see the 'Spiros' + // test in read_test.go), in which the later self-signature is a bunch + // of junk, and doesn't even specify key flags. Does it really make + // sense to overwrite reasonable key flags with the empty set? I'm not + // sure what that would be trying to achieve, and GPG seems to be + // OK with this situation, and ignores the later (empty) keyflag set. + // So we further tighten our overwrite rules, and only allow the later + // signature to overwrite the earlier signature if so doing won't + // trash the key flags. + if current != nil && + (current.SelfSignature == nil || + (!pkt.CreationTime.Before(current.SelfSignature.CreationTime) && + (pkt.FlagsValid || !current.SelfSignature.FlagsValid))) && + (pkt.SigType == packet.SigTypePositiveCert || pkt.SigType == packet.SigTypeGenericCert) && + pkt.IssuerKeyId != nil && + *pkt.IssuerKeyId == e.PrimaryKey.KeyId { + + if err = e.PrimaryKey.VerifyUserIdSignature(current.Name, e.PrimaryKey, pkt); err == nil { + + current.SelfSignature = pkt + + // NOTE(maxtaco) 2016.01.11 + // Only register an identity once we've gotten a valid self-signature. + // It's possible therefore for us to throw away `current` in the case + // no valid self-signatures were found. That's OK as long as there are + // other identities that make sense. + // + // NOTE! We might later see a revocation for this very same UID, and it + // won't be undone. We've preserved this feature from the original + // Google OpenPGP we forked from. + e.Identities[current.Name] = current + } else { + // We really should warn that there was a failure here, not raise an + // error, since this really shouldn't be a fail-stop error. + } + } else if current != nil && pkt.SigType == packet.SigTypeIdentityRevocation { + if err = e.PrimaryKey.VerifyUserIdSignature(current.Name, e.PrimaryKey, pkt); err == nil { + // Note: we are not removing the identity from + // e.Identities. The caller can always filter by the Revocation + // field to ignore revoked identities.
+ current.Revocation = pkt + } + } else if pkt.SigType == packet.SigTypeDirectSignature { + if err = e.PrimaryKey.VerifyRevocationSignature(e.PrimaryKey, pkt); err == nil { + if desig := pkt.DesignatedRevoker; desig != nil { + // If it's a designated revoker signature, take the last 8 octets + // of the fingerprint as the Key ID and save it to the + // designatedRevokers map. We consult this map later to see if a + // foreign revocation should be added to UnverifiedRevocations. + keyID := binary.BigEndian.Uint64(desig.Fingerprint[len(desig.Fingerprint)-8:]) + designatedRevokers[keyID] = true + } + } + } else if current == nil { + // NOTE(maxtaco) + // + // See https://github.com/keybase/client/issues/2666 + // + // There might have been a user attribute picture before this signature, + // in which case this is still a valid PGP key. In the future we might + // not ignore user attributes (like picture). But either way, it doesn't + // make sense to bail out here. Keep looking for other valid signatures. + // + // Used to be: + // return nil, errors.StructuralError("signature packet found before user id packet") + } else { + current.Signatures = append(current.Signatures, pkt) + } + case *packet.PrivateKey: + if !pkt.IsSubkey { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if !pkt.IsSubkey { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + if revocation.IssuerKeyId == nil || *revocation.IssuerKeyId == e.PrimaryKey.KeyId { + // Key revokes itself, something that we can verify. + err = e.PrimaryKey.VerifyRevocationSignature(e.PrimaryKey, revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } else if revocation.IssuerKeyId != nil { + if _, ok := designatedRevokers[*revocation.IssuerKeyId]; ok { + // Revocation is done by a certified designated revoker, + // but we can't verify the revocation. + e.UnverifiedRevocations = append(e.UnverifiedRevocations, revocation) + } + } + } + + return e, nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + var lastErr error + for { + p, err := packets.Next() + if err == io.EOF { + break + } + if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + sig, ok := p.(*packet.Signature) + if !ok { + // Hit a non-signature packet, so assume we're up to the next key + packets.Unread(p) + break + } + if st := sig.SigType; st != packet.SigTypeSubkeyBinding && st != packet.SigTypeSubkeyRevocation { + + // Note(maxtaco): + // We used to error out here, but instead, let's fast-forward past + // packets that are in the wrong place (like misplaced 0x13 signatures) + // until we get to one that works. For a test case, + // see TestWithBadSubkeySignaturePackets. + + continue + } + err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig) + if err != nil { + // Invalid signature, so again, no need to abandon all hope; just + // continue, and make a note of the error we hit.
+ lastErr = errors.StructuralError("subkey signature invalid: " + err.Error()) + continue + } + switch sig.SigType { + case packet.SigTypeSubkeyBinding: + // Does the "new" sig set expiration to later date than + // "previous" sig? + if subKey.Sig == nil || subKey.Sig.ExpiresBeforeOther(sig) { + subKey.Sig = sig + } + case packet.SigTypeSubkeyRevocation: + // First writer wins + if subKey.Revocation == nil { + subKey.Revocation = sig + } + } + } + + if subKey.Sig != nil { + if err := subKey.PublicKey.ErrorIfDeprecated(); err != nil { + // Key passed signature check but is deprecated. + subKey.Sig = nil + lastErr = err + } + } + + if subKey.Sig != nil { + e.Subkeys = append(e.Subkeys, subKey) + } else { + if lastErr == nil { + lastErr = errors.StructuralError("Subkey wasn't signed; expected a 'binding' signature") + } + e.BadSubkeys = append(e.BadSubkeys, BadSubkey{Subkey: subKey, Err: lastErr}) + } + return nil +} + +const defaultRSAKeyBits = 2048 + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. +func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + currentTime := config.Now() + + bits := defaultRSAKeyBits + if config != nil && config.RSABits != 0 { + bits = config.RSABits + } + + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return nil, errors.InvalidArgumentError("user id field contained invalid characters") + } + signingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + + e := &Entity{ + PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv), + Identities: make(map[string]*Identity), + } + isPrimaryId := true + e.Identities[uid.Id] = &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: &packet.Signature{ + CreationTime: currentTime, + SigType: packet.SigTypePositiveCert, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + IsPrimaryId: &isPrimaryId, + FlagsValid: true, + FlagSign: true, + FlagCertify: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + + // If the user passes in a DefaultHash via packet.Config, set the + // PreferredHash for the SelfSignature. + if config != nil && config.DefaultHash != 0 { + e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} + } + + // Likewise for DefaultCipher. + if config != nil && config.DefaultCipher != 0 { + e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} + } + + e.Subkeys = make([]Subkey, 1) + e.Subkeys[0] = Subkey{ + PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv), + Sig: &packet.Signature{ + CreationTime: currentTime, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + e.Subkeys[0].PublicKey.IsSubkey = true + e.Subkeys[0].PrivateKey.IsSubkey = true + + return e, nil +} + +// SerializePrivate serializes an Entity, including private key material, to +// the given Writer. 
For now, it must only be used on an Entity returned from +// NewEntity. +// If config is nil, sensible defaults will be used. +func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + if e.PrivateKey.PrivateKey != nil { + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + if e.PrivateKey.PrivateKey != nil && !config.ReuseSignatures() { + // If not reusing existing signatures, sign subkey using private key + // (subkey binding), but also sign primary key using subkey (primary + // key binding) if subkey is used for signing. + if subkey.Sig.FlagSign { + err = subkey.Sig.CrossSignKey(e.PrimaryKey, subkey.PrivateKey, config) + if err != nil { + return err + } + } + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + } + + if subkey.Revocation != nil { + err = subkey.Revocation.Serialize(w) + if err != nil { + return + } + } + + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w. (No private +// key material will be output). +func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + + if subkey.Revocation != nil { + err = subkey.Revocation.Serialize(w) + if err != nil { + return err + } + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. +func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing Entity must have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := &packet.Signature{ + SigType: packet.SigTypeGenericCert, + PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.PrivateKey.KeyId, + } + if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} + +// CopySubkeyRevocations copies subkey revocations from the src Entity over +// to the receiver entity. 
We need this because `gpg --export-secret-key` does +// not appear to output subkey revocations. In this case we need to manually +// merge with the output of `gpg --export`. +func (e *Entity) CopySubkeyRevocations(src *Entity) { + m := make(map[[20]byte]*packet.Signature) + for _, subkey := range src.Subkeys { + if subkey.Revocation != nil { + m[subkey.PublicKey.Fingerprint] = subkey.Revocation + } + } + for i, subkey := range e.Subkeys { + if r := m[subkey.PublicKey.Fingerprint]; r != nil { + e.Subkeys[i].Revocation = r + } + } +} + +// FindVerifiedDesignatedRevoke tries to confirm any designated +// revocation of the entity. For this function to work, the revocation +// issuer's key has to be found in the keyring. The first successfully +// verified designated revocation is returned along with the key that +// verified it. +func FindVerifiedDesignatedRevoke(keyring KeyRing, entity *Entity) (*packet.Signature, *Key) { + for _, sig := range entity.UnverifiedRevocations { + if sig.IssuerKeyId == nil { + continue + } + + issuerKeyId := *sig.IssuerKeyId + issuerFingerprint := sig.IssuerFingerprint + keys := keyring.KeysByIdUsage(issuerKeyId, issuerFingerprint, packet.KeyFlagSign) + if len(keys) == 0 { + continue + } + for _, key := range keys { + err := key.PublicKey.VerifyRevocationSignature(entity.PrimaryKey, sig) + if err == nil { + return sig, &key + } + } + } + + return nil, nil +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go new file mode 100644 index 00000000..f023fe53 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go @@ -0,0 +1,124 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "compress/bzip2" + "compress/flate" + "compress/zlib" + "io" + "strconv" + + "github.com/keybase/go-crypto/openpgp/errors" +) + +// Compressed represents a compressed OpenPGP packet. The decompressed contents +// will contain more OpenPGP packets. See RFC 4880, section 5.6. +type Compressed struct { + Body io.Reader +} + +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression +) + +// CompressionConfig contains compressor configuration settings. +type CompressionConfig struct { + // Level is the compression level to use. It must be set to + // between -1 and 9, with -1 causing the compressor to use the + // default compression level, 0 causing the compressor to use + // no compression and 1 to 9 representing increasing (better, + // slower) compression levels. If Level is less than -1 or + // more than 9, a non-nil error will be returned during + // encryption. See the constants above for convenient common + // settings for Level. + Level int +} + +func (c *Compressed) parse(r io.Reader) error { + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + + switch buf[0] { + case 1: + c.Body = flate.NewReader(r) + case 2: + c.Body, err = zlib.NewReader(r) + case 3: + c.Body = bzip2.NewReader(r) + default: + err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) + } + + return err +} + +// compressedWriteCloser represents the serialized compression stream +// header and the compressor.
Its Close() method ensures that both the
+// compressor and serialized stream header are closed. Its Write()
+// method writes to the compressor.
+type compressedWriteCloser struct {
+	sh io.Closer      // Stream Header
+	c  io.WriteCloser // Compressor
+}
+
+func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
+	return cwc.c.Write(p)
+}
+
+func (cwc compressedWriteCloser) Close() (err error) {
+	err = cwc.c.Close()
+	if err != nil {
+		return err
+	}
+
+	return cwc.sh.Close()
+}
+
+// SerializeCompressed serializes a compressed data packet to w and
+// returns a WriteCloser to which the literal data packets themselves
+// can be written and which MUST be closed on completion. If cc is
+// nil, sensible defaults will be used to configure the compression
+// algorithm.
+func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
+	compressed, err := serializeStreamHeader(w, packetTypeCompressed)
+	if err != nil {
+		return
+	}
+
+	_, err = compressed.Write([]byte{uint8(algo)})
+	if err != nil {
+		return
+	}
+
+	level := DefaultCompression
+	if cc != nil {
+		level = cc.Level
+	}
+
+	var compressor io.WriteCloser
+	switch algo {
+	case CompressionZIP:
+		compressor, err = flate.NewWriter(compressed, level)
+	case CompressionZLIB:
+		compressor, err = zlib.NewWriterLevel(compressed, level)
+	default:
+		s := strconv.Itoa(int(algo))
+		err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
+	}
+	if err != nil {
+		return
+	}
+
+	literaldata = compressedWriteCloser{compressed, compressor}
+
+	return
+}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go
new file mode 100644
index 00000000..f4125e18
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go
@@ -0,0 +1,98 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto"
+	"crypto/rand"
+	"io"
+	"time"
+)
+
+// Config collects a number of parameters along with sensible defaults.
+// A nil *Config is valid and results in all default values.
+type Config struct {
+	// Rand provides the source of entropy.
+	// If nil, the crypto/rand Reader is used.
+	Rand io.Reader
+	// DefaultHash is the default hash function to be used.
+	// If zero, SHA-256 is used.
+	DefaultHash crypto.Hash
+	// DefaultCipher is the cipher to be used.
+	// If zero, AES-128 is used.
+	DefaultCipher CipherFunction
+	// Time returns the current time as the number of seconds since the
+	// epoch. If Time is nil, time.Now is used.
+	Time func() time.Time
+	// DefaultCompressionAlgo is the compression algorithm to be
+	// applied to the plaintext before encryption. If zero, no
+	// compression is done.
+	DefaultCompressionAlgo CompressionAlgo
+	// CompressionConfig configures the compression settings.
+	CompressionConfig *CompressionConfig
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 1024 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 65536 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly.
When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+	// RSABits is the number of bits in new RSA keys made with NewEntity.
+	// If zero, then 2048 bit keys are created.
+	RSABits int
+	// ReuseSignatures tells us to reuse existing Signatures
+	// on serialized output.
+	ReuseSignaturesOnSerialize bool
+}
+
+func (c *Config) Random() io.Reader {
+	if c == nil || c.Rand == nil {
+		return rand.Reader
+	}
+	return c.Rand
+}
+
+func (c *Config) Hash() crypto.Hash {
+	if c == nil || uint(c.DefaultHash) == 0 {
+		return crypto.SHA256
+	}
+	return c.DefaultHash
+}
+
+func (c *Config) Cipher() CipherFunction {
+	if c == nil || uint8(c.DefaultCipher) == 0 {
+		return CipherAES128
+	}
+	return c.DefaultCipher
+}
+
+func (c *Config) Now() time.Time {
+	if c == nil || c.Time == nil {
+		return time.Now()
+	}
+	return c.Time()
+}
+
+func (c *Config) Compression() CompressionAlgo {
+	if c == nil {
+		return CompressionNone
+	}
+	return c.DefaultCompressionAlgo
+}
+
+func (c *Config) PasswordHashIterations() int {
+	if c == nil || c.S2KCount == 0 {
+		return 0
+	}
+	return c.S2KCount
+}
+
+func (c *Config) ReuseSignatures() bool {
+	return c != nil && c.ReuseSignaturesOnSerialize
+}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go
new file mode 100644
index 00000000..41de661d
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go
@@ -0,0 +1,104 @@
+package packet
+
+import (
+	"bytes"
+	"io"
+	"math/big"
+
+	"github.com/keybase/go-crypto/openpgp/ecdh"
+	"github.com/keybase/go-crypto/openpgp/errors"
+	"github.com/keybase/go-crypto/openpgp/s2k"
+)
+
+// ECDHKdfParams generates the KDF parameters sequence for the given
+// PublicKey. See https://tools.ietf.org/html/rfc6637#section-8
+func ECDHKdfParams(pub *PublicKey) []byte {
+	buf := new(bytes.Buffer)
+	oid := pub.ec.oid
+	buf.WriteByte(byte(len(oid)))
+	buf.Write(oid)
+	buf.WriteByte(18) // ECDH TYPE
+	pub.ecdh.serialize(buf)
+	buf.WriteString("Anonymous Sender    ")
+	buf.Write(pub.Fingerprint[:])
+	return buf.Bytes()
+}
+
+func decryptKeyECDH(priv *PrivateKey, X, Y *big.Int, C []byte) (out []byte, err error) {
+	ecdhpriv, ok := priv.PrivateKey.(*ecdh.PrivateKey)
+	if !ok {
+		return nil, errors.InvalidArgumentError("bad internal ECDH key")
+	}
+
+	Sx := ecdhpriv.DecryptShared(X, Y)
+
+	kdfParams := ECDHKdfParams(&priv.PublicKey)
+	hash, ok := s2k.HashIdToHash(byte(priv.ecdh.KdfHash))
+	if !ok {
+		return nil, errors.InvalidArgumentError("invalid hash id in private key")
+	}
+
+	key := ecdhpriv.KDF(Sx, kdfParams, hash)
+	keySize := CipherFunction(priv.ecdh.KdfAlgo).KeySize()
+
+	decrypted, err := ecdh.AESKeyUnwrap(key[:keySize], C)
+	if err != nil {
+		return nil, err
+	}
+
+	// We have to "read ahead" to discover the real length of the
+	// encryption key and properly unpad the buffer.
+	cipherFunc := CipherFunction(decrypted[0])
+	// +3 bytes = 1-byte cipher id and 2-byte checksum.
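+	// For example, with an AES-256 session key the unpadded plaintext is
+	// 1 (cipher id) + 32 (key) + 2 (checksum) = 35 bytes; any remaining
+	// bytes are the PKCS#5-style padding that RFC 6637 adds before key wrap.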
+	out = ecdh.UnpadBuffer(decrypted, cipherFunc.KeySize()+3)
+	if out == nil {
+		return nil, errors.InvalidArgumentError("invalid padding in ECDH session key")
+	}
+	return out, nil
+}
+
+func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *PublicKey, keyBlock []byte) error {
+	ecdhpub := pub.PublicKey.(*ecdh.PublicKey)
+	kdfParams := ECDHKdfParams(pub)
+
+	hash, ok := s2k.HashIdToHash(byte(pub.ecdh.KdfHash))
+	if !ok {
+		return errors.InvalidArgumentError("invalid hash id in private key")
+	}
+
+	kdfKeySize := CipherFunction(pub.ecdh.KdfAlgo).KeySize()
+	Vx, Vy, C, err := ecdhpub.Encrypt(rand, kdfParams, keyBlock, hash, kdfKeySize)
+	if err != nil {
+		return err
+	}
+
+	mpis, mpiBitLen := ecdh.Marshal(ecdhpub.Curve, Vx, Vy)
+
+	packetLen := len(header) /* header length in bytes */
+	packetLen += 2 /* mpi length in bits */ + len(mpis)
+	packetLen += 1 /* ciphertext size in bytes */ + len(C)
+
+	err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write(header[:])
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write([]byte{byte(mpiBitLen >> 8), byte(mpiBitLen)})
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write(mpis[:])
+	if err != nil {
+		return err
+	}
+
+	w.Write([]byte{byte(len(C))})
+	w.Write(C[:])
+	return nil
+}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go
new file mode 100644
index 00000000..2a6a0416
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go
@@ -0,0 +1,231 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"encoding/binary"
+	"io"
+	"math/big"
+	"strconv"
+
+	"github.com/keybase/go-crypto/openpgp/ecdh"
+	"github.com/keybase/go-crypto/openpgp/elgamal"
+	"github.com/keybase/go-crypto/openpgp/errors"
+	"github.com/keybase/go-crypto/rsa"
+)
+
+const encryptedKeyVersion = 3
+
+// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
+// section 5.1.
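+//
+// A minimal decryption sketch (keyId and priv are hypothetical variables;
+// error handling elided):
+//
+//	p, _ := Read(r)
+//	if ek, ok := p.(*EncryptedKey); ok && ek.KeyId == keyId {
+//		_ = ek.Decrypt(priv, nil)
+//		// ek.CipherFunc and ek.Key are now valid.
+//	}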
+type EncryptedKey struct {
+	KeyId      uint64
+	Algo       PublicKeyAlgorithm
+	CipherFunc CipherFunction // only valid after a successful Decrypt
+	Key        []byte         // only valid after a successful Decrypt
+
+	encryptedMPI1, encryptedMPI2 parsedMPI
+	ecdh_C                       []byte
+}
+
+func (e *EncryptedKey) parse(r io.Reader) (err error) {
+	var buf [10]byte
+	_, err = readFull(r, buf[:])
+	if err != nil {
+		return
+	}
+	if buf[0] != encryptedKeyVersion {
+		return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
+	}
+	e.KeyId = binary.BigEndian.Uint64(buf[1:9])
+	e.Algo = PublicKeyAlgorithm(buf[9])
+	switch e.Algo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+	case PubKeyAlgoElGamal:
+		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+		if err != nil {
+			return
+		}
+		e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
+	case PubKeyAlgoECDH:
+		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+		if err != nil {
+			return err
+		}
+		_, err = readFull(r, buf[:1]) // read C len (1 byte)
+		if err != nil {
+			return err
+		}
+		e.ecdh_C = make([]byte, int(buf[0]))
+		_, err = readFull(r, e.ecdh_C)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	_, err = consumeAll(r)
+	return err
+}
+
+func checksumKeyMaterial(key []byte) uint16 {
+	var checksum uint16
+	for _, v := range key {
+		checksum += uint16(v)
+	}
+	return checksum
+}
+
+// Decrypt decrypts an encrypted session key with the given private key. The
+// private key must have been decrypted first.
+// If config is nil, sensible defaults will be used.
+func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
+	if priv == nil || priv.PrivateKey == nil {
+		return errors.InvalidArgumentError("attempting to decrypt with nil PrivateKey")
+	}
+
+	var err error
+	var b []byte
+
+	// TODO(agl): use session key decryption routines here to avoid
+	// padding oracle attacks.
+	switch priv.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		k := priv.PrivateKey.(*rsa.PrivateKey)
+		b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
+	case PubKeyAlgoElGamal:
+		c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
+		c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
+		b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
+	case PubKeyAlgoECDH:
+		// Note: Unmarshal checks if point is on the curve.
+		c1, c2 := ecdh.Unmarshal(priv.PrivateKey.(*ecdh.PrivateKey).Curve, e.encryptedMPI1.bytes)
+		if c1 == nil {
+			return errors.InvalidArgumentError("failed to parse EC point for encryption key")
+		}
+		b, err = decryptKeyECDH(priv, c1, c2, e.ecdh_C)
+	default:
+		err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
+	}
+
+	if err != nil {
+		return err
+	}
+
+	e.CipherFunc = CipherFunction(b[0])
+	e.Key = b[1 : len(b)-2]
+	expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
+	checksum := checksumKeyMaterial(e.Key)
+	if checksum != expectedChecksum {
+		return errors.StructuralError("EncryptedKey checksum incorrect")
+	}
+
+	return nil
+}
+
+// Serialize writes the encrypted key packet, e, to w.
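+// Note that this method only re-serializes RSA and ElGamal key material;
+// fresh ECDH packets are produced via SerializeEncryptedKey below instead.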
+func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + case PubKeyAlgoElGamal: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + writeMPIs(w, e.encryptedMPI1) + case PubKeyAlgoElGamal: + writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) + default: + panic("internal error") + } + + return nil +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoECDH: + return serializeEncryptedKeyECDH(w, config.Random(), buf, pub, keyBlock) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + return writeMPI(w, 8*uint16(len(cipherText)), cipherText) +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + err = writeBig(w, c1) + if err != nil { + return err + } + return writeBig(w, c2) +} diff --git 
a/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go
new file mode 100644
index 00000000..1a9ec6e5
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go
@@ -0,0 +1,89 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"encoding/binary"
+	"io"
+)
+
+// LiteralData represents the contents of a file, as carried in a literal
+// data packet. See RFC 4880, section 5.9.
+type LiteralData struct {
+	IsBinary bool
+	FileName string
+	Time     uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
+	Body     io.Reader
+}
+
+// ForEyesOnly returns whether the contents of the LiteralData have been marked
+// as especially sensitive.
+func (l *LiteralData) ForEyesOnly() bool {
+	return l.FileName == "_CONSOLE"
+}
+
+func (l *LiteralData) parse(r io.Reader) (err error) {
+	var buf [256]byte
+
+	_, err = readFull(r, buf[:2])
+	if err != nil {
+		return
+	}
+
+	l.IsBinary = buf[0] == 'b'
+	fileNameLen := int(buf[1])
+
+	_, err = readFull(r, buf[:fileNameLen])
+	if err != nil {
+		return
+	}
+
+	l.FileName = string(buf[:fileNameLen])
+
+	_, err = readFull(r, buf[:4])
+	if err != nil {
+		return
+	}
+
+	l.Time = binary.BigEndian.Uint32(buf[:4])
+	l.Body = r
+	return
+}
+
+// SerializeLiteral serializes a literal data packet to w and returns a
+// WriteCloser to which the data itself can be written and which MUST be closed
+// on completion. The fileName is truncated to 255 bytes.
+func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
+	var buf [4]byte
+	buf[0] = 't'
+	if isBinary {
+		buf[0] = 'b'
+	}
+	if len(fileName) > 255 {
+		fileName = fileName[:255]
+	}
+	buf[1] = byte(len(fileName))
+
+	inner, err := serializeStreamHeader(w, packetTypeLiteralData)
+	if err != nil {
+		return
+	}
+
+	_, err = inner.Write(buf[:2])
+	if err != nil {
+		return
+	}
+	_, err = inner.Write([]byte(fileName))
+	if err != nil {
+		return
+	}
+	binary.BigEndian.PutUint32(buf[:], time)
+	_, err = inner.Write(buf[:])
+	if err != nil {
+		return
+	}
+
+	plaintext = inner
+	return
+}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go
new file mode 100644
index 00000000..ce2a33a5
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go
@@ -0,0 +1,143 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
+
+package packet
+
+import (
+	"crypto/cipher"
+)
+
+type ocfbEncrypter struct {
+	b       cipher.Block
+	fre     []byte
+	outUsed int
+}
+
+// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
+// performed.
+type OCFBResyncOption bool
+
+const (
+	OCFBResync   OCFBResyncOption = true
+	OCFBNoResync OCFBResyncOption = false
+)
+
+// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
+// cipher feedback mode using the given cipher.Block, and an initial amount of
+// ciphertext. randData must be random bytes and be the same length as the
+// cipher.Block's block size. Resync determines if the "resynchronization step"
+// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
+// this point.
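+//
+// A usage sketch (key is a hypothetical AES key; error handling elided):
+//
+//	block, _ := aes.NewCipher(key)
+//	randData := make([]byte, block.BlockSize())
+//	_, _ = io.ReadFull(rand.Reader, randData)
+//	stream, prefix := NewOCFBEncrypter(block, randData, OCFBResync)
+//	// prefix (blockSize+2 bytes) is written out first; stream then
+//	// encrypts the plaintext via XORKeyStream.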
+func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. If an incorrect key is detected then nil is returned. On +// successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if prefixCopy[blockSize-2] != prefixCopy[blockSize] || + prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { + return nil + } + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 00000000..af404bb1 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,74 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "io" + "strconv" + + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/s2k" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. 
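+//
+// A one-pass signature packet precedes the signed data, carrying just enough
+// information (signature type, hash, algorithm and key id) for the receiver
+// to verify in a single pass; the full Signature packet follows the data.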
+type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go new file mode 100644 index 00000000..cdeea012 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/keybase/go-crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = ioutil.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. 
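+//
+// A typical loop (sketch; r is any io.Reader positioned at raw packets):
+//
+//	or := NewOpaqueReader(r)
+//	for op, err := or.Next(); err == nil; op, err = or.Next() {
+//		fmt.Println(op.Tag, len(op.Contents))
+//	}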
+type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. +func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go new file mode 100644 index 00000000..eb61eda9 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go @@ -0,0 +1,576 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "github.com/keybase/go-crypto/openpgp/packet" + +import ( + "bufio" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/elliptic" + "io" + "math/big" + + "github.com/keybase/go-crypto/cast5" + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/rsa" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. 
+func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. +func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + for len(p) > 0 { + for power := uint(14); power < 32; power-- { + l := 1 << power + if len(p) >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(p[:l]) + n += m + if err != nil { + return + } + p = p[l:] + break + } + } + } + return +} + +func (w *partialLengthWriter) Close() error { + w.lengthByte[0] = 0 + _, err := w.w.Write(w.lengthByte[:]) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
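+//
+// For example, a new-format header byte 0xC2 followed by 0x0F denotes tag 2
+// (signature) with a 15-byte body, while a two-octet length 0xC8 0x00
+// decodes as (200-192)<<8 + 0 + 192 = 2240 bytes.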
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
+	var buf [4]byte
+	_, err = io.ReadFull(r, buf[:1])
+	if err != nil {
+		return
+	}
+	if buf[0]&0x80 == 0 {
+		err = errors.StructuralError("tag byte does not have MSB set")
+		return
+	}
+	if buf[0]&0x40 == 0 {
+		// Old format packet
+		tag = packetType((buf[0] & 0x3f) >> 2)
+		lengthType := buf[0] & 3
+		if lengthType == 3 {
+			length = -1
+			contents = r
+			return
+		}
+		lengthBytes := 1 << lengthType
+		_, err = readFull(r, buf[0:lengthBytes])
+		if err != nil {
+			return
+		}
+		for i := 0; i < lengthBytes; i++ {
+			length <<= 8
+			length |= int64(buf[i])
+		}
+		contents = &spanReader{r, length}
+		return
+	}
+
+	// New format packet
+	tag = packetType(buf[0] & 0x3f)
+	length, isPartial, err := readLength(r)
+	if err != nil {
+		return
+	}
+	if isPartial {
+		contents = &partialLengthReader{
+			remaining: length,
+			isPartial: true,
+			r:         r,
+		}
+		length = -1
+	} else {
+		contents = &spanReader{r, length}
+	}
+	return
+}
+
+// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
+// 4.2.
+func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
+	var buf [6]byte
+	var n int
+
+	buf[0] = 0x80 | 0x40 | byte(ptype)
+	if length < 192 {
+		buf[1] = byte(length)
+		n = 2
+	} else if length < 8384 {
+		length -= 192
+		buf[1] = 192 + byte(length>>8)
+		buf[2] = byte(length)
+		n = 3
+	} else {
+		buf[1] = 255
+		buf[2] = byte(length >> 24)
+		buf[3] = byte(length >> 16)
+		buf[4] = byte(length >> 8)
+		buf[5] = byte(length)
+		n = 6
+	}
+
+	_, err = w.Write(buf[:n])
+	return
+}
+
+// serializeStreamHeader writes an OpenPGP packet header to w where the
+// length of the packet is unknown. It returns an io.WriteCloser which can be
+// used to write the contents of the packet. See RFC 4880, section 4.2.
+func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
+	var buf [1]byte
+	buf[0] = 0x80 | 0x40 | byte(ptype)
+	_, err = w.Write(buf[:])
+	if err != nil {
+		return
+	}
+	out = &partialLengthWriter{w: w}
+	return
+}
+
+// Packet represents an OpenPGP packet. Users are expected to try casting
+// instances of this interface to specific packet types.
+type Packet interface {
+	parse(io.Reader) error
+}
+
+// consumeAll reads from the given Reader until error, returning the number of
+// bytes read.
+func consumeAll(r io.Reader) (n int64, err error) {
+	var m int
+	var buf [1024]byte
+
+	for {
+		m, err = r.Read(buf[:])
+		n += int64(m)
+		if err == io.EOF {
+			err = nil
+			return
+		}
+		if err != nil {
+			return
+		}
+	}
+
+	panic("unreachable")
+}
+
+// packetType represents the numeric ids of the different OpenPGP packet types. See
+// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
+type packetType uint8
+
+const (
+	packetTypeEncryptedKey              packetType = 1
+	packetTypeSignature                 packetType = 2
+	packetTypeSymmetricKeyEncrypted     packetType = 3
+	packetTypeOnePassSignature          packetType = 4
+	packetTypePrivateKey                packetType = 5
+	packetTypePublicKey                 packetType = 6
+	packetTypePrivateSubkey             packetType = 7
+	packetTypeCompressed                packetType = 8
+	packetTypeSymmetricallyEncrypted    packetType = 9
+	packetTypeLiteralData               packetType = 11
+	packetTypeUserId                    packetType = 13
+	packetTypePublicSubkey              packetType = 14
+	packetTypeUserAttribute             packetType = 17
+	packetTypeSymmetricallyEncryptedMDC packetType = 18
+)
+
+// peekVersion detects the version of a public key packet about to
+// be read.
A bufio.Reader at the original position of the io.Reader +// is returned. +func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { + bufr = bufio.NewReader(r) + var verBuf []byte + if verBuf, err = bufr.Peek(1); err != nil { + return + } + ver = verBuf[0] + return +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + var version byte + // Detect signature version + if contents, version, err = peekVersion(contents); err != nil { + return + } + if version < 4 { + p = new(SignatureV3) + } else { + p = new(Signature) + } + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + var version byte + if contents, version, err = peekVersion(contents); err != nil { + return + } + isSubkey := tag == packetTypePublicSubkey + if version < 4 { + p = &PublicKeyV3{IsSubkey: isSubkey} + } else { + p = &PublicKey{IsSubkey: isSubkey} + } + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0 + SigTypeText = 1 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 + SigTypeIdentityRevocation = 0x30 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. + PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 + + PubKeyAlgoBadElGamal PublicKeyAlgorithm = 20 // Reserved (deprecated, formerly ElGamal Encrypt or Sign) + // RFC -1 + PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. 
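+// For example, PubKeyAlgoRSA.CanEncrypt() and PubKeyAlgoECDH.CanEncrypt()
+// return true, while the sign-only algorithms (DSA, ECDSA, EdDSA) and
+// PubKeyAlgoRSASignOnly do not.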
+func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. +func (pka PublicKeyAlgorithm) CanSign() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + return true + } + return false +} + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction uint8 + +const ( + Cipher3DES CipherFunction = 2 + CipherCAST5 CipherFunction = 3 + CipherAES128 CipherFunction = 7 + CipherAES192 CipherFunction = 8 + CipherAES256 CipherFunction = 9 +) + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case Cipher3DES: + return 24 + case CipherCAST5: + return cast5.KeySize + case CipherAES128: + return 16 + case CipherAES192: + return 24 + case CipherAES256: + return 32 + } + return 0 +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + switch cipher { + case Cipher3DES: + return des.BlockSize + case CipherCAST5: + return 8 + case CipherAES128, CipherAES192, CipherAES256: + return 16 + } + return 0 +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + switch cipher { + case Cipher3DES: + block, _ = des.NewTripleDESCipher(key) + case CipherCAST5: + block, _ = cast5.NewCipher(key) + case CipherAES128, CipherAES192, CipherAES256: + block, _ = aes.NewCipher(key) + } + return +} + +// readMPI reads a big integer from r. The bit length returned is the bit +// length that was specified in r. This is preserved so that the integer can be +// reserialized exactly. +func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { + var buf [2]byte + _, err = readFull(r, buf[0:]) + if err != nil { + return + } + bitLength = uint16(buf[0])<<8 | uint16(buf[1]) + numBytes := (int(bitLength) + 7) / 8 + mpi = make([]byte, numBytes) + _, err = readFull(r, mpi) + // According to RFC 4880 3.2. we should check that the MPI has no leading + // zeroes (at least when not an encrypted MPI?), but this implementation + // does generate leading zeroes, so we keep accepting them. + return +} + +// writeMPI serializes a big integer to w. +func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { + // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. + // Implementations seem to be tolerant of them, and stripping them would + // make it complex to guarantee matching re-serialization. + _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) + if err == nil { + _, err = w.Write(mpiBytes) + } + return +} + +func WritePaddedBigInt(w io.Writer, length int, X *big.Int) (n int, err error) { + bytes := X.Bytes() + n1, err := w.Write(make([]byte, length-len(bytes))) + if err != nil { + return n1, err + } + n2, err := w.Write(bytes) + if err != nil { + return n2, err + } + return (n1 + n2), err +} + +// Minimum number of bytes to fit the curve coordinates. All +// coordinates have to be 0-padded to this length. +func mpiPointByteLength(curve elliptic.Curve) int { + return (curve.Params().P.BitLen() + 7) / 8 +} + +// writeBig serializes a *big.Int to w. 
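+// An MPI is a two-octet big-endian bit count followed by the magnitude
+// bytes; for example, the value 511 (nine bits) serializes as
+// 00 09 01 FF.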
+func writeBig(w io.Writer, i *big.Int) error {
+	return writeMPI(w, uint16(i.BitLen()), i.Bytes())
+}
+
+// padToKeySize left-pads an MPI with zeroes to match the length of the
+// specified RSA public key.
+func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
+	k := (pub.N.BitLen() + 7) / 8
+	if len(b) >= k {
+		return b
+	}
+	bb := make([]byte, k)
+	copy(bb[len(bb)-len(b):], b)
+	return bb
+}
+
+// CompressionAlgo represents the different compression algorithms
+// supported by OpenPGP (except for BZIP2, which is not currently
+// supported). See Section 9.3 of RFC 4880.
+type CompressionAlgo uint8
+
+const (
+	CompressionNone CompressionAlgo = 0
+	CompressionZIP  CompressionAlgo = 1
+	CompressionZLIB CompressionAlgo = 2
+)
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go
new file mode 100644
index 00000000..5305b1f6
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go
@@ -0,0 +1,557 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"crypto/sha1"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/big"
+	"strconv"
+	"time"
+
+	"github.com/keybase/go-crypto/ed25519"
+	"github.com/keybase/go-crypto/openpgp/ecdh"
+	"github.com/keybase/go-crypto/openpgp/elgamal"
+	"github.com/keybase/go-crypto/openpgp/errors"
+	"github.com/keybase/go-crypto/openpgp/s2k"
+	"github.com/keybase/go-crypto/rsa"
+)
+
+// PrivateKey represents a possibly encrypted private key. See RFC 4880,
+// section 5.5.3.
+type PrivateKey struct {
+	PublicKey
+	Encrypted     bool // if true then the private key is unavailable until Decrypt has been called.
+	encryptedData []byte
+	cipher        CipherFunction
+	s2k           func(out, in []byte)
+	PrivateKey    interface{} // An *rsa.PrivateKey, *dsa.PrivateKey, *elgamal.PrivateKey, *ecdsa.PrivateKey, *ecdh.PrivateKey or *EdDSAPrivateKey.
+ sha1Checksum bool + iv []byte + s2kHeader []byte +} + +type EdDSAPrivateKey struct { + PrivateKey + seed parsedMPI +} + +func (e *EdDSAPrivateKey) Sign(digest []byte) (R, S []byte, err error) { + r := bytes.NewReader(e.seed.bytes) + publicKey, privateKey, err := ed25519.GenerateKey(r) + if err != nil { + return nil, nil, err + } + + if !bytes.Equal(publicKey, e.PublicKey.edk.p.bytes[1:]) { // [1:] because [0] is 0x40 mpi header + return nil, nil, errors.UnsupportedError("EdDSA: Private key does not match public key.") + } + + sig := ed25519.Sign(privateKey, digest) + + sigLen := ed25519.SignatureSize / 2 + return sig[:sigLen], sig[sigLen:], nil +} + +func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDHPrivateKey(currentTime time.Time, priv *ecdh.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDHPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + + s2kType := buf[0] + + switch s2kType { + case 0: + pk.s2k = nil + pk.Encrypted = false + case 254, 255: + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.Encrypted = true + pk.s2k, err = s2k.Parse(r) + if err != nil { + return + } + if s2kType == 254 { + pk.sha1Checksum = true + } + // S2K == nil implies that we got a "GNU Dummy" S2K. For instance, + // because our master secret key is on a USB key in a vault somewhere. + // In that case, there is no further data to consume here. + if pk.s2k == nil { + pk.Encrypted = false + return + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + pk.encryptedData, err = ioutil.ReadAll(r) + if err != nil { + return + } + + if !pk.Encrypted { + return pk.parsePrivateKey(pk.encryptedData) + } + + return +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +// Encrypt is the counterpart to the Decrypt() method below. It encrypts +// the private key with the provided passphrase. If config is nil, then +// the standard, and sensible, defaults apply. +// +// A key will be derived from the given passphrase using S2K Specifier +// Type 3 (Iterated + Salted, see RFC-4880 Sec. 3.7.1.3). 
This choice
+// is hardcoded in s2k.Serialize(). S2KCount is hardcoded to 0, which is
+// equivalent to 65536. The hash algorithm for key derivation can be
+// set with config. The encrypted PrivateKey, using the algorithm specified
+// in config (if provided), is written out to the encryptedData member.
+// When Serialize() is called, this encryptedData member will be
+// serialized, using S2K Usage value of 254, and thus SHA1 checksum.
+func (pk *PrivateKey) Encrypt(passphrase []byte, config *Config) (err error) {
+	if pk.PrivateKey == nil {
+		return errors.InvalidArgumentError("there is no private key to encrypt")
+	}
+
+	pk.sha1Checksum = true
+	pk.cipher = config.Cipher()
+	s2kConfig := s2k.Config{
+		Hash:     config.Hash(),
+		S2KCount: 0,
+	}
+	s2kBuf := bytes.NewBuffer(nil)
+	derivedKey := make([]byte, pk.cipher.KeySize())
+	err = s2k.Serialize(s2kBuf, derivedKey, config.Random(), passphrase, &s2kConfig)
+	if err != nil {
+		return err
+	}
+
+	pk.s2kHeader = s2kBuf.Bytes()
+	// There is no good way to set pk.s2k other than to call s2k.Parse():
+	// we have all the information here, but most of the functions we would
+	// need are private to s2k.
+	pk.s2k, err = s2k.Parse(s2kBuf)
+	pk.iv = make([]byte, pk.cipher.blockSize())
+	if _, err = config.Random().Read(pk.iv); err != nil {
+		return err
+	}
+
+	privateKeyBuf := bytes.NewBuffer(nil)
+	if err = pk.serializePrivateKey(privateKeyBuf); err != nil {
+		return err
+	}
+
+	checksum := sha1.Sum(privateKeyBuf.Bytes())
+	if _, err = privateKeyBuf.Write(checksum[:]); err != nil {
+		return err
+	}
+
+	pkData := privateKeyBuf.Bytes()
+	block := pk.cipher.new(derivedKey)
+	pk.encryptedData = make([]byte, len(pkData))
+	cfb := cipher.NewCFBEncrypter(block, pk.iv)
+	cfb.XORKeyStream(pk.encryptedData, pkData)
+	pk.Encrypted = true
+	return nil
+}
+
+func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
+	buf := bytes.NewBuffer(nil)
+	err = pk.PublicKey.serializeWithoutHeaders(buf)
+	if err != nil {
+		return
+	}
+
+	privateKeyBuf := bytes.NewBuffer(nil)
+
+	if pk.PrivateKey == nil {
+		_, err = buf.Write([]byte{
+			254,           // SHA-1 Convention
+			9,             // Encryption scheme (AES256)
+			101,           // GNU Extensions
+			2,             // Hash value (SHA1)
+			'G', 'N', 'U', // "GNU" as a string
+			1, // Extension type 1001 (minus 1000)
+		})
+	} else if pk.Encrypted {
+		_, err = buf.Write([]byte{
+			254,             // SHA-1 Convention
+			byte(pk.cipher), // Encryption scheme
+		})
+		if err != nil {
+			return err
+		}
+		if _, err = buf.Write(pk.s2kHeader); err != nil {
+			return err
+		}
+		if _, err = buf.Write(pk.iv); err != nil {
+			return err
+		}
+		if _, err = privateKeyBuf.Write(pk.encryptedData); err != nil {
+			return err
+		}
+	} else {
+		buf.WriteByte(0 /* no encryption */)
+		if err = pk.serializePrivateKey(privateKeyBuf); err != nil {
+			return err
+		}
+	}
+
+	ptype := packetTypePrivateKey
+	contents := buf.Bytes()
+	privateKeyBytes := privateKeyBuf.Bytes()
+	if pk.IsSubkey {
+		ptype = packetTypePrivateSubkey
+	}
+	totalLen := len(contents) + len(privateKeyBytes)
+	if !pk.Encrypted {
+		totalLen += 2
+	}
+	err = serializeHeader(w, ptype, totalLen)
+	if err != nil {
+		return
+	}
+	_, err = w.Write(contents)
+	if err != nil {
+		return
+	}
+	_, err = w.Write(privateKeyBytes)
+	if err != nil {
+		return
+	}
+
+	if len(privateKeyBytes) > 0 && !pk.Encrypted {
+		checksum := mod64kHash(privateKeyBytes)
+		var checksumBytes [2]byte
+		checksumBytes[0] = byte(checksum >> 8)
+		checksumBytes[1] = byte(checksum)
+		_, err = w.Write(checksumBytes[:])
+	}
+
+	return
+}
+
+func (pk *PrivateKey) serializePrivateKey(w
io.Writer) (err error) { + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(w, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(w, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(w, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(w, priv) + case *ecdh.PrivateKey: + err = serializeECDHPrivateKey(w, priv) + case *EdDSAPrivateKey: + err = serializeEdDSAPrivateKey(w, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + + return err +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + err := writeBig(w, priv.D) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[1]) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[0]) + if err != nil { + return err + } + return writeBig(w, priv.Precomputed.Qinv) +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + return writeBig(w, priv.D) +} + +func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeEdDSAPrivateKey(w io.Writer, priv *EdDSAPrivateKey) error { + return writeMPI(w, priv.seed.bitLength, priv.seed.bytes) +} + +// Decrypt decrypts an encrypted private key using a passphrase. +func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if !pk.Encrypted { + return nil + } + // For GNU Dummy S2K, there's no key here, so don't do anything. + if pk.s2k == nil { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + return pk.parsePrivateKey(data) +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + case PubKeyAlgoECDH: + return pk.parseECDHPrivateKey(data) + case PubKeyAlgoEdDSA: + return pk.parseEdDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + 
} + p, _, err := readMPI(buf) + if err != nil { + return + } + q, _, err := readMPI(buf) + if err != nil { + return + } + + rsaPriv.D = new(big.Int).SetBytes(d) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q) + if err := rsaPriv.Validate(); err != nil { + return err + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + dsaPriv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = dsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + priv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = priv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) + priv := new(ecdh.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + + priv.X = new(big.Int).SetBytes(d) + pk.PrivateKey = priv + pk.Encrypted = false + pk.encryptedData = nil + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + ecdsaPriv := new(ecdsa.PrivateKey) + ecdsaPriv.PublicKey = *ecdsaPub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + + ecdsaPriv.D = new(big.Int).SetBytes(d) + pk.PrivateKey = ecdsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { + eddsaPriv := new(EdDSAPrivateKey) + eddsaPriv.PublicKey = pk.PublicKey + + buf := bytes.NewBuffer(data) + eddsaPriv.seed.bytes, eddsaPriv.seed.bitLength, err = readMPI(buf) + if err != nil { + return err + } + + if bLen := len(eddsaPriv.seed.bytes); bLen != 32 { // 32 bytes private part of ed25519 key. + return errors.UnsupportedError(fmt.Sprintf("Unexpected EdDSA private key length: %d", bLen)) + } + + pk.PrivateKey = eddsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go new file mode 100644 index 00000000..a46a008a --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go @@ -0,0 +1,990 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/keybase/go-crypto/brainpool" + "github.com/keybase/go-crypto/curve25519" + "github.com/keybase/go-crypto/ed25519" + "github.com/keybase/go-crypto/openpgp/ecdh" + "github.com/keybase/go-crypto/openpgp/elgamal" + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/s2k" + "github.com/keybase/go-crypto/rsa" +) + +var ( + // NIST curve P-224 + oidCurveP224 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x21} + // NIST curve P-256 + oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} + // NIST curve P-384 + oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} + // NIST curve P-521 + oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} + // Brainpool curve P-256r1 + oidCurveP256r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07} + // Brainpool curve P-384r1 + oidCurveP384r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B} + // Brainpool curve P-512r1 + oidCurveP512r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D} + // EdDSA + oidEdDSA []byte = []byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01} + // cv25519 + oidCurve25519 []byte = []byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01} +) + +const maxOIDLength = 10 + +// ecdsaKey stores the algorithm-specific fields for ECDSA keys. +// as defined in RFC 6637, Section 9. +type ecdsaKey struct { + // oid contains the OID byte sequence identifying the elliptic curve used + oid []byte + // p contains the elliptic curve point that represents the public key + p parsedMPI +} + +type edDSAkey struct { + ecdsaKey +} + +func copyFrontFill(dst, src []byte, length int) int { + if srcLen := len(src); srcLen < length { + return copy(dst[length-srcLen:], src[:]) + } else { + return copy(dst[:], src[:]) + } +} + +func (e *edDSAkey) Verify(payload []byte, r parsedMPI, s parsedMPI) bool { + const halfSigSize = ed25519.SignatureSize / 2 + var sig [ed25519.SignatureSize]byte + + // NOTE: The first byte is 0x40 - MPI header + // TODO: Maybe clean the code up and use 0x40 as a header when + // reading and keep only actual number in p field. Find out how + // other MPIs are stored. + key := e.p.bytes[1:] + + // Note: it may happen that R + S do not form 64-byte signature buffer that + // ed25519 expects, but because we copy it over to an array of exact size, + // we will always pass correctly sized slice to Verify. Slice too short + // would make ed25519 panic(). + copyFrontFill(sig[:halfSigSize], r.bytes, halfSigSize) + copyFrontFill(sig[halfSigSize:], s.bytes, halfSigSize) + + return ed25519.Verify(key, payload, sig[:]) +} + +// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. 
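+// The encoded field is a one-octet length followed by the body of the
+// DER-encoded OID; the ASN.1 tag (0x06) and DER length octets are not
+// included (RFC 6637, Section 11).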
+func parseOID(r io.Reader) (oid []byte, err error) { + buf := make([]byte, maxOIDLength) + if _, err = readFull(r, buf[:1]); err != nil { + return + } + oidLen := buf[0] + if int(oidLen) > len(buf) { + err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) + return + } + oid = buf[:oidLen] + _, err = readFull(r, oid) + return +} + +func (f *ecdsaKey) parse(r io.Reader) (err error) { + if f.oid, err = parseOID(r); err != nil { + return err + } + f.p.bytes, f.p.bitLength, err = readMPI(r) + return err +} + +func (f *ecdsaKey) serialize(w io.Writer) (err error) { + buf := make([]byte, maxOIDLength+1) + buf[0] = byte(len(f.oid)) + copy(buf[1:], f.oid) + if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { + return + } + return writeMPIs(w, f.p) +} + +func getCurveByOid(oid []byte) elliptic.Curve { + switch { + case bytes.Equal(oid, oidCurveP224): + return elliptic.P224() + case bytes.Equal(oid, oidCurveP256): + return elliptic.P256() + case bytes.Equal(oid, oidCurveP384): + return elliptic.P384() + case bytes.Equal(oid, oidCurveP521): + return elliptic.P521() + case bytes.Equal(oid, oidCurveP256r1): + return brainpool.P256r1() + case bytes.Equal(oid, oidCurveP384r1): + return brainpool.P384r1() + case bytes.Equal(oid, oidCurveP512r1): + return brainpool.P512r1() + case bytes.Equal(oid, oidCurve25519): + return curve25519.Cv25519() + default: + return nil + } +} + +func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { + var c = getCurveByOid(f.oid) + // Curve25519 should not be used in ECDSA. + if c == nil || bytes.Equal(f.oid, oidCurve25519) { + return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) + } + // Note: Unmarshal already checks if point is on curve. + x, y := elliptic.Unmarshal(c, f.p.bytes) + if x == nil { + return nil, errors.UnsupportedError("failed to parse EC point") + } + return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil +} + +func (f *ecdsaKey) newECDH() (*ecdh.PublicKey, error) { + var c = getCurveByOid(f.oid) + if c == nil { + return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) + } + // ecdh.Unmarshal handles unmarshaling for all curve types. It + // also checks if point is on curve. + x, y := ecdh.Unmarshal(c, f.p.bytes) + if x == nil { + return nil, errors.UnsupportedError("failed to parse EC point") + } + return &ecdh.PublicKey{Curve: c, X: x, Y: y}, nil +} + +func (f *ecdsaKey) byteLen() int { + return 1 + len(f.oid) + 2 + len(f.p.bytes) +} + +type kdfHashFunction byte +type kdfAlgorithm byte + +// ecdhKdf stores key derivation function parameters +// used for ECDH encryption. See RFC 6637, Section 9. +type ecdhKdf struct { + KdfHash kdfHashFunction + KdfAlgo kdfAlgorithm +} + +func (f *ecdhKdf) parse(r io.Reader) (err error) { + buf := make([]byte, 1) + if _, err = readFull(r, buf); err != nil { + return + } + kdfLen := int(buf[0]) + if kdfLen < 3 { + return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + buf = make([]byte, kdfLen) + if _, err = readFull(r, buf); err != nil { + return + } + reserved := int(buf[0]) + f.KdfHash = kdfHashFunction(buf[1]) + f.KdfAlgo = kdfAlgorithm(buf[2]) + if reserved != 0x01 { + return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) + } + return +} + +func (f *ecdhKdf) serialize(w io.Writer) (err error) { + buf := make([]byte, 4) + // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. 
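+	// Wire layout: a one-octet count of the remaining fields (3), one
+	// reserved octet (must be 1), the KDF hash ID, and the KEK cipher
+	// algorithm ID, mirroring what parse above reads.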
+ buf[0] = byte(0x03) // Length of the following fields + buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now + buf[2] = byte(f.KdfHash) + buf[3] = byte(f.KdfAlgo) + _, err = w.Write(buf[:]) + return +} + +func (f *ecdhKdf) byteLen() int { + return 4 +} + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey + Fingerprint [20]byte + KeyId uint64 + IsSubkey bool + + n, e, p, q, g, y parsedMPI + + // RFC 6637 fields + ec *ecdsaKey + ecdh *ecdhKdf + + // EdDSA fields (no RFC available), uses ecdsa scaffolding + edk *edDSAkey +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +func FromBig(n *big.Int) parsedMPI { + return parsedMPI{ + bytes: n.Bytes(), + bitLength: uint16(n.BitLen()), + } +} + +func FromBytes(bytes []byte) parsedMPI { + return parsedMPI{ + bytes: bytes, + bitLength: uint16(8 * len(bytes)), + } +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: FromBig(pub.N), + e: FromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: FromBig(pub.P), + q: FromBig(pub.Q), + g: FromBig(pub.G), + y: FromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// check EdDSA public key material. +// There is currently no RFC for it, but it doesn't mean it's not +// implemented or in use. +func (e *edDSAkey) check() error { + if !bytes.Equal(e.oid, oidEdDSA) { + return errors.UnsupportedError(fmt.Sprintf("Bad OID for EdDSA key: %v", e.oid)) + } + if bLen := len(e.p.bytes); bLen != 33 { // 32 bytes for ed25519 key and 1 byte for 0x40 header + return errors.UnsupportedError(fmt.Sprintf("Unexpected EdDSA public key length: %d", bLen)) + } + return nil +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: FromBig(pub.P), + g: FromBig(pub.G), + y: FromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func getCurveOid(curve elliptic.Curve) (res []byte, err error) { + switch curve { + case elliptic.P224(): + res = oidCurveP224 + case elliptic.P256(): + res = oidCurveP256 + case elliptic.P384(): + res = oidCurveP384 + case elliptic.P521(): + res = oidCurveP521 + case brainpool.P256r1(): + res = oidCurveP256r1 + case brainpool.P384r1(): + res = oidCurveP384r1 + case brainpool.P512r1(): + res = oidCurveP512r1 + case curve25519.Cv25519(): + res = oidCurve25519 + default: + err = errors.UnsupportedError("unknown curve") + } + return +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + ec: new(ecdsaKey), + } + oid, _ := getCurveOid(pub.Curve) + pk.ec.oid = oid + bs, bitLen := ecdh.Marshal(pub.Curve, pub.X, pub.Y) + pk.ec.p.bytes = bs + pk.ec.p.bitLength = uint16(bitLen) + + pk.setFingerPrintAndKeyId() + return pk +} + +func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + ec: new(ecdsaKey), + } + oid, _ := getCurveOid(pub.Curve) + pk.ec.oid = oid + bs, bitLen := ecdh.Marshal(pub.Curve, pub.X, pub.Y) + pk.ec.p.bytes = bs + pk.ec.p.bitLength = uint16(bitLen) + + hashbyte, _ := s2k.HashToHashId(crypto.SHA512) + pk.ecdh = &ecdhKdf{ + KdfHash: kdfHashFunction(hashbyte), + KdfAlgo: kdfAlgorithm(CipherAES256), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoEdDSA: + pk.edk = new(edDSAkey) + if err = pk.edk.parse(r); err != nil { + return err + } + err = pk.edk.check() + case PubKeyAlgoECDSA: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return err + } + pk.PublicKey, err = pk.ec.newECDSA() + case PubKeyAlgoECDH: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return + } + pk.ecdh = new(ecdhKdf) + if err = pk.ecdh.parse(r); err != nil { + return + } + pk.PublicKey, err = pk.ec.newECDH() + case PubKeyAlgoBadElGamal: + // Key has ElGamal format but nil-implementation - it will + // load but it's not possible to do any operations using this + // key. 
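+		// (This is algorithm 20, "ElGamal Encrypt or Sign"; see
+		// ErrorIfDeprecated, which reports such keys as deprecated.)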
+ err = pk.parseElGamal(r) + if err != nil { + pk.PublicKey = nil + } + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKey) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := sha1.New() + pk.SerializeSignaturePrefix(fingerPrint) + pk.serializeWithoutHeaders(fingerPrint) + copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n.bytes, pk.n.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.e.bytes, pk.e.bitLength, err = readMPI(r) + if err != nil { + return + } + + if len(pk.e.bytes) > 7 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.bytes), + E: 0, + } + // Warning: incompatibility with crypto/rsa: keybase fork uses + // int64 public exponents instead of int32. + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int64(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.q.bytes, pk.q.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.bytes) + dsa.Q = new(big.Int).SetBytes(pk.q.bytes) + dsa.G = new(big.Int).SetBytes(pk.g.bytes) + dsa.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.bytes) + elgamal.G = new(big.Int).SetBytes(pk.g.bytes) + elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = elgamal + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. 
+func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + case PubKeyAlgoDSA: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.q.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoECDSA: + pLength += uint16(pk.ec.byteLen()) + case PubKeyAlgoECDH: + pLength += uint16(pk.ec.byteLen()) + pLength += uint16(pk.ecdh.byteLen()) + case PubKeyAlgoEdDSA: + pLength += uint16(pk.edk.byteLen()) + default: + panic("unknown public key algorithm") + } + pLength += 6 + h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + case PubKeyAlgoDSA: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.q.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoECDSA: + length += pk.ec.byteLen() + case PubKeyAlgoECDH: + length += pk.ec.byteLen() + length += pk.ecdh.byteLen() + case PubKeyAlgoEdDSA: + length += pk.edk.byteLen() + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [6]byte + buf[0] = 4 + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + buf[5] = byte(pk.PubKeyAlgo) + + _, err = w.Write(buf[:]) + if err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + case PubKeyAlgoDSA: + return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) + case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal: + return writeMPIs(w, pk.p, pk.g, pk.y) + case PubKeyAlgoECDSA: + return pk.ec.serialize(w) + case PubKeyAlgoEdDSA: + return pk.edk.serialize(w) + case PubKeyAlgoECDH: + if err = pk.ec.serialize(w); err != nil { + return + } + return pk.ecdh.serialize(w) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. 
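+//
+// A minimal caller-side sketch (message holds the signed data and sig is an
+// already-parsed *Signature; both are assumed here):
+//
+//	h := sig.Hash.New()
+//	h.Write(message)
+//	err := pk.VerifySignature(h, sig) // nil iff the signature is valid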
+func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + + // NOTE(maxtaco) 2016-08-22 + // + // We used to do this: + // + // if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + // return errors.SignatureError("hash tag doesn't match") + // } + // + // But don't do anything in this case. Some GPGs generate bad + // 2-byte hash prefixes, but GPG also doesn't seem to care on + // import. See BrentMaxwell's key. I think it's safe to disable + // this check! + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + case PubKeyAlgoEdDSA: + if !pk.edk.Verify(hashBytes, sig.EdDSASigR, sig.EdDSASigS) { + return errors.SignatureError("EdDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } + panic("unreachable") +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) + if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + case PubKeyAlgoDSA: + dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
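+		// DSA signs at most the leftmost |q| bits of the digest, so a
+		// longer digest is truncated to the subgroup byte size.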
+ subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + default: + panic("shouldn't happen") + } + panic("unreachable") +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + updateKeySignatureHash(pk, signed, h) + + return +} + +// updateKeySignatureHash does the actual hash updates for keySignatureHash. +func updateKeySignatureHash(pk, signed signingKey, h hash.Hash) { + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + signed.SerializeSignaturePrefix(h) + signed.serializeWithoutHeaders(h) +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + + // BUG(maxtaco) + // + // We should check for more than FlagsSign here, because if + // you read keys.go, we can sometimes use signing subkeys even if they're + // not explicitly flagged as such. However, so doing fails lots of currently + // working tests, so I'm not going to do much here. + // + // In other words, we should have this disjunction in the condition above: + // + // || (!sig.FlagsValid && pk.PubKeyAlgo.CanSign()) { + // + + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. 
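+// The hash covers only revokedKey's material, so pk may be a designated
+// revoker rather than the revoked key itself.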
+func (pk *PublicKey) VerifyRevocationSignature(revokedKey *PublicKey, sig *Signature) (err error) { + h, err := keyRevocationHash(revokedKey, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +type teeHash struct { + h hash.Hash +} + +func (t teeHash) Write(b []byte) (n int, err error) { + fmt.Printf("hash -> %s %+v\n", string(b), b) + return t.h.Write(b) +} +func (t teeHash) Sum(b []byte) []byte { return t.h.Sum(b) } +func (t teeHash) Reset() { t.h.Reset() } +func (t teeHash) Size() int { return t.h.Size() } +func (t teeHash) BlockSize() int { return t.h.BlockSize() } + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + updateUserIdSignatureHash(id, pk, h) + + return +} + +// updateUserIdSignatureHash does the actual hash updates for +// userIdSignatureHash. +func updateUserIdSignatureHash(id string, pk *PublicKey, h hash.Hash) { + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// A parsedMPI is used to store the contents of a big integer, along with the +// bit length that was specified in the original input. This allows the MPI to +// be reserialized exactly. +type parsedMPI struct { + bytes []byte + bitLength uint16 +} + +// writeMPIs is a utility function for serializing several big integers to the +// given Writer. +func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { + for _, mpi := range mpis { + err = writeMPI(w, mpi.bitLength, mpi.bytes) + if err != nil { + return + } + } + return +} + +// BitLength returns the bit length for the given public key. Used for +// displaying key information, actual buffers and BigInts inside may +// have non-matching different size if the key is invalid. 
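+// That is, the value comes from the parsed MPI headers rather than from
+// re-measuring the key material, so it can disagree for malformed keys.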
+func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
+	switch pk.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+		bitLength = pk.n.bitLength
+	case PubKeyAlgoDSA:
+		bitLength = pk.p.bitLength
+	case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal:
+		bitLength = pk.p.bitLength
+	case PubKeyAlgoECDH:
+		ecdhPublicKey := pk.PublicKey.(*ecdh.PublicKey)
+		bitLength = uint16(ecdhPublicKey.Curve.Params().BitSize)
+	case PubKeyAlgoECDSA:
+		ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
+		bitLength = uint16(ecdsaPublicKey.Curve.Params().BitSize)
+	case PubKeyAlgoEdDSA:
+		// EdDSA only supports the ed25519 curve right now, so just
+		// return its length. There is also no PublicKey.Curve object
+		// to look the size up from.
+		bitLength = 256
+	default:
+		err = errors.InvalidArgumentError("bad public-key algorithm")
+	}
+	return
+}
+
+func (pk *PublicKey) ErrorIfDeprecated() error {
+	switch pk.PubKeyAlgo {
+	case PubKeyAlgoBadElGamal:
+		return errors.DeprecatedKeyError("ElGamal Encrypt or Sign (algo 20) is deprecated")
+	default:
+		return nil
+	}
+}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go
new file mode 100644
index 00000000..f75cbeab
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go
@@ -0,0 +1,282 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto"
+	"crypto/md5"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"io"
+	"math/big"
+	"strconv"
+	"time"
+
+	"github.com/keybase/go-crypto/openpgp/errors"
+	"github.com/keybase/go-crypto/rsa"
+)
+
+// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and
+// should not be used for signing or encrypting. They are supported here only for
+// parsing version 3 key material and validating signatures.
+// See RFC 4880, section 5.5.2.
+type PublicKeyV3 struct {
+	CreationTime time.Time
+	DaysToExpire uint16
+	PubKeyAlgo   PublicKeyAlgorithm
+	PublicKey    *rsa.PublicKey
+	Fingerprint  [16]byte
+	KeyId        uint64
+	IsSubkey     bool
+
+	n, e parsedMPI
+}
+
+// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey.
+// Included here for testing purposes only. RFC 4880, section 5.5.2:
+// "an implementation MUST NOT generate a V3 key, but MAY accept it."
+func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { + pk := &PublicKeyV3{ + CreationTime: creationTime, + PublicKey: pub, + n: FromBig(pub.N), + e: FromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKeyV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [8]byte + if _, err = readFull(r, buf[:]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKeyV3) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := md5.New() + fingerPrint.Write(pk.n.bytes) + fingerPrint.Write(pk.e.bytes) + fingerPrint.Sum(pk.Fingerprint[:0]) + pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { + if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { + return + } + if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { + return + } + + // RFC 4880 Section 12.2 requires the low 8 bytes of the + // modulus to form the key id. + if len(pk.n.bytes) < 8 { + return errors.StructuralError("v3 public key modulus is too short") + } + if len(pk.e.bytes) > 7 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} + // Warning: incompatibility with crypto/rsa: keybase fork uses + // int64 public exponents instead of int32. + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int64(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + default: + panic("unknown public key algorithm") + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { + length := 8 // 8 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + if err = serializeHeader(w, packetType, length); err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. 
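+// The v3 body is: a version octet (always 3 on output), a four-octet
+// big-endian creation time, a two-octet expiration in days, the algorithm
+// octet, and the algorithm-specific MPIs.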
+func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [8]byte + // Version 3 + buf[0] = 3 + // Creation time + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + // Days to expire + buf[5] = byte(pk.DaysToExpire >> 8) + buf[6] = byte(pk.DaysToExpire) + // Public key algorithm + buf[7] = byte(pk.PubKeyAlgo) + + if _, err = w.Write(buf[:]); err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKeyV3) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + default: + // V3 public keys only support RSA. + panic("shouldn't happen") + } + panic("unreachable") +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// userIdSignatureV3Hash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { + if !hfn.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hfn.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + h.Write([]byte(id)) + + return +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKeyV3) KeyIdString() string { + return fmt.Sprintf("%X", pk.KeyId) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). 
+func (pk *PublicKeyV3) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go new file mode 100644 index 00000000..957b3b89 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go @@ -0,0 +1,76 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/keybase/go-crypto/openpgp/errors" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. +func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go new file mode 100644 index 00000000..383a8a6a --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go @@ -0,0 +1,923 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package packet
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"io"
+	"strconv"
+	"time"
+
+	"github.com/keybase/go-crypto/openpgp/errors"
+	"github.com/keybase/go-crypto/openpgp/s2k"
+	"github.com/keybase/go-crypto/rsa"
+)
+
+const (
+	// See RFC 4880, section 5.2.3.21 for details.
+	KeyFlagCertify = 1 << iota
+	KeyFlagSign
+	KeyFlagEncryptCommunications
+	KeyFlagEncryptStorage
+)
+
+// Signer can be implemented by application code to do actual signing.
+type Signer interface {
+	hash.Hash
+	Sign(sig *Signature) error
+	KeyId() uint64
+	PublicKeyAlgo() PublicKeyAlgorithm
+}
+
+// RevocationKey represents a designated revoker packet. See RFC 4880
+// section 5.2.3.15 for details.
+type RevocationKey struct {
+	Class         byte
+	PublicKeyAlgo PublicKeyAlgorithm
+	Fingerprint   []byte
+}
+
+// KeyFlagBits holds a boolean recording whether any usage flags were
+// provided in the signature, and a BitField with the KeyFlag* flags.
+type KeyFlagBits struct {
+	Valid    bool
+	BitField byte
+}
+
+// Signature represents a signature. See RFC 4880, section 5.2.
+type Signature struct {
+	SigType    SignatureType
+	PubKeyAlgo PublicKeyAlgorithm
+	Hash       crypto.Hash
+
+	// HashSuffix is extra data that is hashed in after the signed data.
+	HashSuffix []byte
+	// HashTag contains the first two bytes of the hash for fast rejection
+	// of bad signed data.
+	HashTag      [2]byte
+	CreationTime time.Time
+
+	RSASignature         parsedMPI
+	DSASigR, DSASigS     parsedMPI
+	ECDSASigR, ECDSASigS parsedMPI
+	EdDSASigR, EdDSASigS parsedMPI
+
+	// rawSubpackets contains the unparsed subpackets, in order.
+	rawSubpackets []outputSubpacket
+
+	// The following are optional so are nil when not included in the
+	// signature.
+
+	SigLifetimeSecs, KeyLifetimeSecs                        *uint32
+	PreferredSymmetric, PreferredHash, PreferredCompression []uint8
+	PreferredKeyServer                                      string
+	IssuerKeyId                                             *uint64
+	IsPrimaryId                                             *bool
+	IssuerFingerprint                                       []byte
+
+	// FlagsValid is set if any flags were given. See RFC 4880, section
+	// 5.2.3.21 for details.
+	FlagsValid                                                           bool
+	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool
+
+	// RevocationReason is set if this signature has been revoked.
+	// See RFC 4880, section 5.2.3.23 for details.
+	RevocationReason     *uint8
+	RevocationReasonText string
+
+	// PolicyURI is optional. See RFC 4880, Section 5.2.3.20 for details.
+	PolicyURI string
+
+	// Regex is a regex that can match a PGP UID. See RFC 4880, Section
+	// 5.2.3.14 for details.
+	Regex string
+
+	// MDC is set if this signature has a feature packet that indicates
+	// support for MDC subpackets.
+	MDC bool
+
+	// EmbeddedSignature, if non-nil, is a signature of the parent key, by
+	// this key. This prevents an attacker from claiming another's signing
+	// subkey as their own.
+	EmbeddedSignature *Signature
+
+	// StubbedOutCriticalError is not fail-stop, since it shouldn't break key parsing
+	// when appearing in WoT-style cross signatures. But it should prevent a signature
+	// from being applied to a primary or subkey.
+	StubbedOutCriticalError error
+
+	// DesignatedRevoker will be present if this signature certifies a
+	// designated revoking key id (3rd party key that can sign
+	// revocation for this key).
+ DesignatedRevoker *RevocationKey + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + l := 6 + hashedSubpacketsLength + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + copy(sig.HashSuffix[1:], buf[:5]) + hashedSubpackets := sig.HashSuffix[6:l] + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + // See RFC 4880, section 5.2.4 + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = uint8(l >> 24) + trailer[3] = uint8(l >> 16) + trailer[4] = uint8(l >> 8) + trailer[5] = uint8(l) + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + } + case PubKeyAlgoEdDSA: + sig.EdDSASigR.bytes, sig.EdDSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.EdDSASigS.bytes, sig.EdDSASigS.bitLength, err = readMPI(r) + } + case PubKeyAlgoECDSA: + sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. 
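+// It is invoked once for the hashed and once for the unhashed area; the
+// creation-time check below rejects signatures whose hashed area lacks a
+// creation time.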
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + regularExpressionSubpacket signatureSubpacketType = 6 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + revocationKey signatureSubpacketType = 12 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + prefKeyServerSubpacket signatureSubpacketType = 24 + primaryUserIdSubpacket signatureSubpacketType = 25 + policyURISubpacket signatureSubpacketType = 26 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 + issuerFingerprint signatureSubpacketType = 33 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case 
prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + if subpacket[0] != 0 { + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(uint8) + *sig.RevocationReason = subpacket[0] + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. In practice, the subpacket is used exclusively to + // indicate support for MDC-protected encryption. + sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. 
+ if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + case policyURISubpacket: + // See RFC 4880, Section 5.2.3.20 + sig.PolicyURI = string(subpacket[:]) + case regularExpressionSubpacket: + sig.Regex = string(subpacket[:]) + if isCritical { + sig.StubbedOutCriticalError = errors.UnsupportedError("regex support is stubbed out") + } + case prefKeyServerSubpacket: + sig.PreferredKeyServer = string(subpacket[:]) + case issuerFingerprint: + // The first byte is how many bytes the fingerprint is, but we'll just + // read until the end of the subpacket, so we'll ignore it. + sig.IssuerFingerprint = append([]byte{}, subpacket[1:]...) + case revocationKey: + // Authorizes the specified key to issue revocation signatures + // for a key. + + // TODO: Class octet must have bit 0x80 set. If the bit 0x40 + // is set, then this means that the revocation information is + // sensitive. + sig.DesignatedRevoker = &RevocationKey{ + Class: subpacket[0], + PublicKeyAlgo: PublicKeyAlgorithm(subpacket[1]), + Fingerprint: append([]byte{}, subpacket[2:]...), + } + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. + if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired. +func (sig *Signature) KeyExpired(currentTime time.Time) bool { + if sig.KeyLifetimeSecs == nil { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// ExpiresBeforeOther checks if other signature has expiration at +// later date than sig. 
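+// A nil KeyLifetimeSecs is treated as a signature that never expires.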
+func (sig *Signature) ExpiresBeforeOther(other *Signature) bool { + if sig.KeyLifetimeSecs == nil { + // This sig never expires, or has infinitely long expiration + // time. + return false + } else if other.KeyLifetimeSecs == nil { + // This sig expires at some non-infinite point, but the other + // sig never expires. + return true + } + + getExpiryDate := func(s *Signature) time.Time { + return s.CreationTime.Add(time.Duration(*s.KeyLifetimeSecs) * time.Second) + } + + return getExpiryDate(other).After(getExpiryDate(sig)) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix() (err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + + var ok bool + l := 6 + hashedSubpacketsLen + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + sig.HashSuffix[1] = uint8(sig.SigType) + sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) + sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) + sig.HashSuffix[5] = byte(hashedSubpacketsLen) + serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = byte(l >> 24) + trailer[3] = byte(l >> 16) + trailer[4] = byte(l >> 8) + trailer[5] = byte(l) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + err = sig.buildHashSuffix() + if err != nil { + return + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + signer, hashIsSigner := h.(Signer) + + if !hashIsSigner && (priv == nil || priv.PrivateKey == nil) { + err = errors.InvalidArgumentError("attempting to sign with nil PrivateKey") + return + } + + sig.outSubpackets = sig.buildSubpackets() + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + + if hashIsSigner { + err = signer.Sign(sig) + return + } + + // Parameter check, if this is wrong we will make a signature but + // not serialize it later. + if sig.PubKeyAlgo != priv.PubKeyAlgo { + err = errors.InvalidArgumentError("signature pub key algo does not match priv key") + return + } + + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, err = rsa.SignPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), sig.Hash, digest) + sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
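The truncation performed next keeps only the leftmost octets of the digest so its length matches the DSA subgroup order, per FIPS 186-3, section 4.6. A standalone sketch of the same calculation, using a toy 160-bit order rather than a real key:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// A 160-bit subgroup order, as with classic DSA keys (toy value;
	// only its bit length matters here).
	q := new(big.Int).Lsh(big.NewInt(1), 159)

	digest := make([]byte, 32) // e.g. a SHA-256 digest
	subgroupSize := (q.BitLen() + 7) / 8
	if len(digest) > subgroupSize {
		digest = digest[:subgroupSize] // FIPS 186-3, section 4.6
	}
	fmt.Println(len(digest)) // 20
}
```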
+ subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err != nil { + return err + } + sig.DSASigR.bytes = r.Bytes() + sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) + sig.DSASigS.bytes = s.Bytes() + sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) + case PubKeyAlgoECDSA: + r, s, err := ecdsa.Sign(config.Random(), priv.PrivateKey.(*ecdsa.PrivateKey), digest) + if err != nil { + return err + } + sig.ECDSASigR = FromBig(r) + sig.ECDSASigS = FromBig(s) + case PubKeyAlgoEdDSA: + r, s, err := priv.PrivateKey.(*EdDSAPrivateKey).Sign(digest) + if err != nil { + return err + } + sig.EdDSASigR = FromBytes(r) + sig.EdDSASigS = FromBytes(s) + default: + err = errors.UnsupportedError("public key algorithm for signing: " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + + return +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// SignUserIdWithSigner computes a signature from priv, asserting that pub is a +// valid key for the identity id. On success, the signature is stored in sig. +// Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserIdWithSigner(id string, pub *PublicKey, s Signer, config *Config) error { + updateUserIdSignatureHash(id, pub, s) + + return sig.Sign(s, nil, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// SignKeyWithSigner computes a signature using s, asserting that +// signeePubKey is a subkey. On success, the signature is stored in sig. Call +// Serialize to write it out. If config is nil, sensible defaults will be used. +func (sig *Signature) SignKeyWithSigner(signeePubKey *PublicKey, signerPubKey *PublicKey, s Signer, config *Config) error { + updateKeySignatureHash(signerPubKey, signeePubKey, s) + + return sig.Sign(s, nil, config) +} + +// CrossSignKey creates PrimaryKeyBinding signature in sig.EmbeddedSignature by +// signing `primary` key's hash using `priv` subkey private key. Primary public +// key is the `signee` here. +func (sig *Signature) CrossSignKey(primary *PublicKey, priv *PrivateKey, config *Config) error { + if len(sig.outSubpackets) > 0 { + return fmt.Errorf("outSubpackets already exists, looks like CrossSignKey was called after Sign") + } + + sig.EmbeddedSignature = &Signature{ + CreationTime: sig.CreationTime, + SigType: SigTypePrimaryKeyBinding, + PubKeyAlgo: priv.PubKeyAlgo, + Hash: sig.Hash, + } + + h, err := keySignatureHash(primary, &priv.PublicKey, sig.Hash) + if err != nil { + return err + } + return sig.EmbeddedSignature.Sign(h, priv, config) +} + +// Serialize marshals sig to w. 
Sign, SignUserId or SignKey must have been +// called first. +func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature.bytes == nil && + sig.DSASigR.bytes == nil && + sig.ECDSASigR.bytes == nil && + sig.EdDSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = 2 + len(sig.RSASignature.bytes) + case PubKeyAlgoDSA: + sigLength = 2 + len(sig.DSASigR.bytes) + sigLength += 2 + len(sig.DSASigS.bytes) + case PubKeyAlgoEdDSA: + sigLength = 2 + len(sig.EdDSASigR.bytes) + sigLength += 2 + len(sig.EdDSASigS.bytes) + case PubKeyAlgoECDSA: + sigLength = 2 + len(sig.ECDSASigR.bytes) + sigLength += 2 + len(sig.ECDSASigS.bytes) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + + _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) + if err != nil { + return + } + + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + case PubKeyAlgoEdDSA: + err = writeMPIs(w, sig.EdDSASigR, sig.EdDSASigS) + case PubKeyAlgoECDSA: + err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) + } + + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. 
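buildSubpackets encodes each value big-endian, as with the 4-octet creation time at the top of the function. A small sketch of that encoding and its natural inverse:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

func main() {
	created := time.Date(2020, 3, 5, 0, 0, 0, 0, time.UTC)

	// Encode as buildSubpackets does: 4-octet big-endian Unix time.
	var body [4]byte
	binary.BigEndian.PutUint32(body[:], uint32(created.Unix()))

	// Decode the subpacket body back into a time.Time.
	back := time.Unix(int64(binary.BigEndian.Uint32(body[:])), 0).UTC()
	fmt.Println(back.Equal(created)) // true
}
```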
+ + if sig.FlagsValid { + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{sig.GetKeyFlags().BitField}}) + } + + // The following subpackets may only appear in self-signatures + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + if sig.EmbeddedSignature != nil { + buf := bytes.NewBuffer(nil) + if err := sig.EmbeddedSignature.Serialize(buf); err == nil { + byteContent := buf.Bytes()[2:] // skip 2-byte length header + subpackets = append(subpackets, outputSubpacket{false, embeddedSignatureSubpacket, true, byteContent}) + } + } + + return +} + +func (sig *Signature) GetKeyFlags() (ret KeyFlagBits) { + if !sig.FlagsValid { + return ret + } + + ret.Valid = true + if sig.FlagCertify { + ret.BitField |= KeyFlagCertify + } + if sig.FlagSign { + ret.BitField |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + ret.BitField |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + ret.BitField |= KeyFlagEncryptStorage + } + return ret +} + +func (f *KeyFlagBits) HasFlagCertify() bool { + return f.BitField&KeyFlagCertify != 0 +} + +func (f *KeyFlagBits) HasFlagSign() bool { + return f.BitField&KeyFlagSign != 0 +} + +func (f *KeyFlagBits) HasFlagEncryptCommunications() bool { + return f.BitField&KeyFlagEncryptCommunications != 0 +} + +func (f *KeyFlagBits) HasFlagEncryptStorage() bool { + return f.BitField&KeyFlagEncryptStorage != 0 +} + +func (f *KeyFlagBits) Merge(other KeyFlagBits) { + if other.Valid { + f.Valid = true + f.BitField |= other.BitField + } +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go new file mode 100644 index 00000000..dfca651b --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go @@ -0,0 +1,146 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" + + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/s2k" +) + +// SignatureV3 represents older version 3 signatures. These signatures are less secure +// than version 4 and should not be used to create new signatures. They are included +// here for backwards compatibility to read and validate with older key material. +// See RFC 4880, section 5.2.2. 
+type SignatureV3 struct { + SigType SignatureType + CreationTime time.Time + IssuerKeyId uint64 + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + HashTag [2]byte + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI +} + +func (sig *SignatureV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.2 + var buf [8]byte + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] != 5 { + err = errors.UnsupportedError( + "invalid hashed material length " + strconv.Itoa(int(buf[0]))) + return + } + + // Read hashed material: signature type + creation time + if _, err = readFull(r, buf[:5]); err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + t := binary.BigEndian.Uint32(buf[1:5]) + sig.CreationTime = time.Unix(int64(t), 0) + + // Eight-octet Key ID of signer. + if _, err = readFull(r, buf[:8]); err != nil { + return + } + sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) + + // Public-key and hash algorithm + if _, err = readFull(r, buf[:2]); err != nil { + return + } + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + var ok bool + if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + // Two-octet field holding left 16 bits of signed hash value. + if _, err = readFull(r, sig.HashTag[:2]); err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { + return + } + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + default: + panic("unreachable") + } + return +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. 
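In parse above, the five octets of hashed material are one signature-type octet followed by a 4-octet big-endian creation time. A self-contained sketch of that decode; the input bytes are hypothetical:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

func main() {
	// Hypothetical hashed material: type 0x00 (binary document
	// signature) plus a big-endian Unix timestamp.
	buf := []byte{0x00, 0x5e, 0x5f, 0x60, 0x61}

	sigType := buf[0]
	created := time.Unix(int64(binary.BigEndian.Uint32(buf[1:5])), 0).UTC()
	fmt.Printf("type=%d created=%s\n", sigType, created)
}
```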
+func (sig *SignatureV3) Serialize(w io.Writer) (err error) { + buf := make([]byte, 8) + + // Write the sig type and creation time + buf[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + // Write the issuer long key ID + binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) + if _, err = w.Write(buf[:8]); err != nil { + return + } + + // Write public key algorithm, hash ID, and hash value + buf[0] = byte(sig.PubKeyAlgo) + hashId, ok := s2k.HashToHashId(sig.Hash) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) + } + buf[1] = hashId + copy(buf[2:4], sig.HashTag[:]) + if _, err = w.Write(buf[:4]); err != nil { + return + } + + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + default: + panic("impossible") + } + return +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go new file mode 100644 index 00000000..b92c1d77 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go @@ -0,0 +1,158 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/cipher" + "io" + "strconv" + + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/s2k" +) + +// This is the largest session key that we'll support. Since no 512-bit cipher +// has even been seriously used, this is comfortably large. +const maxSessionKeySizeInBytes = 64 + +// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC +// 4880, section 5.3. +type SymmetricKeyEncrypted struct { + CipherFunc CipherFunction + s2k func(out, in []byte) + encryptedKey []byte +} + +const symmetricKeyEncryptedVersion = 4 + +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { + // RFC 4880, section 5.3. + var buf [2]byte + if _, err := readFull(r, buf[:]); err != nil { + return err + } + if buf[0] != symmetricKeyEncryptedVersion { + return errors.UnsupportedError("SymmetricKeyEncrypted version") + } + ske.CipherFunc = CipherFunction(buf[1]) + + if ske.CipherFunc.KeySize() == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) + } + + var err error + ske.s2k, err = s2k.Parse(r) + if err != nil { + return err + } + if ske.s2k == nil { + return errors.UnsupportedError("can't use dummy S2K for symmetric key encryption") + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. If it exists then we limit it to maxSessionKeySizeInBytes. 
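The read that follows relies on a partial read surfacing as io.ErrUnexpectedEOF, which is then treated as success: a short, or entirely absent, trailing field is legal. A stdlib-only sketch of the idiom (this package's readFull also folds a bare io.EOF into io.ErrUnexpectedEOF, so the single comparison suffices):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	r := bytes.NewReader([]byte{1, 2, 3}) // fewer bytes than the buffer

	buf := make([]byte, 64)
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.ErrUnexpectedEOF {
		panic(err) // a real I/O error
	}
	// A short read just means the optional field was smaller than the
	// maximum we allowed for.
	fmt.Println(n, buf[:n]) // 3 [1 2 3]
}
```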
+ n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if n != 0 { + if n == maxSessionKeySizeInBytes { + return errors.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + + return nil +} + +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + + "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") + } + return plaintextKey, cipherFunc, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The +// packet contains a random session key, encrypted by a key derived from the +// given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
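Decrypt above runs the encrypted blob through CFB with an all-zero IV, then treats the first recovered octet as the cipher algorithm and the remainder as the session key. A stdlib sketch of that shape using AES-128, with toy key material and no S2K step:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	keyEncryptingKey := make([]byte, 16) // toy: all zero; real code derives this via S2K
	sessionKey := []byte("0123456789abcdef")
	const cipherAES128 = 7 // RFC 4880, section 9.2

	block, err := aes.NewCipher(keyEncryptingKey)
	if err != nil {
		panic(err)
	}
	iv := make([]byte, block.BlockSize()) // the IV is all zeros

	// Encrypt [cipher octet || session key].
	ct := make([]byte, 1+len(sessionKey))
	cipher.NewCFBEncrypter(block, iv).XORKeyStream(ct, append([]byte{cipherAES128}, sessionKey...))

	// Decrypt it back, as SymmetricKeyEncrypted.Decrypt does.
	pt := make([]byte, len(ct))
	cipher.NewCFBDecrypter(block, iv).XORKeyStream(pt, ct)
	fmt.Println(pt[0] == cipherAES128, string(pt[1:])) // true 0123456789abcdef
}
```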
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + var buf [2]byte + buf[0] = symmetricKeyEncryptedVersion + buf[1] = byte(cipherFunc) + _, err = w.Write(buf[:]) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + + key = sessionKey + return +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 00000000..fd4f8f01 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,291 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "hash" + "io" + "strconv" + + "github.com/keybase/go-crypto/openpgp/errors" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted contents of the +// packet can be read. An incorrect key can, with high probability, be detected +// immediately and this will result in a KeyIncorrect error being returned. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. 
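NewOCFBDecrypter, called just below, returns nil when the OpenPGP quick check fails: the decrypted prefix is blockSize random octets followed by a repetition of the last two, so a wrong key is rejected early with high probability (RFC 4880, section 5.7). The check itself reduces to:

```go
package main

import "fmt"

// quickCheck reports whether a decrypted OpenPGP prefix (blockSize
// random octets plus two repeated octets) is internally consistent.
func quickCheck(prefix []byte, blockSize int) bool {
	return prefix[blockSize-2] == prefix[blockSize] &&
		prefix[blockSize-1] == prefix[blockSize+1]
}

func main() {
	good := []byte{9, 8, 7, 6, 5, 4, 3, 2, 3, 2} // blockSize 8, last two repeated
	bad := []byte{9, 8, 7, 6, 5, 4, 3, 2, 0, 0}
	fmt.Println(quickCheck(good, 8), quickCheck(bad, 8)) // true false
}
```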
+ ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + if s == nil { + return nil, errors.ErrKeyIncorrect + } + + plaintext := cipher.StreamReader{S: s, R: se.contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (MDC) packet. +const mdcPacketTagByte = byte(0x80) | 0x40 | 19 + +func (ser *seMDCReader) Close() error { + if ser.error { + return errors.SignatureError("error during reading") + } + + for !ser.eof { + // We haven't seen EOF so we need to read to the end + var buf [1024]byte + _, err := ser.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return errors.SignatureError("error during reading") + } + } + + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.SignatureError("MDC packet not found") + } + ser.h.Write(ser.trailer[:2]) + + final := ser.h.Sum(nil) + if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { + return errors.SignatureError("hash mismatch") + } + return nil +} + +// An seMDCWriter writes through to an io.WriteCloser while maintains a running +// hash of the data written. On close, it emits an MDC packet containing the +// running hash. 
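seMDCWriter, defined next, emits exactly this trailer: the running SHA-1 also covers the two trailer header octets 0xD3 (new-format tag 19) and 0x14 (the SHA-1 length). A compact sketch of producing and checking an MDC digest; note the real hash additionally covers the OCFB prefix seeded in Decrypt above:

```go
package main

import (
	"crypto/sha1"
	"crypto/subtle"
	"fmt"
)

func main() {
	body := []byte("plaintext to protect")
	header := []byte{0xD3, sha1.Size} // mdcPacketTagByte, then 20

	// Writer side: hash the body plus the two trailer header octets.
	h := sha1.New()
	h.Write(body)
	h.Write(header)
	digest := h.Sum(nil)

	// Reader side: recompute and compare in constant time.
	h2 := sha1.New()
	h2.Write(body)
	h2.Write(header)
	ok := subtle.ConstantTimeCompare(digest, h2.Sum(nil)) == 1
	fmt.Println(ok) // true
}
```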
+type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") + } + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) + if err != nil { + return + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go new file mode 100644 index 00000000..96a2b382 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "image" + "image/jpeg" + "io" + "io/ioutil" +) + +const UserAttrImageSubpacket = 1 + +// UserAttribute is capable of storing other types of data about a user +// beyond name, email and a text comment. In practice, user attributes are typically used +// to store a signed thumbnail photo JPEG image of the user. +// See RFC 4880, section 5.12. +type UserAttribute struct { + Contents []*OpaqueSubpacket +} + +// NewUserAttributePhoto creates a user attribute packet +// containing the given images. +func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { + uat = new(UserAttribute) + for _, photo := range photos { + var buf bytes.Buffer + // RFC 4880, Section 5.12.1. + data := []byte{ + 0x10, 0x00, // Little-endian image header length (16 bytes) + 0x01, // Image header version 1 + 0x01, // JPEG + 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
+ 0, 0, 0, 0, + 0, 0, 0, 0} + if _, err = buf.Write(data); err != nil { + return + } + if err = jpeg.Encode(&buf, photo, nil); err != nil { + return + } + uat.Contents = append(uat.Contents, &OpaqueSubpacket{ + SubType: UserAttrImageSubpacket, + Contents: buf.Bytes()}) + } + return +} + +// NewUserAttribute creates a new user attribute packet containing the given subpackets. +func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { + return &UserAttribute{Contents: contents} +} + +func (uat *UserAttribute) parse(r io.Reader) (err error) { + // RFC 4880, section 5.13 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uat.Contents, err = OpaqueSubpackets(b) + return +} + +// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including +// header. +func (uat *UserAttribute) Serialize(w io.Writer) (err error) { + var buf bytes.Buffer + for _, sp := range uat.Contents { + sp.Serialize(&buf) + } + if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { + return err + } + _, err = w.Write(buf.Bytes()) + return +} + +// ImageData returns zero or more byte slices, each containing +// JPEG File Interchange Format (JFIF), for each photo in the +// the user attribute packet. +func (uat *UserAttribute) ImageData() (imageData [][]byte) { + for _, sp := range uat.Contents { + if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { + imageData = append(imageData, sp.Contents[16:]) + } + } + return +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go new file mode 100644 index 00000000..d6bea7d4 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go @@ -0,0 +1,160 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + "io/ioutil" + "strings" +) + +// UserId contains text that is intended to represent the name and email +// address of the key holder. See RFC 4880, section 5.11. By convention, this +// takes the form "Full Name (Comment) " +type UserId struct { + Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. + + Name, Comment, Email string +} + +func hasInvalidCharacters(s string) bool { + for _, c := range s { + switch c { + case '(', ')', '<', '>', 0: + return true + } + } + return false +} + +// NewUserId returns a UserId or nil if any of the arguments contain invalid +// characters. The invalid characters are '\x00', '(', ')', '<' and '>' +func NewUserId(name, comment, email string) *UserId { + // RFC 4880 doesn't deal with the structure of userid strings; the + // name, comment and email form is just a convention. However, there's + // no convention about escaping the metacharacters and GPG just refuses + // to create user ids where, say, the name contains a '('. We mirror + // this behaviour. 
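NewUserId, whose body follows, assembles the conventional "Full Name (Comment) <email>" form, and parseUserId further down splits it again with a small state machine. A stdlib-only sketch of the round trip under the same invalid-character rule; the inverse here is crude and handles well-formed ids only:

```go
package main

import (
	"fmt"
	"strings"
)

func hasInvalid(s string) bool {
	return strings.ContainsAny(s, "()<>\x00")
}

func main() {
	name, comment, email := "Alice", "work", "alice@example.com"
	if hasInvalid(name) || hasInvalid(comment) || hasInvalid(email) {
		panic("metacharacters are rejected rather than escaped")
	}
	id := fmt.Sprintf("%s (%s) <%s>", name, comment, email)

	lp, rp := strings.Index(id, "("), strings.Index(id, ")")
	lt, gt := strings.Index(id, "<"), strings.Index(id, ">")
	fmt.Println(strings.TrimSpace(id[:lp])) // Alice
	fmt.Println(id[lp+1 : rp])              // work
	fmt.Println(id[lt+1 : gt])              // alice@example.com
}
```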
+ + if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { + return nil + } + + uid := new(UserId) + uid.Name, uid.Comment, uid.Email = name, comment, email + uid.Id = name + if len(comment) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "(" + uid.Id += comment + uid.Id += ")" + } + if len(email) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "<" + uid.Id += email + uid.Id += ">" + } + return uid +} + +func (uid *UserId) parse(r io.Reader) (err error) { + // RFC 4880, section 5.11 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uid.Id = string(b) + uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) + return +} + +// Serialize marshals uid to w in the form of an OpenPGP packet, including +// header. +func (uid *UserId) Serialize(w io.Writer) error { + err := serializeHeader(w, packetTypeUserId, len(uid.Id)) + if err != nil { + return err + } + _, err = w.Write([]byte(uid.Id)) + return err +} + +// parseUserId extracts the name, comment and email from a user id string that +// is formatted as "Full Name (Comment) ". +func parseUserId(id string) (name, comment, email string) { + var n, c, e struct { + start, end int + } + var state int + + for offset, rune := range id { + switch state { + case 0: + // Entering name + n.start = offset + state = 1 + fallthrough + case 1: + // In name + if rune == '(' { + state = 2 + n.end = offset + } else if rune == '<' { + state = 5 + n.end = offset + } + case 2: + // Entering comment + c.start = offset + state = 3 + fallthrough + case 3: + // In comment + if rune == ')' { + state = 4 + c.end = offset + } + case 4: + // Between comment and email + if rune == '<' { + state = 5 + } + case 5: + // Entering email + e.start = offset + state = 6 + fallthrough + case 6: + // In email + if rune == '>' { + state = 7 + e.end = offset + } + default: + // After email + } + } + switch state { + case 1: + // ended in the name + n.end = len(id) + case 3: + // ended in comment + c.end = len(id) + case 6: + // ended in email + e.end = len(id) + } + + name = strings.TrimSpace(id[n.start:n.end]) + comment = strings.TrimSpace(id[c.start:c.end]) + email = strings.TrimSpace(id[e.start:e.end]) + return +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/patch.sh b/vendor/github.com/keybase/go-crypto/openpgp/patch.sh new file mode 100644 index 00000000..23cacc83 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/patch.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +patch < sig-v3.patch +patch < s2k-gnu-dummy.patch +find . -type f -name '*.go' -exec sed -i'' -e 's/golang.org\/x\/crypto\/openpgp/github.com\/keybase\/go-crypto\/openpgp/' {} \; +find . -type f -name '*.go-e' -exec rm {} \; +go test ./... diff --git a/vendor/github.com/keybase/go-crypto/openpgp/read.go b/vendor/github.com/keybase/go-crypto/openpgp/read.go new file mode 100644 index 00000000..5caf7e39 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/read.go @@ -0,0 +1,507 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package openpgp implements high level operations on OpenPGP messages. 
+package openpgp // import "github.com/keybase/go-crypto/openpgp" + +import ( + "crypto" + "crypto/hmac" + _ "crypto/sha256" + "hash" + "io" + "strconv" + + "github.com/keybase/go-crypto/openpgp/armor" + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/packet" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + SignatureError error // nil if the signature is good. + Signature *packet.Signature // the signature packet itself, if v4 (default) + SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature + + // Does the Message include multiple signatures? Also called "nested signatures". + MultiSig bool + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. 
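A hedged usage sketch for ReadMessage, whose implementation follows. The file names and passphrase are hypothetical; the key point is that UnverifiedBody must be drained to EOF before SignatureError means anything:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/keybase/go-crypto/openpgp"
)

func main() {
	krFile, err := os.Open("keyring.asc") // hypothetical path
	if err != nil {
		panic(err)
	}
	keyring, err := openpgp.ReadArmoredKeyRing(krFile)
	if err != nil {
		panic(err)
	}

	msg, err := os.Open("message.gpg") // hypothetical path
	if err != nil {
		panic(err)
	}
	// Called only if an encrypted key or symmetric passphrase is needed.
	prompt := func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
		return []byte("hypothetical passphrase"), nil
	}
	md, err := openpgp.ReadMessage(msg, keyring, prompt, nil)
	if err != nil {
		panic(err)
	}

	// The signature (and any MDC) can only be trusted after the body
	// has been consumed to EOF.
	body, err := ioutil.ReadAll(md.UnverifiedBody)
	if err != nil {
		panic(err)
	}
	if md.IsSigned && md.SignatureError == nil {
		fmt.Printf("good signature over %d bytes\n", len(body))
	}
}
```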
+func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + var se *packet.SymmetricallyEncrypted + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. + md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId, nil) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted: + se = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. 
+FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if pk.key.PrivateKey.PrivateKey == nil { + // Key is stubbed + continue + } + if len(pk.encryptedKey.Key) == 0 { + err := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + if err != nil { + continue + } + } + if len(pk.encryptedKey.Key) == 0 { + continue + } + decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + if err == nil { + decrypted, err = se.Decrypt(cipherFunc, key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + return readSignedMessage(packets, md, keyring) +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. +func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if md.IsSigned { + // If IsSigned is set, it means we have multiple + // OnePassSignature packets. + md.MultiSig = true + if md.SignedBy != nil { + // We've already found the signature we were looking + // for, made by key that we had in keyring and can + // check signature against. Continue with that instead + // of trying to find another. + continue FindLiteralData + } + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md = nil + return + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, nil, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.SignedBy != nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. 
The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if !hashId.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) + } + h := hashId.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (n int, err error) { + n, err = cr.md.LiteralData.Body.Read(buf) + if err == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + return +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. +type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails +} + +func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { + n, err = scr.md.LiteralData.Body.Read(buf) + scr.wrappedHash.Write(buf[:n]) + if err == io.EOF { + for { + var p packet.Packet + p, scr.md.SignatureError = scr.packets.Next() + if scr.md.SignatureError != nil { + if scr.md.MultiSig { + // If we are in MultiSig, we might have found other + // signature that cannot be verified using our key. + // Clear Signature field so it's clear for consumers + // that this message failed to verify. + scr.md.Signature = nil + } + return + } + + var ok bool + if scr.md.Signature, ok = p.(*packet.Signature); ok { + var err error + if keyID := scr.md.Signature.IssuerKeyId; keyID != nil { + if *keyID != scr.md.SignedBy.PublicKey.KeyId { + if scr.md.MultiSig { + continue // try again to find a sig we can verify + } + err = errors.StructuralError("bad key id") + } + } + if fingerprint := scr.md.Signature.IssuerFingerprint; fingerprint != nil { + if !hmac.Equal(fingerprint, scr.md.SignedBy.PublicKey.Fingerprint[:]) { + if scr.md.MultiSig { + continue // try again to find a sig we can verify + } + err = errors.StructuralError("bad key fingerprint") + } + } + if err == nil { + err = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) + } + scr.md.SignatureError = err + } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) + } else { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") + return + } + + // Parse only one packet by default, unless message is MultiSig. Then + // we ask for more packets after discovering non-matching signature, + // until we find one that we can verify. + break + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. 
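For SigTypeText, hashForSignature above wraps the hash with NewCanonicalTextHash (defined elsewhere in this package) so that line endings are normalized before hashing. A stdlib-only sketch of the idea, assuming CRLF as the canonical form; the real writer is streaming:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// canonicalize maps line endings to CRLF (sketch only).
func canonicalize(s string) string {
	s = strings.ReplaceAll(s, "\r\n", "\n")
	return strings.ReplaceAll(s, "\n", "\r\n")
}

func main() {
	unix := "hello\nworld\n"
	dos := "hello\r\nworld\r\n"
	a := sha256.Sum256([]byte(canonicalize(unix)))
	b := sha256.Sum256([]byte(canonicalize(dos)))
	fmt.Println(a == b) // true: both hash the same canonical form
}
```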
+ if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + } + return +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the signer if the signature is valid. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + signer, _, err = checkDetachedSignature(keyring, signed, signature) + return signer, err +} + +func checkDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, issuer *uint64, err error) { + var issuerKeyId uint64 + var issuerFingerprint []byte + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + packets := packet.NewReader(signature) + for { + p, err = packets.Next() + if err == io.EOF { + return nil, nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, nil, err + } + + switch sig := p.(type) { + case *packet.Signature: + if sig.IssuerKeyId == nil { + return nil, nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + issuerFingerprint = sig.IssuerFingerprint + case *packet.SignatureV3: + issuerKeyId = sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + default: + return nil, nil, errors.StructuralError("non signature packet found") + } + + keys = keyring.KeysByIdUsage(issuerKeyId, issuerFingerprint, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, nil, err + } + + for _, key := range keys { + switch sig := p.(type) { + case *packet.Signature: + err = key.PublicKey.VerifySignature(h, sig) + case *packet.SignatureV3: + err = key.PublicKey.VerifySignatureV3(h, sig) + default: + panic("unreachable") + } + + if err == nil { + return key.Entity, &issuerKeyId, nil + } + } + + return nil, nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. +func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + signer, _, err = checkArmoredDetachedSignature(keyring, signed, signature) + return signer, err +} + +func checkArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, issuer *uint64, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + return checkDetachedSignature(keyring, signed, body) +} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go new file mode 100644 index 00000000..01bb6785 --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go @@ -0,0 +1,326 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package s2k implements the various OpenPGP string-to-key transforms as +// specified in RFC 4800 section 3.7.1. 
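A hedged usage sketch for the detached-verification entry points above; the paths are hypothetical, and ReadArmoredKeyRing and KeyIdString come from elsewhere in this package:

```go
package main

import (
	"fmt"
	"os"

	"github.com/keybase/go-crypto/openpgp"
)

func mustOpen(name string) *os.File {
	f, err := os.Open(name)
	if err != nil {
		panic(err)
	}
	return f
}

func main() {
	keyring, err := openpgp.ReadArmoredKeyRing(mustOpen("signer-pubkey.asc"))
	if err != nil {
		panic(err)
	}
	signer, err := openpgp.CheckArmoredDetachedSignature(
		keyring, mustOpen("release.tar.gz"), mustOpen("release.tar.gz.asc"))
	if err != nil {
		panic(err) // ErrUnknownIssuer when no keyring entry matches
	}
	fmt.Println("signed by", signer.PrimaryKey.KeyIdString())
}
```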
+package s2k // import "github.com/keybase/go-crypto/openpgp/s2k" + +import ( + "crypto" + "hash" + "io" + "strconv" + + "github.com/keybase/go-crypto/openpgp/errors" +) + +// Config collects configuration parameters for s2k key-stretching +// transformatioms. A nil *Config is valid and results in all default +// values. Currently, Config is used only by the Serialize function in +// this package. +type Config struct { + // Hash is the default hash function to be used. If + // nil, SHA1 is used. + Hash crypto.Hash + // S2KCount is only used for symmetric encryption. It + // determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 1024 and 65011712, inclusive. If Config + // is nil or S2KCount is 0, the value 65536 used. Not all + // values in the above range can be represented. S2KCount will + // be rounded up to the next representable value if it cannot + // be encoded exactly. When set, it is strongly encrouraged to + // use a value that is at least 65536. See RFC 4880 Section + // 3.7.1.3. + S2KCount int +} + +func (c *Config) hash() crypto.Hash { + if c == nil || uint(c.Hash) == 0 { + // SHA1 is the historical default in this package. + return crypto.SHA1 + } + + return c.Hash +} + +func (c *Config) encodedCount() uint8 { + if c == nil || c.S2KCount == 0 { + return 96 // The common case. Correspoding to 65536 + } + + i := c.S2KCount + switch { + // Behave like GPG. Should we make 65536 the lowest value used? + case i < 1024: + i = 1024 + case i > 65011712: + i = 65011712 + } + + return encodeCount(i) +} + +// encodeCount converts an iterative "count" in the range 1024 to +// 65011712, inclusive, to an encoded count. The return value is the +// octet that is actually stored in the GPG file. encodeCount panics +// if i is not in the above range (encodedCount above takes care to +// pass i in the correct range). See RFC 4880 Section 3.7.7.1. +func encodeCount(i int) uint8 { + if i < 1024 || i > 65011712 { + panic("count arg i outside the required range") + } + + for encoded := 0; encoded < 256; encoded++ { + count := decodeCount(uint8(encoded)) + if count >= i { + return uint8(encoded) + } + } + + return 255 +} + +// decodeCount returns the s2k mode 3 iterative "count" corresponding to +// the encoded octet c. +func decodeCount(c uint8) int { + return (16 + int(c&15)) << (uint32(c>>4) + 6) +} + +// Simple writes to out the result of computing the Simple S2K function (RFC +// 4880, section 3.7.1.1) using the given hash and input passphrase. +func Simple(out []byte, h hash.Hash, in []byte) { + Salted(out, h, in, nil) +} + +var zero [1]byte + +// Salted writes to out the result of computing the Salted S2K function (RFC +// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. +func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { + done := 0 + var digest []byte + + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + h.Write(salt) + h.Write(in) + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Iterated writes to out the result of computing the Iterated and Salted S2K +// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, +// salt and iteration count. 
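decodeCount above implements the one-octet count encoding of RFC 4880, section 3.7.1.3: a 4-bit mantissa (16 plus the low nibble) shifted left by the high nibble plus 6. A sketch of the extremes and the package default:

```go
package main

import "fmt"

// decodeCount mirrors the package's formula for the mode-3 count octet.
func decodeCount(c uint8) int {
	return (16 + int(c&15)) << (uint32(c>>4) + 6)
}

func main() {
	fmt.Println(decodeCount(0))   // 1024, the minimum
	fmt.Println(decodeCount(96))  // 65536, the package default
	fmt.Println(decodeCount(255)) // 65011712, the maximum
}
```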
+func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +func parseGNUExtensions(r io.Reader) (f func(out, in []byte), err error) { + var buf [9]byte + + // A three-byte string identifier + _, err = io.ReadFull(r, buf[:3]) + if err != nil { + return + } + gnuExt := string(buf[:3]) + + if gnuExt != "GNU" { + return nil, errors.UnsupportedError("Malformed GNU extension: " + gnuExt) + } + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + gnuExtType := int(buf[0]) + switch gnuExtType { + case 1: + return nil, nil + case 2: + // Read a serial number, which is prefixed by a 1-byte length. + // The maximum length is 16. + var lenBuf [1]byte + _, err = io.ReadFull(r, lenBuf[:]) + if err != nil { + return + } + + maxLen := 16 + ivLen := int(lenBuf[0]) + if ivLen > maxLen { + ivLen = maxLen + } + ivBuf := make([]byte, ivLen) + // For now we simply discard the IV + _, err = io.ReadFull(r, ivBuf) + if err != nil { + return + } + return nil, nil + default: + return nil, errors.UnsupportedError("unknown S2K GNU protection mode: " + strconv.Itoa(int(gnuExtType))) + } +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + var buf [9]byte + + _, err = io.ReadFull(r, buf[:2]) + if err != nil { + return + } + + // GNU Extensions; handle them before we try to look for a hash, which won't + // be needed in most cases anyway. + if buf[0] == 101 { + return parseGNUExtensions(r) + } + + hash, ok := HashIdToHash(buf[1]) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) + } + if !hash.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) + } + h := hash.New() + + switch buf[0] { + case 0: + f := func(out, in []byte) { + Simple(out, h, in) + } + return f, nil + case 1: + _, err = io.ReadFull(r, buf[:8]) + if err != nil { + return + } + f := func(out, in []byte) { + Salted(out, h, in, buf[:8]) + } + return f, nil + case 3: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return + } + count := decodeCount(buf[8]) + f := func(out, in []byte) { + Iterated(out, h, in, buf[:8], count) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. 
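Serialize, defined next, derives a key and writes the S2K descriptor; Parse, above, reads a descriptor back and returns a function that re-derives the same key. A round-trip sketch using the vendored import path, assuming a SHA-256 config and a throwaway passphrase:

```go
package main

import (
	"bytes"
	"crypto"
	"crypto/rand"
	_ "crypto/sha256"
	"fmt"

	"github.com/keybase/go-crypto/openpgp/s2k"
)

func main() {
	passphrase := []byte("correct horse battery staple")

	var descriptor bytes.Buffer
	key1 := make([]byte, 16)
	err := s2k.Serialize(&descriptor, key1, rand.Reader, passphrase,
		&s2k.Config{Hash: crypto.SHA256, S2KCount: 65536})
	if err != nil {
		panic(err)
	}

	f, err := s2k.Parse(&descriptor)
	if err != nil {
		panic(err)
	}
	key2 := make([]byte, 16)
	f(key2, passphrase)
	fmt.Println(bytes.Equal(key1, key2)) // true
}
```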
+func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
+	var buf [11]byte
+	buf[0] = 3 /* iterated and salted */
+	buf[1], _ = HashToHashId(c.hash())
+	salt := buf[2:10]
+	if _, err := io.ReadFull(rand, salt); err != nil {
+		return err
+	}
+	encodedCount := c.encodedCount()
+	count := decodeCount(encodedCount)
+	buf[10] = encodedCount
+	if _, err := w.Write(buf[:]); err != nil {
+		return err
+	}
+
+	Iterated(key, c.hash().New(), passphrase, salt, count)
+	return nil
+}
+
+// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with
+// Go's crypto.Hash type. See RFC 4880, section 9.4.
+var hashToHashIdMapping = []struct {
+	id   byte
+	hash crypto.Hash
+	name string
+}{
+	{1, crypto.MD5, "MD5"},
+	{2, crypto.SHA1, "SHA1"},
+	{3, crypto.RIPEMD160, "RIPEMD160"},
+	{8, crypto.SHA256, "SHA256"},
+	{9, crypto.SHA384, "SHA384"},
+	{10, crypto.SHA512, "SHA512"},
+	{11, crypto.SHA224, "SHA224"},
+}
+
+// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
+// hash id.
+func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
+	for _, m := range hashToHashIdMapping {
+		if m.id == id {
+			return m.hash, true
+		}
+	}
+	return 0, false
+}
+
+// HashIdToString returns the name of the hash function corresponding to the
+// given OpenPGP hash id, or ok=false if id is unknown.
+func HashIdToString(id byte) (name string, ok bool) {
+	for _, m := range hashToHashIdMapping {
+		if m.id == id {
+			return m.name, true
+		}
+	}
+
+	return "", false
+}
+
+// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
+func HashToHashId(h crypto.Hash) (id byte, ok bool) {
+	for _, m := range hashToHashIdMapping {
+		if m.hash == h {
+			return m.id, true
+		}
+	}
+	return 0, false
+}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/sig-v3.patch b/vendor/github.com/keybase/go-crypto/openpgp/sig-v3.patch
new file mode 100644
index 00000000..bfd764af
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/openpgp/sig-v3.patch
@@ -0,0 +1,135 @@
+diff --git a/openpgp/read.go b/openpgp/read.go
+index a6cecc5..0c9397b 100644
+--- a/openpgp/read.go
++++ b/openpgp/read.go
+@@ -56,8 +56,9 @@ type MessageDetails struct {
+ 	// been consumed. Once EOF has been seen, the following fields are
+ 	// valid. (An authentication code failure is reported as a
+ 	// SignatureError error when reading from UnverifiedBody.)
+-	SignatureError error             // nil if the signature is good.
+-	Signature      *packet.Signature // the signature packet itself.
++	SignatureError error             // nil if the signature is good.
++ Signature *packet.Signature // the signature packet itself, if v4 (default) ++ SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature + + decrypted io.ReadCloser + } +@@ -334,13 +335,15 @@ func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { + } + + var ok bool +- if scr.md.Signature, ok = p.(*packet.Signature); !ok { ++ if scr.md.Signature, ok = p.(*packet.Signature); ok { ++ scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) ++ } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { ++ scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) ++ } else { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") + return + } + +- scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) +- + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. +diff --git a/openpgp/read_test.go b/openpgp/read_test.go +index 52f942c..abe8d7b 100644 +--- a/openpgp/read_test.go ++++ b/openpgp/read_test.go +@@ -13,6 +13,7 @@ import ( + "strings" + "testing" + ++ "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + ) + +@@ -411,6 +412,50 @@ func TestIssue11504(t *testing.T) { + testReadMessageError(t, "9303000130303030303030303030983002303030303030030000000130") + } + ++// TestSignatureV3Message tests the verification of V3 signature, generated ++// with a modern V4-style key. Some people have their clients set to generate ++// V3 signatures, so it's useful to be able to verify them. ++func TestSignatureV3Message(t *testing.T) { ++ sig, err := armor.Decode(strings.NewReader(signedMessageV3)) ++ if err != nil { ++ t.Error(err) ++ return ++ } ++ key, err := ReadArmoredKeyRing(strings.NewReader(keyV4forVerifyingSignedMessageV3)) ++ if err != nil { ++ t.Error(err) ++ return ++ } ++ md, err := ReadMessage(sig.Body, key, nil, nil) ++ if err != nil { ++ t.Error(err) ++ return ++ } ++ ++ _, err = ioutil.ReadAll(md.UnverifiedBody) ++ if err != nil { ++ t.Error(err) ++ return ++ } ++ ++ // We'll see a sig error here after reading in the UnverifiedBody above, ++ // if there was one to see. 
++ if err = md.SignatureError; err != nil { ++ t.Error(err) ++ return ++ } ++ ++ if md.SignatureV3 == nil { ++ t.Errorf("No available signature after checking signature") ++ return ++ } ++ if md.Signature != nil { ++ t.Errorf("Did not expect a signature V4 back") ++ return ++ } ++ return ++} ++ + const testKey1KeyId = 0xA34D7E18C20C31BB + const testKey3KeyId = 0x338934250CCC0360 + +@@ -504,3 +549,36 @@ const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6 + const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + + const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` ++ ++const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- ++Comment: GPGTools - https://gpgtools.org ++ ++mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY ++BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z ++tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 ++JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV ++/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ ++K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H ++JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx ++YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1 ++b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi ++UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M ++pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM ++AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz ++786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd ++EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB ++=RZia ++-----END PGP PUBLIC KEY BLOCK----- ++` ++ ++const signedMessageV3 = `-----BEGIN PGP MESSAGE----- ++Comment: GPGTools - https://gpgtools.org ++ ++owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP ++q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka ++uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka ++DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d ++iT57d/OhWwA= ++=hG7R ++-----END PGP MESSAGE----- ++` diff --git a/vendor/github.com/keybase/go-crypto/openpgp/write.go b/vendor/github.com/keybase/go-crypto/openpgp/write.go new file mode 100644 index 00000000..89ef132b --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/openpgp/write.go @@ -0,0 +1,506 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "hash" + "io" + "strconv" + "time" + + "github.com/keybase/go-crypto/openpgp/armor" + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/openpgp/s2k" +) + +// DetachSign signs message with the private key from signer (which must +// already have been decrypted) and writes the signature to w. +// If config is nil, sensible defaults will be used. 
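+//
+// Illustrative use (assumes signer is a decrypted *Entity and bytes is
+// imported):
+//
+//	var sig bytes.Buffer
+//	err := DetachSign(&sig, signer, bytes.NewReader(msg), nil)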
+func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// ArmoredDetachSign signs message with the private key from signer (which +// must already have been decrypted) and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +// SignWithSigner signs the message of type sigType with s and writes the +// signature to w. +// If config is nil, sensible defaults will be used. +func SignWithSigner(s packet.Signer, w io.Writer, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + keyId := s.KeyId() + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = s.PublicKeyAlgo() + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sig.IssuerKeyId = &keyId + + s.Reset() + + wrapped := s.(hash.Hash) + + if sigType == packet.SigTypeText { + wrapped = NewCanonicalTextHash(s) + } + + io.Copy(wrapped, message) + + err = sig.Sign(s, nil, config) + if err != nil { + return + } + + err = sig.Serialize(w) + + return +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + signerSubkey, ok := signer.signingKey(config.Now()) + if !ok { + err = errors.InvalidArgumentError("no valid signing keys") + return + } + if signerSubkey.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signerSubkey.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signerSubkey.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sig.IssuerKeyId = &signerSubkey.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + io.Copy(wrappedHash, message) + + err = sig.Sign(h, signerSubkey.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. 
This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. +func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + + literaldata := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literaldata, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. +func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.signingKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. 
+	candidateHashes := []uint8{
+		hashToHashId(crypto.SHA256),
+		hashToHashId(crypto.SHA512),
+		hashToHashId(crypto.SHA1),
+		hashToHashId(crypto.RIPEMD160),
+	}
+
+	// If no preferences were specified, assume something safe and reasonable.
+	defaultCiphers := []uint8{
+		uint8(packet.CipherAES128),
+		uint8(packet.CipherAES192),
+		uint8(packet.CipherAES256),
+		uint8(packet.CipherCAST5),
+	}
+
+	defaultHashes := []uint8{
+		hashToHashId(crypto.SHA256),
+		hashToHashId(crypto.SHA512),
+		hashToHashId(crypto.RIPEMD160),
+	}
+
+	encryptKeys := make([]Key, len(to))
+	for i := range to {
+		var ok bool
+		encryptKeys[i], ok = to[i].encryptionKey(config.Now())
+		if !ok {
+			return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
+		}
+
+		sig := to[i].primaryIdentity().SelfSignature
+
+		preferredSymmetric := sig.PreferredSymmetric
+		if len(preferredSymmetric) == 0 {
+			preferredSymmetric = defaultCiphers
+		}
+		preferredHashes := sig.PreferredHash
+		if len(preferredHashes) == 0 {
+			preferredHashes = defaultHashes
+		}
+		candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric)
+		candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
+	}
+
+	if len(candidateCiphers) == 0 {
+		return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common ciphers")
+	}
+	if len(candidateHashes) == 0 {
+		return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common hashes")
+	}
+
+	cipher := packet.CipherFunction(candidateCiphers[0])
+	// If the cipher specified by config is a candidate, we'll use that.
+	configuredCipher := config.Cipher()
+	for _, c := range candidateCiphers {
+		cipherFunc := packet.CipherFunction(c)
+		if cipherFunc == configuredCipher {
+			cipher = cipherFunc
+			break
+		}
+	}
+
+	var hash crypto.Hash
+	for _, hashId := range candidateHashes {
+		if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
+			hash = h
+			break
+		}
+	}
+
+	// If the hash specified by config is a candidate, we'll use that.
+	if configuredHash := config.Hash(); configuredHash.Available() {
+		for _, hashId := range candidateHashes {
+			if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
+				hash = h
+				break
+			}
+		}
+	}
+
+	if hash == 0 {
+		hashId := candidateHashes[0]
+		name, ok := s2k.HashIdToString(hashId)
+		if !ok {
+			name = "#" + strconv.Itoa(int(hashId))
+		}
+		return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
+	}
+
+	symKey := make([]byte, cipher.KeySize())
+	if _, err := io.ReadFull(config.Random(), symKey); err != nil {
+		return nil, err
+	}
+
+	for _, key := range encryptKeys {
+		if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil {
+			return nil, err
+		}
+	}
+
+	encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
+	if err != nil {
+		return
+	}
+
+	if signer != nil {
+		ops := &packet.OnePassSignature{
+			SigType:    packet.SigTypeBinary,
+			Hash:       hash,
+			PubKeyAlgo: signer.PubKeyAlgo,
+			KeyId:      signer.KeyId,
+			IsLast:     true,
+		}
+		if err := ops.Serialize(encryptedData); err != nil {
+			return nil, err
+		}
+	}
+
+	if hints == nil {
+		hints = &FileHints{}
+	}
+
+	w := encryptedData
+	if signer != nil {
+		// If we need to write a signature packet after the literal
+		// data then we need to stop literalData from closing
+		// encryptedData.
+		w = noOpCloser{encryptedData}
+	}
+	var epochSeconds uint32
+	if !hints.ModTime.IsZero() {
+		epochSeconds = uint32(hints.ModTime.Unix())
+	}
+	literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
+	if err != nil {
+		return nil, err
+	}
+
+	if signer != nil {
+		return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil
+	}
+	return literalData, nil
+}
+
+// signatureWriter hashes the contents of a message while passing it along to
+// literalData. When closed, it closes literalData, writes a signature packet
+// to encryptedData and then also closes encryptedData.
+type signatureWriter struct {
+	encryptedData io.WriteCloser
+	literalData   io.WriteCloser
+	hashType      crypto.Hash
+	h             hash.Hash
+	signer        *packet.PrivateKey
+	config        *packet.Config
+}
+
+func (s signatureWriter) Write(data []byte) (int, error) {
+	s.h.Write(data)
+	return s.literalData.Write(data)
+}
+
+func (s signatureWriter) Close() error {
+	sig := &packet.Signature{
+		SigType:      packet.SigTypeBinary,
+		PubKeyAlgo:   s.signer.PubKeyAlgo,
+		Hash:         s.hashType,
+		CreationTime: s.config.Now(),
+		IssuerKeyId:  &s.signer.KeyId,
+	}
+
+	if err := sig.Sign(s.h, s.signer, s.config); err != nil {
+		return err
+	}
+	if err := s.literalData.Close(); err != nil {
+		return err
+	}
+	if err := sig.Serialize(s.encryptedData); err != nil {
+		return err
+	}
+	return s.encryptedData.Close()
+}
+
+// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
+// TODO: we have two of these in OpenPGP packages alone. This probably needs
+// to be promoted somewhere more common.
+type noOpCloser struct {
+	w io.Writer
+}
+
+func (c noOpCloser) Write(data []byte) (n int, err error) {
+	return c.w.Write(data)
+}
+
+func (c noOpCloser) Close() error {
+	return nil
+}
+
+// AttachedSign is like openpgp.Encrypt (as in p.crypto/openpgp/write.go), but
+// doesn't encrypt at all; it just signs the literal unencrypted data.
+// Unfortunately we need to duplicate some code here that's already
+// in write.go.
+func AttachedSign(out io.WriteCloser, signed Entity, hints *FileHints,
+	config *packet.Config) (in io.WriteCloser, err error) {
+
+	if hints == nil {
+		hints = &FileHints{}
+	}
+
+	if config == nil {
+		config = &packet.Config{}
+	}
+
+	var signer *packet.PrivateKey
+
+	signKey, ok := signed.signingKey(config.Now())
+	if !ok {
+		err = errors.InvalidArgumentError("no valid signing keys")
+		return
+	}
+	signer = signKey.PrivateKey
+	if signer == nil {
+		err = errors.InvalidArgumentError("no valid signing keys")
+		return
+	}
+	if signer.Encrypted {
+		err = errors.InvalidArgumentError("signing key must be decrypted")
+		return
+	}
+
+	if algo := config.Compression(); algo != packet.CompressionNone {
+		var compConfig *packet.CompressionConfig
+		if config != nil {
+			compConfig = config.CompressionConfig
+		}
+		out, err = packet.SerializeCompressed(out, algo, compConfig)
+		if err != nil {
+			return
+		}
+	}
+
+	hasher := crypto.SHA512
+
+	ops := &packet.OnePassSignature{
+		SigType:    packet.SigTypeBinary,
+		Hash:       hasher,
+		PubKeyAlgo: signer.PubKeyAlgo,
+		KeyId:      signer.KeyId,
+		IsLast:     true,
+	}
+
+	if err = ops.Serialize(out); err != nil {
+		return
+	}
+
+	var epochSeconds uint32
+	if !hints.ModTime.IsZero() {
+		epochSeconds = uint32(hints.ModTime.Unix())
+	}
+
+	// We don't want the literal serializer to close the output stream
+	// since we're going to need to write to it when we finish up the
+	// signature stuff.
+	in, err = packet.SerializeLiteral(noOpCloser{out}, hints.IsBinary, hints.FileName, epochSeconds)
+
+	if err != nil {
+		return
+	}
+
+	// Since we need to write a signature packet after the literal data,
+	// wrap the writers so that closing in finalizes the signature before
+	// out is closed.
+	in = signatureWriter{out, in, hasher, hasher.New(), signer, config}
+
+	return
+}
diff --git a/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go b/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go
new file mode 100644
index 00000000..5c5f415c
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go
@@ -0,0 +1,325 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rsa
+
+import (
+	"crypto"
+	"crypto/subtle"
+	"errors"
+	"io"
+	"math/big"
+)
+
+// This file implements encryption and decryption using PKCS#1 v1.5 padding.
+
+// PKCS1v15DecryptOptions is for passing options to PKCS#1 v1.5 decryption using
+// the crypto.Decrypter interface.
+type PKCS1v15DecryptOptions struct {
+	// SessionKeyLen is the length of the session key that is being
+	// decrypted. If not zero, then a padding error during decryption will
+	// cause a random plaintext of this length to be returned rather than
+	// an error. These alternatives happen in constant time.
+	SessionKeyLen int
+}
+
+// EncryptPKCS1v15 encrypts the given message with RSA and the padding scheme from PKCS#1 v1.5.
+// The message must be no longer than the length of the public modulus minus 11 bytes.
+//
+// The rand parameter is used as a source of entropy to ensure that encrypting
+// the same message twice doesn't result in the same ciphertext.
+//
+// WARNING: use of this function to encrypt plaintexts other than session keys
+// is dangerous. Use RSA OAEP in new protocols.
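+//
+// Illustrative use (rand.Reader from crypto/rand; pub a previously parsed
+// *PublicKey):
+//
+//	ciphertext, err := EncryptPKCS1v15(rand.Reader, pub, sessionKey)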
+func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) (out []byte, err error) { + if err := checkPub(pub); err != nil { + return nil, err + } + k := (pub.N.BitLen() + 7) / 8 + if len(msg) > k-11 { + err = ErrMessageTooLong + return + } + + // EM = 0x00 || 0x02 || PS || 0x00 || M + em := make([]byte, k) + em[1] = 2 + ps, mm := em[2:len(em)-len(msg)-1], em[len(em)-len(msg):] + err = nonZeroRandomBytes(ps, rand) + if err != nil { + return + } + em[len(em)-len(msg)-1] = 0 + copy(mm, msg) + + m := new(big.Int).SetBytes(em) + c := encrypt(new(big.Int), pub, m) + + copyWithLeftPad(em, c.Bytes()) + out = em + return +} + +// DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS#1 v1.5. +// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks. +// +// Note that whether this function returns an error or not discloses secret +// information. If an attacker can cause this function to run repeatedly and +// learn whether each instance returned an error then they can decrypt and +// forge signatures as if they had the private key. See +// DecryptPKCS1v15SessionKey for a way of solving this problem. +func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (out []byte, err error) { + if err := checkPub(&priv.PublicKey); err != nil { + return nil, err + } + valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext) + if err != nil { + return + } + if valid == 0 { + return nil, ErrDecryption + } + out = out[index:] + return +} + +// DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS#1 v1.5. +// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks. +// It returns an error if the ciphertext is the wrong length or if the +// ciphertext is greater than the public modulus. Otherwise, no error is +// returned. If the padding is valid, the resulting plaintext message is copied +// into key. Otherwise, key is unchanged. These alternatives occur in constant +// time. It is intended that the user of this function generate a random +// session key beforehand and continue the protocol with the resulting value. +// This will remove any possibility that an attacker can learn any information +// about the plaintext. +// See ``Chosen Ciphertext Attacks Against Protocols Based on the RSA +// Encryption Standard PKCS #1'', Daniel Bleichenbacher, Advances in Cryptology +// (Crypto '98). +// +// Note that if the session key is too small then it may be possible for an +// attacker to brute-force it. If they can do that then they can learn whether +// a random value was used (because it'll be different for the same ciphertext) +// and thus whether the padding was correct. This defeats the point of this +// function. Using at least a 16-byte key will protect against this attack. +func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) (err error) { + if err := checkPub(&priv.PublicKey); err != nil { + return err + } + k := (priv.N.BitLen() + 7) / 8 + if k-(len(key)+3+8) < 0 { + return ErrDecryption + } + + valid, em, index, err := decryptPKCS1v15(rand, priv, ciphertext) + if err != nil { + return + } + + if len(em) != k { + // This should be impossible because decryptPKCS1v15 always + // returns the full slice. 
+ return ErrDecryption + } + + valid &= subtle.ConstantTimeEq(int32(len(em)-index), int32(len(key))) + subtle.ConstantTimeCopy(valid, key, em[len(em)-len(key):]) + return +} + +// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if +// rand is not nil. It returns one or zero in valid that indicates whether the +// plaintext was correctly structured. In either case, the plaintext is +// returned in em so that it may be read independently of whether it was valid +// in order to maintain constant memory access patterns. If the plaintext was +// valid then index contains the index of the original message in em. +func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) { + k := (priv.N.BitLen() + 7) / 8 + if k < 11 { + err = ErrDecryption + return + } + + c := new(big.Int).SetBytes(ciphertext) + m, err := decrypt(rand, priv, c) + if err != nil { + return + } + + em = leftPad(m.Bytes(), k) + firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0) + secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2) + + // The remainder of the plaintext must be a string of non-zero random + // octets, followed by a 0, followed by the message. + // lookingForIndex: 1 iff we are still looking for the zero. + // index: the offset of the first zero byte. + lookingForIndex := 1 + + for i := 2; i < len(em); i++ { + equals0 := subtle.ConstantTimeByteEq(em[i], 0) + index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) + } + + // The PS padding must be at least 8 bytes long, and it starts two + // bytes into em. + validPS := subtle.ConstantTimeLessOrEq(2+8, index) + + valid = firstByteIsZero & secondByteIsTwo & (^lookingForIndex & 1) & validPS + index = subtle.ConstantTimeSelect(valid, index+1, 0) + return valid, em, index, nil +} + +// nonZeroRandomBytes fills the given slice with non-zero random octets. +func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { + _, err = io.ReadFull(rand, s) + if err != nil { + return + } + + for i := 0; i < len(s); i++ { + for s[i] == 0 { + _, err = io.ReadFull(rand, s[i:i+1]) + if err != nil { + return + } + // In tests, the PRNG may return all zeros so we do + // this to break the loop. + s[i] ^= 0x42 + } + } + + return +} + +// These are ASN1 DER structures: +// DigestInfo ::= SEQUENCE { +// digestAlgorithm AlgorithmIdentifier, +// digest OCTET STRING +// } +// For performance, we don't use the generic ASN1 encoder. Rather, we +// precompute a prefix of the digest value that makes a valid ASN1 DER string +// with the correct contents. 
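+//
+// For example, the crypto.SHA256 entry below is the DER encoding of a
+// DigestInfo carrying the SHA-256 OID (2.16.840.1.101.3.4.2.1) and the
+// header of a 32-byte OCTET STRING (0x04, 0x20); the signer appends the
+// 32 digest bytes to complete the structure.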
+var hashPrefixes = map[crypto.Hash][]byte{ + crypto.MD5: {0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, 0x05, 0x00, 0x04, 0x10}, + crypto.SHA1: {0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14}, + crypto.SHA224: {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c}, + crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20}, + crypto.SHA384: {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30}, + crypto.SHA512: {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40}, + crypto.MD5SHA1: {}, // A special TLS case which doesn't use an ASN1 prefix. + crypto.RIPEMD160: {0x30, 0x20, 0x30, 0x08, 0x06, 0x06, 0x28, 0xcf, 0x06, 0x03, 0x00, 0x31, 0x04, 0x14}, +} + +// SignPKCS1v15 calculates the signature of hashed using RSASSA-PKCS1-V1_5-SIGN from RSA PKCS#1 v1.5. +// Note that hashed must be the result of hashing the input message using the +// given hash function. If hash is zero, hashed is signed directly. This isn't +// advisable except for interoperability. +// +// If rand is not nil then RSA blinding will be used to avoid timing side-channel attacks. +// +// This function is deterministic. Thus, if the set of possible messages is +// small, an attacker may be able to build a map from messages to signatures +// and identify the signed messages. As ever, signatures provide authenticity, +// not confidentiality. +func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) (s []byte, err error) { + hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed)) + if err != nil { + return + } + + tLen := len(prefix) + hashLen + k := (priv.N.BitLen() + 7) / 8 + if k < tLen+11 { + return nil, ErrMessageTooLong + } + + // EM = 0x00 || 0x01 || PS || 0x00 || T + em := make([]byte, k) + em[1] = 1 + for i := 2; i < k-tLen-1; i++ { + em[i] = 0xff + } + copy(em[k-tLen:k-hashLen], prefix) + copy(em[k-hashLen:k], hashed) + + m := new(big.Int).SetBytes(em) + c, err := decryptAndCheck(rand, priv, m) + if err != nil { + return + } + + copyWithLeftPad(em, c.Bytes()) + s = em + return +} + +// VerifyPKCS1v15 verifies an RSA PKCS#1 v1.5 signature. +// hashed is the result of hashing the input message using the given hash +// function and sig is the signature. A valid signature is indicated by +// returning a nil error. If hash is zero then hashed is used directly. This +// isn't advisable except for interoperability. 
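+//
+// Illustrative use (sha256 from crypto/sha256):
+//
+//	digest := sha256.Sum256(message)
+//	err := VerifyPKCS1v15(pub, crypto.SHA256, digest[:], sig)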
+func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) (err error) { + hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed)) + if err != nil { + return + } + + tLen := len(prefix) + hashLen + k := (pub.N.BitLen() + 7) / 8 + if k < tLen+11 { + err = ErrVerification + return + } + + c := new(big.Int).SetBytes(sig) + m := encrypt(new(big.Int), pub, c) + em := leftPad(m.Bytes(), k) + // EM = 0x00 || 0x01 || PS || 0x00 || T + + ok := subtle.ConstantTimeByteEq(em[0], 0) + ok &= subtle.ConstantTimeByteEq(em[1], 1) + ok &= subtle.ConstantTimeCompare(em[k-hashLen:k], hashed) + ok &= subtle.ConstantTimeCompare(em[k-tLen:k-hashLen], prefix) + ok &= subtle.ConstantTimeByteEq(em[k-tLen-1], 0) + + for i := 2; i < k-tLen-1; i++ { + ok &= subtle.ConstantTimeByteEq(em[i], 0xff) + } + + if ok != 1 { + return ErrVerification + } + + return nil +} + +func pkcs1v15HashInfo(hash crypto.Hash, inLen int) (hashLen int, prefix []byte, err error) { + // Special case: crypto.Hash(0) is used to indicate that the data is + // signed directly. + if hash == 0 { + return inLen, nil, nil + } + + hashLen = hash.Size() + if inLen != hashLen { + return 0, nil, errors.New("crypto/rsa: input must be hashed message") + } + prefix, ok := hashPrefixes[hash] + if !ok { + return 0, nil, errors.New("crypto/rsa: unsupported hash function") + } + return +} + +// copyWithLeftPad copies src to the end of dest, padding with zero bytes as +// needed. +func copyWithLeftPad(dest, src []byte) { + numPaddingBytes := len(dest) - len(src) + for i := 0; i < numPaddingBytes; i++ { + dest[i] = 0 + } + copy(dest[numPaddingBytes:], src) +} diff --git a/vendor/github.com/keybase/go-crypto/rsa/pss.go b/vendor/github.com/keybase/go-crypto/rsa/pss.go new file mode 100644 index 00000000..8a94589b --- /dev/null +++ b/vendor/github.com/keybase/go-crypto/rsa/pss.go @@ -0,0 +1,297 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rsa + +// This file implements the PSS signature scheme [1]. +// +// [1] http://www.rsa.com/rsalabs/pkcs/files/h11300-wp-pkcs-1v2-2-rsa-cryptography-standard.pdf + +import ( + "bytes" + "crypto" + "errors" + "hash" + "io" + "math/big" +) + +func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) { + // See [1], section 9.1.1 + hLen := hash.Size() + sLen := len(salt) + emLen := (emBits + 7) / 8 + + // 1. If the length of M is greater than the input limitation for the + // hash function (2^61 - 1 octets for SHA-1), output "message too + // long" and stop. + // + // 2. Let mHash = Hash(M), an octet string of length hLen. + + if len(mHash) != hLen { + return nil, errors.New("crypto/rsa: input must be hashed message") + } + + // 3. If emLen < hLen + sLen + 2, output "encoding error" and stop. + + if emLen < hLen+sLen+2 { + return nil, errors.New("crypto/rsa: encoding error") + } + + em := make([]byte, emLen) + db := em[:emLen-sLen-hLen-2+1+sLen] + h := em[emLen-sLen-hLen-2+1+sLen : emLen-1] + + // 4. Generate a random octet string salt of length sLen; if sLen = 0, + // then salt is the empty string. + // + // 5. Let + // M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt; + // + // M' is an octet string of length 8 + hLen + sLen with eight + // initial zero octets. + // + // 6. Let H = Hash(M'), an octet string of length hLen. 
+
+	var prefix [8]byte
+
+	hash.Write(prefix[:])
+	hash.Write(mHash)
+	hash.Write(salt)
+
+	h = hash.Sum(h[:0])
+	hash.Reset()
+
+	// 7. Generate an octet string PS consisting of emLen - sLen - hLen - 2
+	//    zero octets. The length of PS may be 0.
+	//
+	// 8. Let DB = PS || 0x01 || salt; DB is an octet string of length
+	//    emLen - hLen - 1.
+
+	db[emLen-sLen-hLen-2] = 0x01
+	copy(db[emLen-sLen-hLen-1:], salt)
+
+	// 9. Let dbMask = MGF(H, emLen - hLen - 1).
+	//
+	// 10. Let maskedDB = DB \xor dbMask.
+
+	mgf1XOR(db, hash, h)
+
+	// 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
+	//     maskedDB to zero.
+
+	db[0] &= (0xFF >> uint(8*emLen-emBits))
+
+	// 12. Let EM = maskedDB || H || 0xbc.
+	em[emLen-1] = 0xBC
+
+	// 13. Output EM.
+	return em, nil
+}
+
+func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+	// 1. If the length of M is greater than the input limitation for the
+	//    hash function (2^61 - 1 octets for SHA-1), output "inconsistent"
+	//    and stop.
+	//
+	// 2. Let mHash = Hash(M), an octet string of length hLen.
+	hLen := hash.Size()
+	if hLen != len(mHash) {
+		return ErrVerification
+	}
+
+	// 3. If emLen < hLen + sLen + 2, output "inconsistent" and stop.
+	emLen := (emBits + 7) / 8
+	if emLen < hLen+sLen+2 {
+		return ErrVerification
+	}
+
+	// 4. If the rightmost octet of EM does not have hexadecimal value
+	//    0xbc, output "inconsistent" and stop.
+	if em[len(em)-1] != 0xBC {
+		return ErrVerification
+	}
+
+	// 5. Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and
+	//    let H be the next hLen octets.
+	db := em[:emLen-hLen-1]
+	h := em[emLen-hLen-1 : len(em)-1]
+
+	// 6. If the leftmost 8 * emLen - emBits bits of the leftmost octet in
+	//    maskedDB are not all equal to zero, output "inconsistent" and
+	//    stop.
+	if em[0]&(0xFF<<uint(8-(8*emLen-emBits))) != 0 {
+		return ErrVerification
+	}
+
+	// 7. Let dbMask = MGF(H, emLen - hLen - 1).
+	//
+	// 8. Let DB = maskedDB \xor dbMask.
+	mgf1XOR(db, hash, h)
+
+	// 9. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
+	//    DB to zero.
+	db[0] &= (0xFF >> uint(8*emLen-emBits))
+
+	if sLen == PSSSaltLengthAuto {
+	FindSaltLength:
+		for sLen = emLen - (hLen + 2); sLen >= 0; sLen-- {
+			switch db[emLen-hLen-sLen-2] {
+			case 1:
+				break FindSaltLength
+			case 0:
+				continue
+			default:
+				return ErrVerification
+			}
+		}
+		if sLen < 0 {
+			return ErrVerification
+		}
+	} else {
+		// 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero
+		//     or if the octet at position emLen - hLen - sLen - 1 (the leftmost
+		//     position is "position 1") does not have hexadecimal value 0x01,
+		//     output "inconsistent" and stop.
+		for _, e := range db[:emLen-hLen-sLen-2] {
+			if e != 0x00 {
+				return ErrVerification
+			}
+		}
+		if db[emLen-hLen-sLen-2] != 0x01 {
+			return ErrVerification
+		}
+	}
+
+	// 11. Let salt be the last sLen octets of DB.
+	salt := db[len(db)-sLen:]
+
+	// 12. Let
+	//         M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt ;
+	//     M' is an octet string of length 8 + hLen + sLen with eight
+	//     initial zero octets.
+	//
+	// 13. Let H' = Hash(M'), an octet string of length hLen.
+	var prefix [8]byte
+	hash.Write(prefix[:])
+	hash.Write(mHash)
+	hash.Write(salt)
+
+	h0 := hash.Sum(nil)
+
+	// 14. If H = H', output "consistent." Otherwise, output "inconsistent."
+	if !bytes.Equal(h0, h) {
+		return ErrVerification
+	}
+	return nil
+}
+
+// signPSSWithSalt calculates the signature of hashed using PSS [1] with specified salt.
+// Note that hashed must be the result of hashing the input message using the
+// given hash function. salt is a random sequence of bytes whose length will be
+// later used to verify the signature.
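+//
+// Callers normally reach this through SignPSS; e.g. (illustrative):
+//
+//	sig, err := SignPSS(rand.Reader, priv, crypto.SHA256, digest[:], nil)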
+func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) { + nBits := priv.N.BitLen() + em, err := emsaPSSEncode(hashed, nBits-1, salt, hash.New()) + if err != nil { + return + } + m := new(big.Int).SetBytes(em) + c, err := decryptAndCheck(rand, priv, m) + if err != nil { + return + } + s = make([]byte, (nBits+7)/8) + copyWithLeftPad(s, c.Bytes()) + return +} + +const ( + // PSSSaltLengthAuto causes the salt in a PSS signature to be as large + // as possible when signing, and to be auto-detected when verifying. + PSSSaltLengthAuto = 0 + // PSSSaltLengthEqualsHash causes the salt length to equal the length + // of the hash used in the signature. + PSSSaltLengthEqualsHash = -1 +) + +// PSSOptions contains options for creating and verifying PSS signatures. +type PSSOptions struct { + // SaltLength controls the length of the salt used in the PSS + // signature. It can either be a number of bytes, or one of the special + // PSSSaltLength constants. + SaltLength int + + // Hash, if not zero, overrides the hash function passed to SignPSS. + // This is the only way to specify the hash function when using the + // crypto.Signer interface. + Hash crypto.Hash +} + +// HashFunc returns pssOpts.Hash so that PSSOptions implements +// crypto.SignerOpts. +func (pssOpts *PSSOptions) HashFunc() crypto.Hash { + return pssOpts.Hash +} + +func (opts *PSSOptions) saltLength() int { + if opts == nil { + return PSSSaltLengthAuto + } + return opts.SaltLength +} + +// SignPSS calculates the signature of hashed using RSASSA-PSS [1]. +// Note that hashed must be the result of hashing the input message using the +// given hash function. The opts argument may be nil, in which case sensible +// defaults are used. +func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte, opts *PSSOptions) (s []byte, err error) { + saltLength := opts.saltLength() + switch saltLength { + case PSSSaltLengthAuto: + saltLength = (priv.N.BitLen()+7)/8 - 2 - hash.Size() + case PSSSaltLengthEqualsHash: + saltLength = hash.Size() + } + + if opts != nil && opts.Hash != 0 { + hash = opts.Hash + } + + salt := make([]byte, saltLength) + if _, err = io.ReadFull(rand, salt); err != nil { + return + } + return signPSSWithSalt(rand, priv, hash, hashed, salt) +} + +// VerifyPSS verifies a PSS signature. +// hashed is the result of hashing the input message using the given hash +// function and sig is the signature. A valid signature is indicated by +// returning a nil error. The opts argument may be nil, in which case sensible +// defaults are used. +func VerifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *PSSOptions) error { + return verifyPSS(pub, hash, hashed, sig, opts.saltLength()) +} + +// verifyPSS verifies a PSS signature with the given salt length. 
+func verifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, saltLen int) error {
+	nBits := pub.N.BitLen()
+	if len(sig) != (nBits+7)/8 {
+		return ErrVerification
+	}
+	s := new(big.Int).SetBytes(sig)
+	m := encrypt(new(big.Int), pub, s)
+	emBits := nBits - 1
+	emLen := (emBits + 7) / 8
+	if emLen < len(m.Bytes()) {
+		return ErrVerification
+	}
+	em := make([]byte, emLen)
+	copyWithLeftPad(em, m.Bytes())
+	if saltLen == PSSSaltLengthEqualsHash {
+		saltLen = hash.Size()
+	}
+	return emsaPSSVerify(hashed, em, emBits, saltLen, hash.New())
+}
diff --git a/vendor/github.com/keybase/go-crypto/rsa/rsa.go b/vendor/github.com/keybase/go-crypto/rsa/rsa.go
new file mode 100644
index 00000000..ff6b11b3
--- /dev/null
+++ b/vendor/github.com/keybase/go-crypto/rsa/rsa.go
@@ -0,0 +1,646 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rsa implements RSA encryption as specified in PKCS#1.
+//
+// RSA is a single, fundamental operation that is used in this package to
+// implement either public-key encryption or public-key signatures.
+//
+// The original specification for encryption and signatures with RSA is PKCS#1
+// and the terms "RSA encryption" and "RSA signatures" by default refer to
+// PKCS#1 version 1.5. However, that specification has flaws and new designs
+// should use version two, usually called by just OAEP and PSS, where
+// possible.
+//
+// Two sets of interfaces are included in this package. When a more abstract
+// interface isn't necessary, there are functions for encrypting/decrypting
+// with v1.5/OAEP and signing/verifying with v1.5/PSS. If one needs to abstract
+// over the public-key primitive, the PrivateKey struct implements the
+// Decrypter and Signer interfaces from the crypto package.
+package rsa
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/subtle"
+	"errors"
+	"hash"
+	"io"
+	"math/big"
+)
+
+var bigZero = big.NewInt(0)
+var bigOne = big.NewInt(1)
+
+// A PublicKey represents the public part of an RSA key.
+type PublicKey struct {
+	N *big.Int // modulus
+	E int64    // public exponent
+}
+
+// OAEPOptions is an interface for passing options to OAEP decryption using the
+// crypto.Decrypter interface.
+type OAEPOptions struct {
+	// Hash is the hash function that will be used when generating the mask.
+	Hash crypto.Hash
+	// Label is an arbitrary byte string that must be equal to the value
+	// used when encrypting.
+	Label []byte
+}
+
+var (
+	errPublicModulus       = errors.New("crypto/rsa: missing public modulus")
+	errPublicExponentSmall = errors.New("crypto/rsa: public exponent too small")
+	errPublicExponentLarge = errors.New("crypto/rsa: public exponent too large")
+)
+
+// checkPub sanity checks the public key before we use it.
+// We require pub.E to fit into a 32-bit integer so that we
+// do not have different behavior depending on whether
+// int is 32 or 64 bits. See also
+// http://www.imperialviolet.org/2012/03/16/rsae.html.
+func checkPub(pub *PublicKey) error {
+	if pub.N == nil {
+		return errPublicModulus
+	}
+	if pub.E < 2 {
+		return errPublicExponentSmall
+	}
+	if pub.E > 1<<63-1 {
+		return errPublicExponentLarge
+	}
+	return nil
+}
+
+// A PrivateKey represents an RSA key.
+type PrivateKey struct {
+	PublicKey            // public part.
+	D         *big.Int   // private exponent
+	Primes    []*big.Int // prime factors of N, has >= 2 elements.
+ + // Precomputed contains precomputed values that speed up private + // operations, if available. + Precomputed PrecomputedValues +} + +// Public returns the public key corresponding to priv. +func (priv *PrivateKey) Public() crypto.PublicKey { + return &priv.PublicKey +} + +// Sign signs msg with priv, reading randomness from rand. If opts is a +// *PSSOptions then the PSS algorithm will be used, otherwise PKCS#1 v1.5 will +// be used. This method is intended to support keys where the private part is +// kept in, for example, a hardware module. Common uses should use the Sign* +// functions in this package. +func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) { + if pssOpts, ok := opts.(*PSSOptions); ok { + return SignPSS(rand, priv, pssOpts.Hash, msg, pssOpts) + } + + return SignPKCS1v15(rand, priv, opts.HashFunc(), msg) +} + +// Decrypt decrypts ciphertext with priv. If opts is nil or of type +// *PKCS1v15DecryptOptions then PKCS#1 v1.5 decryption is performed. Otherwise +// opts must have type *OAEPOptions and OAEP decryption is done. +func (priv *PrivateKey) Decrypt(rand io.Reader, ciphertext []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) { + if opts == nil { + return DecryptPKCS1v15(rand, priv, ciphertext) + } + + switch opts := opts.(type) { + case *OAEPOptions: + return DecryptOAEP(opts.Hash.New(), rand, priv, ciphertext, opts.Label) + + case *PKCS1v15DecryptOptions: + if l := opts.SessionKeyLen; l > 0 { + plaintext = make([]byte, l) + if _, err := io.ReadFull(rand, plaintext); err != nil { + return nil, err + } + if err := DecryptPKCS1v15SessionKey(rand, priv, ciphertext, plaintext); err != nil { + return nil, err + } + return plaintext, nil + } else { + return DecryptPKCS1v15(rand, priv, ciphertext) + } + + default: + return nil, errors.New("crypto/rsa: invalid options for Decrypt") + } +} + +type PrecomputedValues struct { + Dp, Dq *big.Int // D mod (P-1) (or mod Q-1) + Qinv *big.Int // Q^-1 mod P + + // CRTValues is used for the 3rd and subsequent primes. Due to a + // historical accident, the CRT for the first two primes is handled + // differently in PKCS#1 and interoperability is sufficiently + // important that we mirror this. + CRTValues []CRTValue +} + +// CRTValue contains the precomputed Chinese remainder theorem values. +type CRTValue struct { + Exp *big.Int // D mod (prime-1). + Coeff *big.Int // R·Coeff ≡ 1 mod Prime. + R *big.Int // product of primes prior to this (inc p and q). +} + +// Validate performs basic sanity checks on the key. +// It returns nil if the key is valid, or else an error describing a problem. +func (priv *PrivateKey) Validate() error { + if err := checkPub(&priv.PublicKey); err != nil { + return err + } + + // Check that Πprimes == n. + modulus := new(big.Int).Set(bigOne) + for _, prime := range priv.Primes { + // Any primes ≤ 1 will cause divide-by-zero panics later. + if prime.Cmp(bigOne) <= 0 { + return errors.New("crypto/rsa: invalid prime value") + } + modulus.Mul(modulus, prime) + } + if modulus.Cmp(priv.N) != 0 { + return errors.New("crypto/rsa: invalid modulus") + } + + // Check that de ≡ 1 mod p-1, for each prime. + // This implies that e is coprime to each p-1 as e has a multiplicative + // inverse. Therefore e is coprime to lcm(p-1,q-1,r-1,...) = + // exponent(ℤ/nℤ). It also implies that a^de ≡ a mod p as a^(p-1) ≡ 1 + // mod p. Thus a^de ≡ a mod n for all a coprime to n, as required. 
+ congruence := new(big.Int) + de := new(big.Int).SetInt64(int64(priv.E)) + de.Mul(de, priv.D) + for _, prime := range priv.Primes { + pminus1 := new(big.Int).Sub(prime, bigOne) + congruence.Mod(de, pminus1) + if congruence.Cmp(bigOne) != 0 { + return errors.New("crypto/rsa: invalid exponents") + } + } + return nil +} + +// GenerateKey generates an RSA keypair of the given bit size using the +// random source random (for example, crypto/rand.Reader). +func GenerateKey(random io.Reader, bits int) (priv *PrivateKey, err error) { + return GenerateMultiPrimeKey(random, 2, bits) +} + +// GenerateMultiPrimeKey generates a multi-prime RSA keypair of the given bit +// size and the given random source, as suggested in [1]. Although the public +// keys are compatible (actually, indistinguishable) from the 2-prime case, +// the private keys are not. Thus it may not be possible to export multi-prime +// private keys in certain formats or to subsequently import them into other +// code. +// +// Table 1 in [2] suggests maximum numbers of primes for a given size. +// +// [1] US patent 4405829 (1972, expired) +// [2] http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf +func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.E = 65537 + + if nprimes < 2 { + return nil, errors.New("crypto/rsa: GenerateMultiPrimeKey: nprimes must be >= 2") + } + + primes := make([]*big.Int, nprimes) + +NextSetOfPrimes: + for { + todo := bits + // crypto/rand should set the top two bits in each prime. + // Thus each prime has the form + // p_i = 2^bitlen(p_i) × 0.11... (in base 2). + // And the product is: + // P = 2^todo × α + // where α is the product of nprimes numbers of the form 0.11... + // + // If α < 1/2 (which can happen for nprimes > 2), we need to + // shift todo to compensate for lost bits: the mean value of 0.11... + // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 + // will give good results. + if nprimes >= 7 { + todo += (nprimes - 2) / 5 + } + for i := 0; i < nprimes; i++ { + primes[i], err = rand.Prime(random, todo/(nprimes-i)) + if err != nil { + return nil, err + } + todo -= primes[i].BitLen() + } + + // Make sure that primes is pairwise unequal. + for i, prime := range primes { + for j := 0; j < i; j++ { + if prime.Cmp(primes[j]) == 0 { + continue NextSetOfPrimes + } + } + } + + n := new(big.Int).Set(bigOne) + totient := new(big.Int).Set(bigOne) + pminus1 := new(big.Int) + for _, prime := range primes { + n.Mul(n, prime) + pminus1.Sub(prime, bigOne) + totient.Mul(totient, pminus1) + } + if n.BitLen() != bits { + // This should never happen for nprimes == 2 because + // crypto/rand should set the top two bits in each prime. + // For nprimes > 2 we hope it does not happen often. + continue NextSetOfPrimes + } + + g := new(big.Int) + priv.D = new(big.Int) + y := new(big.Int) + e := big.NewInt(int64(priv.E)) + g.GCD(priv.D, y, e, totient) + + if g.Cmp(bigOne) == 0 { + if priv.D.Sign() < 0 { + priv.D.Add(priv.D, totient) + } + priv.Primes = primes + priv.N = n + + break + } + } + + priv.Precompute() + return +} + +// incCounter increments a four byte, big-endian counter. +func incCounter(c *[4]byte) { + if c[3]++; c[3] != 0 { + return + } + if c[2]++; c[2] != 0 { + return + } + if c[1]++; c[1] != 0 { + return + } + c[0]++ +} + +// mgf1XOR XORs the bytes in out with a mask generated using the MGF1 function +// specified in PKCS#1 v2.1. 
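+//
+// For example (illustrative), generating a 32-byte MGF1 mask from seed:
+// out must start zeroed, since the mask is XORed into it:
+//
+//	mask := make([]byte, 32)
+//	mgf1XOR(mask, sha1.New(), seed)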
+func mgf1XOR(out []byte, hash hash.Hash, seed []byte) { + var counter [4]byte + var digest []byte + + done := 0 + for done < len(out) { + hash.Write(seed) + hash.Write(counter[0:4]) + digest = hash.Sum(digest[:0]) + hash.Reset() + + for i := 0; i < len(digest) && done < len(out); i++ { + out[done] ^= digest[i] + done++ + } + incCounter(&counter) + } +} + +// ErrMessageTooLong is returned when attempting to encrypt a message which is +// too large for the size of the public key. +var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA public key size") + +func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int { + e := big.NewInt(int64(pub.E)) + c.Exp(m, e, pub.N) + return c +} + +// EncryptOAEP encrypts the given message with RSA-OAEP. +// +// OAEP is parameterised by a hash function that is used as a random oracle. +// Encryption and decryption of a given message must use the same hash function +// and sha256.New() is a reasonable choice. +// +// The random parameter is used as a source of entropy to ensure that +// encrypting the same message twice doesn't result in the same ciphertext. +// +// The label parameter may contain arbitrary data that will not be encrypted, +// but which gives important context to the message. For example, if a given +// public key is used to decrypt two types of messages then distinct label +// values could be used to ensure that a ciphertext for one purpose cannot be +// used for another by an attacker. If not required it can be empty. +// +// The message must be no longer than the length of the public modulus less +// twice the hash length plus 2. +func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) (out []byte, err error) { + if err := checkPub(pub); err != nil { + return nil, err + } + hash.Reset() + k := (pub.N.BitLen() + 7) / 8 + if len(msg) > k-2*hash.Size()-2 { + err = ErrMessageTooLong + return + } + + hash.Write(label) + lHash := hash.Sum(nil) + hash.Reset() + + em := make([]byte, k) + seed := em[1 : 1+hash.Size()] + db := em[1+hash.Size():] + + copy(db[0:hash.Size()], lHash) + db[len(db)-len(msg)-1] = 1 + copy(db[len(db)-len(msg):], msg) + + _, err = io.ReadFull(random, seed) + if err != nil { + return + } + + mgf1XOR(db, hash, seed) + mgf1XOR(seed, hash, db) + + m := new(big.Int) + m.SetBytes(em) + c := encrypt(new(big.Int), pub, m) + out = c.Bytes() + + if len(out) < k { + // If the output is too small, we need to left-pad with zeros. + t := make([]byte, k) + copy(t[k-len(out):], out) + out = t + } + + return +} + +// ErrDecryption represents a failure to decrypt a message. +// It is deliberately vague to avoid adaptive attacks. +var ErrDecryption = errors.New("crypto/rsa: decryption error") + +// ErrVerification represents a failure to verify a signature. +// It is deliberately vague to avoid adaptive attacks. +var ErrVerification = errors.New("crypto/rsa: verification error") + +// modInverse returns ia, the inverse of a in the multiplicative group of prime +// order n. It requires that a be a member of the group (i.e. less than n). +func modInverse(a, n *big.Int) (ia *big.Int, ok bool) { + g := new(big.Int) + x := new(big.Int) + y := new(big.Int) + g.GCD(x, y, a, n) + if g.Cmp(bigOne) != 0 { + // In this case, a and n aren't coprime and we cannot calculate + // the inverse. This happens because the values of n are nearly + // prime (being the product of two primes) rather than truly + // prime. 
+		return
+	}
+
+	if x.Cmp(bigOne) < 0 {
+		// 0 is not the multiplicative inverse of any element so, if x
+		// < 1, then x is negative.
+		x.Add(x, n)
+	}
+
+	return x, true
+}
+
+// Precompute performs some calculations that speed up private key operations
+// in the future.
+func (priv *PrivateKey) Precompute() {
+	if priv.Precomputed.Dp != nil {
+		return
+	}
+
+	priv.Precomputed.Dp = new(big.Int).Sub(priv.Primes[0], bigOne)
+	priv.Precomputed.Dp.Mod(priv.D, priv.Precomputed.Dp)
+
+	priv.Precomputed.Dq = new(big.Int).Sub(priv.Primes[1], bigOne)
+	priv.Precomputed.Dq.Mod(priv.D, priv.Precomputed.Dq)
+
+	priv.Precomputed.Qinv = new(big.Int).ModInverse(priv.Primes[1], priv.Primes[0])
+
+	r := new(big.Int).Mul(priv.Primes[0], priv.Primes[1])
+	priv.Precomputed.CRTValues = make([]CRTValue, len(priv.Primes)-2)
+	for i := 2; i < len(priv.Primes); i++ {
+		prime := priv.Primes[i]
+		values := &priv.Precomputed.CRTValues[i-2]
+
+		values.Exp = new(big.Int).Sub(prime, bigOne)
+		values.Exp.Mod(priv.D, values.Exp)
+
+		values.R = new(big.Int).Set(r)
+		values.Coeff = new(big.Int).ModInverse(r, prime)
+
+		r.Mul(r, prime)
+	}
+}
+
+// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
+// random source is given, RSA blinding is used.
+func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+	// TODO(agl): can we get away with reusing blinds?
+	if c.Cmp(priv.N) > 0 {
+		err = ErrDecryption
+		return
+	}
+
+	var ir *big.Int
+	if random != nil {
+		// Blinding enabled. Blinding involves multiplying c by r^e.
+		// Then the decryption operation performs (m^e * r^e)^d mod n
+		// which equals mr mod n. The factor of r can then be removed
+		// by multiplying by the multiplicative inverse of r.
+
+		var r *big.Int
+
+		for {
+			r, err = rand.Int(random, priv.N)
+			if err != nil {
+				return
+			}
+			if r.Cmp(bigZero) == 0 {
+				r = bigOne
+			}
+			var ok bool
+			ir, ok = modInverse(r, priv.N)
+			if ok {
+				break
+			}
+		}
+		bigE := big.NewInt(int64(priv.E))
+		rpowe := new(big.Int).Exp(r, bigE, priv.N)
+		cCopy := new(big.Int).Set(c)
+		cCopy.Mul(cCopy, rpowe)
+		cCopy.Mod(cCopy, priv.N)
+		c = cCopy
+	}
+
+	if priv.Precomputed.Dp == nil {
+		m = new(big.Int).Exp(c, priv.D, priv.N)
+	} else {
+		// We have the precalculated values needed for the CRT.
+		m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0])
+		m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1])
+		m.Sub(m, m2)
+		if m.Sign() < 0 {
+			m.Add(m, priv.Primes[0])
+		}
+		m.Mul(m, priv.Precomputed.Qinv)
+		m.Mod(m, priv.Primes[0])
+		m.Mul(m, priv.Primes[1])
+		m.Add(m, m2)
+
+		for i, values := range priv.Precomputed.CRTValues {
+			prime := priv.Primes[2+i]
+			m2.Exp(c, values.Exp, prime)
+			m2.Sub(m2, m)
+			m2.Mul(m2, values.Coeff)
+			m2.Mod(m2, prime)
+			if m2.Sign() < 0 {
+				m2.Add(m2, prime)
+			}
+			m2.Mul(m2, values.R)
+			m.Add(m, m2)
+		}
+	}
+
+	if ir != nil {
+		// Unblind.
+		m.Mul(m, ir)
+		m.Mod(m, priv.N)
+	}
+
+	return
+}
+
+func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+	m, err = decrypt(random, priv, c)
+	if err != nil {
+		return nil, err
+	}
+
+	// In order to defend against errors in the CRT computation, m^e is
+	// calculated, which should match the original ciphertext.
+	check := encrypt(new(big.Int), &priv.PublicKey, m)
+	if c.Cmp(check) != 0 {
+		return nil, errors.New("rsa: internal error")
+	}
+	return m, nil
+}
+
+// DecryptOAEP decrypts ciphertext using RSA-OAEP.
+//
+// OAEP is parameterised by a hash function that is used as a random oracle.
+// Encryption and decryption of a given message must use the same hash function +// and sha256.New() is a reasonable choice. +// +// The random parameter, if not nil, is used to blind the private-key operation +// and avoid timing side-channel attacks. Blinding is purely internal to this +// function – the random data need not match that used when encrypting. +// +// The label parameter must match the value given when encrypting. See +// EncryptOAEP for details. +func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) (msg []byte, err error) { + if err := checkPub(&priv.PublicKey); err != nil { + return nil, err + } + k := (priv.N.BitLen() + 7) / 8 + if len(ciphertext) > k || + k < hash.Size()*2+2 { + err = ErrDecryption + return + } + + c := new(big.Int).SetBytes(ciphertext) + + m, err := decrypt(random, priv, c) + if err != nil { + return + } + + hash.Write(label) + lHash := hash.Sum(nil) + hash.Reset() + + // Converting the plaintext number to bytes will strip any + // leading zeros so we may have to left pad. We do this unconditionally + // to avoid leaking timing information. (Although we still probably + // leak the number of leading zeros. It's not clear that we can do + // anything about this.) + em := leftPad(m.Bytes(), k) + + firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0) + + seed := em[1 : hash.Size()+1] + db := em[hash.Size()+1:] + + mgf1XOR(seed, hash, db) + mgf1XOR(db, hash, seed) + + lHash2 := db[0:hash.Size()] + + // We have to validate the plaintext in constant time in order to avoid + // attacks like: J. Manger. A Chosen Ciphertext Attack on RSA Optimal + // Asymmetric Encryption Padding (OAEP) as Standardized in PKCS #1 + // v2.0. In J. Kilian, editor, Advances in Cryptology. + lHash2Good := subtle.ConstantTimeCompare(lHash, lHash2) + + // The remainder of the plaintext must be zero or more 0x00, followed + // by 0x01, followed by the message. + // lookingForIndex: 1 iff we are still looking for the 0x01 + // index: the offset of the first 0x01 byte + // invalid: 1 iff we saw a non-zero byte before the 0x01. + var lookingForIndex, index, invalid int + lookingForIndex = 1 + rest := db[hash.Size():] + + for i := 0; i < len(rest); i++ { + equals0 := subtle.ConstantTimeByteEq(rest[i], 0) + equals1 := subtle.ConstantTimeByteEq(rest[i], 1) + index = subtle.ConstantTimeSelect(lookingForIndex&equals1, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals1, 0, lookingForIndex) + invalid = subtle.ConstantTimeSelect(lookingForIndex&^equals0, 1, invalid) + } + + if firstByteIsZero&lHash2Good&^invalid&^lookingForIndex != 1 { + err = ErrDecryption + return + } + + msg = rest[index+1:] + return +} + +// leftPad returns a new slice of length size. The contents of input are right +// aligned in the new slice. 
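+// For example, leftPad([]byte{0xff}, 3) returns []byte{0x00, 0x00, 0xff}.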
+func leftPad(input []byte, size int) (out []byte) { + n := len(input) + if n > size { + n = size + } + out = make([]byte, size) + copy(out[len(out)-n:], input) + return +} diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore new file mode 100644 index 00000000..0f1d00e1 --- /dev/null +++ b/vendor/github.com/lib/pq/.gitignore @@ -0,0 +1,4 @@ +.db +*.test +*~ +*.swp diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh new file mode 100644 index 00000000..ebf44703 --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +set -eu + +client_configure() { + sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key +} + +pgdg_repository() { + local sourcelist='sources.list.d/postgresql.list' + + curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add - + echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist" + sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update +} + +postgresql_configure() { + sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config + local all all trust + hostnossl all pqgossltest 127.0.0.1/32 reject + hostnossl all pqgosslcert 127.0.0.1/32 reject + hostssl all pqgossltest 127.0.0.1/32 trust + hostssl all pqgosslcert 127.0.0.1/32 cert + host all all 127.0.0.1/32 trust + hostnossl all pqgossltest ::1/128 reject + hostnossl all pqgosslcert ::1/128 reject + hostssl all pqgossltest ::1/128 trust + hostssl all pqgosslcert ::1/128 cert + host all all ::1/128 trust + config + + xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates + certs/root.crt + certs/server.crt + certs/server.key + certificates + + sort -VCu <<-versions || + $PGVERSION + 9.2 + versions + sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config + ssl_ca_file = 'root.crt' + ssl_cert_file = 'server.crt' + ssl_key_file = 'server.key' + config + + echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null + + sudo service postgresql restart +} + +postgresql_install() { + xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages + postgresql-$PGVERSION + postgresql-server-dev-$PGVERSION + postgresql-contrib-$PGVERSION + packages +} + +postgresql_uninstall() { + sudo service postgresql stop + xargs sudo apt-get -y --purge remove <<-packages + libpq-dev + libpq5 + postgresql + postgresql-client-common + postgresql-common + packages + sudo rm -rf /var/lib/postgresql +} + +$1 diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml new file mode 100644 index 00000000..8396f5d9 --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.yml @@ -0,0 +1,44 @@ +language: go + +go: + - 1.11.x + - 1.12.x + - master + +sudo: true + +env: + global: + - PGUSER=postgres + - PQGOSSLTESTS=1 + - PQSSLCERTTEST_PATH=$PWD/certs + - PGHOST=127.0.0.1 + matrix: + - PGVERSION=10 + - PGVERSION=9.6 + - PGVERSION=9.5 + - PGVERSION=9.4 + +before_install: + - ./.travis.sh postgresql_uninstall + - ./.travis.sh pgdg_repository + - ./.travis.sh postgresql_install + - ./.travis.sh postgresql_configure + - ./.travis.sh client_configure + - go get golang.org/x/tools/cmd/goimports + - go get golang.org/x/lint/golint + - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2019.2.1 + +before_script: + - createdb pqgotest + - 
createuser -DRS pqgossltest
+  - createuser -DRS pqgosslcert
+
+script:
+  - >
+    goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
+  - go vet ./...
+  - staticcheck -go 1.11 ./...
+  - golint ./...
+  - PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
+  - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...
diff --git a/vendor/github.com/lib/pq/CONTRIBUTING.md b/vendor/github.com/lib/pq/CONTRIBUTING.md
new file mode 100644
index 00000000..84c937f1
--- /dev/null
+++ b/vendor/github.com/lib/pq/CONTRIBUTING.md
@@ -0,0 +1,29 @@
+## Contributing to pq
+
+`pq` has a backlog of pull requests, but contributions are still very
+much welcome. You can help with patch review, submitting bug reports,
+or adding new functionality. There is no formal style guide, but
+please conform to the style of existing code and general Go formatting
+conventions when submitting patches.
+
+### Patch review
+
+Help review existing open pull requests by commenting on the code or
+proposed functionality.
+
+### Bug reports
+
+We appreciate any bug reports, but especially ones with self-contained
+(doesn't depend on code outside of pq) and minimal (can't be simplified
+further) test cases. It's especially helpful if you can submit a pull
+request with just the failing test case (you'll probably want to
+pattern it after the tests in
+[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go)).
+
+### New functionality
+
+There are a number of pending patches for new functionality, so
+additional feature patches will take a while to merge. Still, patches
+are generally reviewed based on usefulness and complexity in addition
+to time-in-queue, so if you have a knockout idea, take a shot. Feel
+free to open an issue discussing your proposed patch beforehand.
diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md
new file mode 100644
index 00000000..5773904a
--- /dev/null
+++ b/vendor/github.com/lib/pq/LICENSE.md
@@ -0,0 +1,8 @@
+Copyright (c) 2011-2013, 'pq' Contributors
+Portions Copyright (C) 2011 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md
new file mode 100644
index 00000000..385fe735
--- /dev/null
+++ b/vendor/github.com/lib/pq/README.md
@@ -0,0 +1,95 @@
+# pq - A pure Go postgres driver for Go's database/sql package
+
+[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq)
+[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
+
+## Install
+
+    go get github.com/lib/pq
+
+## Docs
+
+For detailed documentation and basic usage examples, please see the package
+documentation at <https://godoc.org/github.com/lib/pq>.
+
+## Tests
+
+`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
+
+## Features
+
+* SSL
+* Handles bad connections for `database/sql`
+* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
+* Scan binary blobs correctly (i.e. `bytea`)
+* Package for `hstore` support
+* COPY FROM support
+* pq.ParseURL for converting URLs to connection strings for sql.Open.
+* Many libpq compatible environment variables
+* Unix socket support
+* Notifications: `LISTEN`/`NOTIFY`
+* pgpass support
+
+## Future / Things you can help with
+
+* Better COPY FROM / COPY TO (see discussion in #181)
+
+## Thank you (alphabetical)
+
+Some of these contributors are from the original library `bmizerany/pq.go` whose
+code still exists in here.
+
+* Andy Balholm (andybalholm)
+* Ben Berkert (benburkert)
+* Benjamin Heatwole (bheatwole)
+* Bill Mill (llimllib)
+* Bjørn Madsen (aeons)
+* Blake Gentry (bgentry)
+* Brad Fitzpatrick (bradfitz)
+* Charlie Melbye (cmelbye)
+* Chris Bandy (cbandy)
+* Chris Gilling (cgilling)
+* Chris Walsh (cwds)
+* Dan Sosedoff (sosedoff)
+* Daniel Farina (fdr)
+* Eric Chlebek (echlebek)
+* Eric Garrido (minusnine)
+* Eric Urban (hydrogen18)
+* Everyone at The Go Team
+* Evan Shaw (edsrzf)
+* Ewan Chou (coocood)
+* Fazal Majid (fazalmajid)
+* Federico Romero (federomero)
+* Fumin (fumin)
+* Gary Burd (garyburd)
+* Heroku (heroku)
+* James Pozdena (jpoz)
+* Jason McVetta (jmcvetta)
+* Jeremy Jay (pbnjay)
+* Joakim Sernbrant (serbaut)
+* John Gallagher (jgallagher)
+* Jonathan Rudenberg (titanous)
+* Joël Stemmer (jstemmer)
+* Kamil Kisiel (kisielk)
+* Kelly Dunn (kellydunn)
+* Keith Rarick (kr)
+* Kir Shatrov (kirs)
+* Lann Martin (lann)
+* Maciek Sakrejda (uhoh-itsmaciek)
+* Marc Brinkmann (mbr)
+* Marko Tiikkaja (johto)
+* Matt Newberry (MattNewberry)
+* Matt Robenolt (mattrobenolt)
+* Martin Olsen (martinolsen)
+* Mike Lewis (mikelikespie)
+* Nicolas Patry (Narsil)
+* Oliver Tonnhofer (olt)
+* Patrick Hayes (phayes)
+* Paul Hammond (paulhammond)
+* Ryan Smith (ryandotsmith)
+* Samuel Stauffer (samuel)
+* Timothée Peignier (cyberdelia)
+* Travis Cline (tmc)
+* TruongSinh Tran-Nguyen (truongsinh)
+* Yaismel Miranda (ympons)
+* notedit (notedit)
diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md
new file mode 100644
index 00000000..f0502111
--- /dev/null
+++ b/vendor/github.com/lib/pq/TESTS.md
@@ -0,0 +1,33 @@
+# Tests
+
+## Running Tests
+
+`go test` is used for testing. A running PostgreSQL
+server is required, with the ability to log in. The
+database used for testing is "pqgotest" on
+"localhost", but these defaults can be overridden using [environment
+variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html).
+
+Example:
+
+    PGHOST=/run/postgresql go test
+
+## Benchmarks
+
+A benchmark suite can be run as part of the tests:
+
+    go test -bench .
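+
+To also report allocation statistics, add the standard `-benchmem` flag of
+`go test`:
+
+    go test -bench . -benchmem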
+
+## Example setup (Docker)
+
+Run a postgres container:
+
+```
+docker run -p 5432:5432 postgres
+```
+
+Run tests:
+
+```
+PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test
+```
diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go
new file mode 100644
index 00000000..e4933e22
--- /dev/null
+++ b/vendor/github.com/lib/pq/array.go
@@ -0,0 +1,756 @@
+package pq
+
+import (
+	"bytes"
+	"database/sql"
+	"database/sql/driver"
+	"encoding/hex"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+var typeByteSlice = reflect.TypeOf([]byte{})
+var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
+
+// Array returns the optimal driver.Valuer and sql.Scanner for an array or
+// slice of any dimension.
+//
+// For example:
+//	db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
+//
+//	var x []sql.NullInt64
+//	db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x))
+//
+// Scanning multi-dimensional arrays is not supported. Arrays where the lower
+// bound is not one (such as `[0:0]={1}`) are not supported.
+func Array(a interface{}) interface {
+	driver.Valuer
+	sql.Scanner
+} {
+	switch a := a.(type) {
+	case []bool:
+		return (*BoolArray)(&a)
+	case []float64:
+		return (*Float64Array)(&a)
+	case []int64:
+		return (*Int64Array)(&a)
+	case []string:
+		return (*StringArray)(&a)
+
+	case *[]bool:
+		return (*BoolArray)(a)
+	case *[]float64:
+		return (*Float64Array)(a)
+	case *[]int64:
+		return (*Int64Array)(a)
+	case *[]string:
+		return (*StringArray)(a)
+	}
+
+	return GenericArray{a}
+}
+
+// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
+// to override the array delimiter used by GenericArray.
+type ArrayDelimiter interface {
+	// ArrayDelimiter returns the delimiter character(s) for this element's type.
+	ArrayDelimiter() string
+}
+
+// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
+type BoolArray []bool
+
+// Scan implements the sql.Scanner interface.
+func (a *BoolArray) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case []byte:
+		return a.scanBytes(src)
+	case string:
+		return a.scanBytes([]byte(src))
+	case nil:
+		*a = nil
+		return nil
+	}
+
+	return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
+}
+
+func (a *BoolArray) scanBytes(src []byte) error {
+	elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
+	if err != nil {
+		return err
+	}
+	if *a != nil && len(elems) == 0 {
+		*a = (*a)[:0]
+	} else {
+		b := make(BoolArray, len(elems))
+		for i, v := range elems {
+			if len(v) != 1 {
+				return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
+			}
+			switch v[0] {
+			case 't':
+				b[i] = true
+			case 'f':
+				b[i] = false
+			default:
+				return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
+			}
+		}
+		*a = b
+	}
+	return nil
+}
+
+// Value implements the driver.Valuer interface.
+func (a BoolArray) Value() (driver.Value, error) {
+	if a == nil {
+		return nil, nil
+	}
+
+	if n := len(a); n > 0 {
+		// There will be exactly two curly brackets, N bytes of values,
+		// and N-1 bytes of delimiters.
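+		// For example, BoolArray{true, false, true} encodes as "{t,f,t}".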
+ b := make([]byte, 1+2*n) + + for i := 0; i < n; i++ { + b[2*i] = ',' + if a[i] { + b[1+2*i] = 't' + } else { + b[1+2*i] = 'f' + } + } + + b[0] = '{' + b[2*n] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. +type ByteaArray [][]byte + +// Scan implements the sql.Scanner interface. +func (a *ByteaArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) +} + +func (a *ByteaArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(ByteaArray, len(elems)) + for i, v := range elems { + b[i], err = parseBytea(v) + if err != nil { + return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. It uses the "hex" format which +// is only supported on PostgreSQL 9.0 or newer. +func (a ByteaArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // 3*N bytes of hex formatting, and N-1 bytes of delimiters. + size := 1 + 6*n + for _, x := range a { + size += hex.EncodedLen(len(x)) + } + + b := make([]byte, size) + + for i, s := 0, b; i < n; i++ { + o := copy(s, `,"\\x`) + o += hex.Encode(s[o:], a[i]) + s[o] = '"' + s = s[o+1:] + } + + b[0] = '{' + b[size-1] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// Float64Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float64Array []float64 + +// Scan implements the sql.Scanner interface. +func (a *Float64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float64Array", src) +} + +func (a *Float64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, a[0], 'f', -1, 64) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, a[i], 'f', -1, 64) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// GenericArray implements the driver.Valuer and sql.Scanner interfaces for +// an array or slice of any dimension. 
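+//
+// A minimal usage sketch (the query is illustrative, not part of pq):
+//
+//	var a []sql.NullString
+//	err := db.QueryRow(`SELECT ARRAY['x', NULL]`).Scan(pq.GenericArray{&a})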
+type GenericArray struct{ A interface{} } + +func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { + var assign func([]byte, reflect.Value) error + var del = "," + + // TODO calculate the assign function for other types + // TODO repeat this section on the element type of arrays or slices (multidimensional) + { + if reflect.PtrTo(rt).Implements(typeSQLScanner) { + // dest is always addressable because it is an element of a slice. + assign = func(src []byte, dest reflect.Value) (err error) { + ss := dest.Addr().Interface().(sql.Scanner) + if src == nil { + err = ss.Scan(nil) + } else { + err = ss.Scan(src) + } + return + } + goto FoundType + } + + assign = func([]byte, reflect.Value) error { + return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) + } + } + +FoundType: + + if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + return rt, assign, del +} + +// Scan implements the sql.Scanner interface. +func (a GenericArray) Scan(src interface{}) error { + dpv := reflect.ValueOf(a.A) + switch { + case dpv.Kind() != reflect.Ptr: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + case dpv.IsNil(): + return fmt.Errorf("pq: destination %T is nil", a.A) + } + + dv := dpv.Elem() + switch dv.Kind() { + case reflect.Slice: + case reflect.Array: + default: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + } + + switch src := src.(type) { + case []byte: + return a.scanBytes(src, dv) + case string: + return a.scanBytes([]byte(src), dv) + case nil: + if dv.Kind() == reflect.Slice { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + } + + return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) +} + +func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { + dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) + dims, elems, err := parseArray(src, []byte(del)) + if err != nil { + return err + } + + // TODO allow multidimensional + + if len(dims) > 1 { + return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", + strings.Replace(fmt.Sprint(dims), " ", "][", -1)) + } + + // Treat a zero-dimensional array like an array with a single dimension of zero. + if len(dims) == 0 { + dims = append(dims, 0) + } + + for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { + switch rt.Kind() { + case reflect.Slice: + case reflect.Array: + if rt.Len() != dims[i] { + return fmt.Errorf("pq: cannot convert ARRAY%s to %s", + strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) + } + default: + // TODO handle multidimensional + } + } + + values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) + for i, e := range elems { + if err := assign(e, values.Index(i)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + + // TODO handle multidimensional + + switch dv.Kind() { + case reflect.Slice: + dv.Set(values.Slice(0, dims[0])) + case reflect.Array: + for i := 0; i < dims[0]; i++ { + dv.Index(i).Set(values.Index(i)) + } + } + + return nil +} + +// Value implements the driver.Valuer interface. 
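+// For example, GenericArray{[][]int64{{1, 2}, {3, 4}}} is rendered as the
+// PostgreSQL literal {{1,2},{3,4}}.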
+func (a GenericArray) Value() (driver.Value, error) { + if a.A == nil { + return nil, nil + } + + rv := reflect.ValueOf(a.A) + + switch rv.Kind() { + case reflect.Slice: + if rv.IsNil() { + return nil, nil + } + case reflect.Array: + default: + return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) + } + + if n := rv.Len(); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 0, 1+2*n) + + b, _, err := appendArray(b, rv, n) + return string(b), err + } + + return "{}", nil +} + +// Int64Array represents a one-dimensional array of the PostgreSQL integer types. +type Int64Array []int64 + +// Scan implements the sql.Scanner interface. +func (a *Int64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int64Array", src) +} + +func (a *Int64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, a[0], 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, a[i], 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// StringArray represents a one-dimensional array of the PostgreSQL character types. +type StringArray []string + +// Scan implements the sql.Scanner interface. +func (a *StringArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to StringArray", src) +} + +func (a *StringArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "StringArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(StringArray, len(elems)) + for i, v := range elems { + if b[i] = string(v); v == nil { + return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a StringArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+3*n) + b[0] = '{' + + b = appendArrayQuotedBytes(b, []byte(a[0])) + for i := 1; i < n; i++ { + b = append(b, ',') + b = appendArrayQuotedBytes(b, []byte(a[i])) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// appendArray appends rv to the buffer, returning the extended buffer and +// the delimiter used between elements. 
+// +// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. +func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { + var del string + var err error + + b = append(b, '{') + + if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { + return b, del, err + } + + for i := 1; i < n; i++ { + b = append(b, del...) + if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { + return b, del, err + } + } + + return append(b, '}'), del, nil +} + +// appendArrayElement appends rv to the buffer, returning the extended buffer +// and the delimiter to use before the next element. +// +// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted +// using driver.DefaultParameterConverter and the resulting []byte or string +// is double-quoted. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { + if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { + if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { + if n := rv.Len(); n > 0 { + return appendArray(b, rv, n) + } + + return b, "", nil + } + } + + var del = "," + var err error + var iv interface{} = rv.Interface() + + if ad, ok := iv.(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { + return b, del, err + } + + switch v := iv.(type) { + case nil: + return append(b, "NULL"...), del, nil + case []byte: + return appendArrayQuotedBytes(b, v), del, nil + case string: + return appendArrayQuotedBytes(b, []byte(v)), del, nil + } + + b, err = appendValue(b, iv) + return b, del, err +} + +func appendArrayQuotedBytes(b, v []byte) []byte { + b = append(b, '"') + for { + i := bytes.IndexAny(v, `"\`) + if i < 0 { + b = append(b, v...) + break + } + if i > 0 { + b = append(b, v[:i]...) + } + b = append(b, '\\', v[i]) + v = v[i+1:] + } + return append(b, '"') +} + +func appendValue(b []byte, v driver.Value) ([]byte, error) { + return append(b, encode(nil, v, 0)...), nil +} + +// parseArray extracts the dimensions and elements of an array represented in +// text format. Only representations emitted by the backend are supported. +// Notably, whitespace around brackets and delimiters is significant, and NULL +// is case-sensitive. 
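+// For example, parsing {1,NULL,"a b"} with the delimiter "," yields dims [3]
+// and the elements 1, nil and a b.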
+// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { + var depth, i int + + if len(src) < 1 || src[0] != '{' { + return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) + } + +Open: + for i < len(src) { + switch src[i] { + case '{': + depth++ + i++ + case '}': + elems = make([][]byte, 0) + goto Close + default: + break Open + } + } + dims = make([]int, i) + +Element: + for i < len(src) { + switch src[i] { + case '{': + if depth == len(dims) { + break Element + } + depth++ + dims[depth-1] = 0 + i++ + case '"': + var elem = []byte{} + var escape bool + for i++; i < len(src); i++ { + if escape { + elem = append(elem, src[i]) + escape = false + } else { + switch src[i] { + default: + elem = append(elem, src[i]) + case '\\': + escape = true + case '"': + elems = append(elems, elem) + i++ + break Element + } + } + } + default: + for start := i; i < len(src); i++ { + if bytes.HasPrefix(src[i:], del) || src[i] == '}' { + elem := src[start:i] + if len(elem) == 0 { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + if bytes.Equal(elem, []byte("NULL")) { + elem = nil + } + elems = append(elems, elem) + break Element + } + } + } + } + + for i < len(src) { + if bytes.HasPrefix(src[i:], del) && depth > 0 { + dims[depth-1]++ + i += len(del) + goto Element + } else if src[i] == '}' && depth > 0 { + dims[depth-1]++ + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + +Close: + for i < len(src) { + if src[i] == '}' && depth > 0 { + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + if depth > 0 { + err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) + } + if err == nil { + for _, d := range dims { + if (len(elems) % d) != 0 { + err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") + } + } + } + return +} + +func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { + dims, elems, err := parseArray(src, del) + if err != nil { + return nil, err + } + if len(dims) > 1 { + return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) + } + return elems, err +} diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go new file mode 100644 index 00000000..4b0a0a8f --- /dev/null +++ b/vendor/github.com/lib/pq/buf.go @@ -0,0 +1,91 @@ +package pq + +import ( + "bytes" + "encoding/binary" + + "github.com/lib/pq/oid" +) + +type readBuf []byte + +func (b *readBuf) int32() (n int) { + n = int(int32(binary.BigEndian.Uint32(*b))) + *b = (*b)[4:] + return +} + +func (b *readBuf) oid() (n oid.Oid) { + n = oid.Oid(binary.BigEndian.Uint32(*b)) + *b = (*b)[4:] + return +} + +// N.B: this is actually an unsigned 16-bit integer, unlike int32 +func (b *readBuf) int16() (n int) { + n = int(binary.BigEndian.Uint16(*b)) + *b = (*b)[2:] + return +} + +func (b *readBuf) string() string { + i := bytes.IndexByte(*b, 0) + if i < 0 { + errorf("invalid message format; expected string terminator") + } + s := (*b)[:i] + *b = (*b)[i+1:] + return string(s) +} + +func (b *readBuf) next(n int) (v []byte) { + v = (*b)[:n] + *b = (*b)[n:] + return +} + +func (b *readBuf) byte() byte { + return b.next(1)[0] +} + +type writeBuf struct { + buf 
[]byte + pos int +} + +func (b *writeBuf) int32(n int) { + x := make([]byte, 4) + binary.BigEndian.PutUint32(x, uint32(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) int16(n int) { + x := make([]byte, 2) + binary.BigEndian.PutUint16(x, uint16(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) string(s string) { + b.buf = append(append(b.buf, s...), '\000') +} + +func (b *writeBuf) byte(c byte) { + b.buf = append(b.buf, c) +} + +func (b *writeBuf) bytes(v []byte) { + b.buf = append(b.buf, v...) +} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go new file mode 100644 index 00000000..55152b12 --- /dev/null +++ b/vendor/github.com/lib/pq/conn.go @@ -0,0 +1,1923 @@ +package pq + +import ( + "bufio" + "context" + "crypto/md5" + "crypto/sha256" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "time" + "unicode" + + "github.com/lib/pq/oid" + "github.com/lib/pq/scram" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less") + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") + + errUnexpectedReady = errors.New("unexpected ReadyForQuery") + errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") + errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") +) + +// Driver is the Postgres database driver. +type Driver struct{} + +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func (d *Driver) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &Driver{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. 
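+//
+// A minimal sketch of supplying a custom dialer (loggedDialer is a
+// hypothetical type embedding net.Dialer that satisfies this interface):
+//
+//	conn, err := pq.DialOpen(loggedDialer{}, "user=pqgotest dbname=pqgotest sslmode=disable")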
+type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +// DialerContext is the context-aware dialer interface. +type DialerContext interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +type defaultDialer struct { + d net.Dialer +} + +func (d defaultDialer) Dial(network, address string) (net.Conn, error) { + return d.d.Dial(network, address) +} +func (d defaultDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return d.DialContext(ctx, network, address) +} +func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + return d.d.DialContext(ctx, network, address) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + txnFinish func() + + // Save connection arguments to use during CancelRequest. + dialer Dialer + opts values + + // Cancellation key data for use with CancelRequest messages. + processID int + secretKey int + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If true, this connection is bad and all public-facing functions should + // return ErrBadConn. + bad bool + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool + + // If true this connection is in the middle of a COPY + inCopy bool +} + +// Handle driver-side settings in parsed connection string. 
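+// For example, a connection string containing binary_parameters=yes sets
+// cn.binaryParameters to true; any value other than "yes" or "no" is an error.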
+func (cn *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value, ok := o[key]; ok { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) + if err != nil { + return err + } + return boolSetting("binary_parameters", &cn.binaryParameters) +} + +func (cn *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + if _, ok := o["password"]; ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir + } + filename = filepath.Join(userHome, ".pgpass") + } + fileinfo, err := os.Stat(filename) + if err != nil { + return + } + mode := fileinfo.Mode() + if mode&(0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + hostname := o["host"] + ntw, _ := network(o) + port := o["port"] + db := o["dbname"] + username := o["user"] + // From: https://github.com/tg/pgpass/blob/master/reader.go + getFields := func(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) + } + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 || line[0] == '#' { + continue + } + split := getFields(line) + if len(split) != 5 { + continue + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return + } + } +} + +func (cn *conn) writeBuf(b byte) *writeBuf { + cn.scratch[0] = b + return &writeBuf{ + buf: cn.scratch[:5], + pos: 1, + } +} + +// Open opens a new connection to the database. dsn is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func Open(dsn string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, dsn) +} + +// DialOpen opens a new connection to the database using a dialer. +func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { + c, err := NewConnector(dsn) + if err != nil { + return nil, err + } + c.dialer = d + return c.open(context.Background()) +} + +func (c *Connector) open(ctx context.Context) (cn *conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. 
+ defer errRecoverNoErrBadConn(&err) + + o := c.opts + + cn = &conn{ + opts: o, + dialer: c.dialer, + } + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + cn.handlePgpass(o) + + cn.c, err = dial(ctx, c.dialer, o) + if err != nil { + return nil, err + } + + err = cn.ssl(o) + if err != nil { + if cn.c != nil { + cn.c.Close() + } + return nil, err + } + + // cn.startup panics on error. Make sure we don't leak cn.c. + panicking := true + defer func() { + if panicking { + cn.c.Close() + } + }() + + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + panicking = false + return cn, err +} + +func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { + network, address := network(o) + // SSL is not necessary or supported over UNIX domain sockets + if network == "unix" { + o["sslmode"] = "disable" + } + + // Zero or not specified means wait indefinitely. + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. + deadline := time.Now().Add(duration) + var conn net.Conn + if dctx, ok := d.(DialerContext); ok { + ctx, cancel := context.WithTimeout(ctx, duration) + defer cancel() + conn, err = dctx.DialContext(ctx, network, address) + } else { + conn, err = d.DialTimeout(network, address, duration) + } + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + if dctx, ok := d.(DialerContext); ok { + return dctx.DialContext(ctx, network, address) + } + return d.Dial(network, address) +} + +func network(o values) (string, string) { + host := o["host"] + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o["port"]) + return "unix", sockPath + } + + return "tcp", net.JoinHostPort(host, o["port"]) +} + +type values map[string]string + +// scanner implements a tokenizer for libpq-style option strings. +type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. 
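+// For example, parseOpts(`user=pqgotest password='secret pass'`, o) sets
+// o["user"] to "pqgotest" and o["password"] to "secret pass".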
+//
+// The parsing code is based on conninfo_parse from libpq's fe-connect.c
+func parseOpts(name string, o values) error {
+	s := newScanner(name)
+
+	for {
+		var (
+			keyRunes, valRunes []rune
+			r                  rune
+			ok                 bool
+		)
+
+		if r, ok = s.SkipSpaces(); !ok {
+			break
+		}
+
+		// Scan the key
+		for !unicode.IsSpace(r) && r != '=' {
+			keyRunes = append(keyRunes, r)
+			if r, ok = s.Next(); !ok {
+				break
+			}
+		}
+
+		// Skip any whitespace if we're not at the = yet
+		if r != '=' {
+			r, ok = s.SkipSpaces()
+		}
+
+		// The current character should be =
+		if r != '=' || !ok {
+			return fmt.Errorf(`missing "=" after %q in connection info string`, string(keyRunes))
+		}
+
+		// Skip any whitespace after the =
+		if r, ok = s.SkipSpaces(); !ok {
+			// If we reach the end here, the last value is just an empty string as per libpq.
+			o[string(keyRunes)] = ""
+			break
+		}
+
+		if r != '\'' {
+			for !unicode.IsSpace(r) {
+				if r == '\\' {
+					if r, ok = s.Next(); !ok {
+						return fmt.Errorf(`missing character after backslash`)
+					}
+				}
+				valRunes = append(valRunes, r)
+
+				if r, ok = s.Next(); !ok {
+					break
+				}
+			}
+		} else {
+		quote:
+			for {
+				if r, ok = s.Next(); !ok {
+					return fmt.Errorf(`unterminated quoted string literal in connection string`)
+				}
+				switch r {
+				case '\'':
+					break quote
+				case '\\':
+					r, _ = s.Next()
+					fallthrough
+				default:
+					valRunes = append(valRunes, r)
+				}
+			}
+		}
+
+		o[string(keyRunes)] = string(valRunes)
+	}
+
+	return nil
+}
+
+func (cn *conn) isInTransaction() bool {
+	return cn.txnStatus == txnStatusIdleInTransaction ||
+		cn.txnStatus == txnStatusInFailedTransaction
+}
+
+func (cn *conn) checkIsInTransaction(intxn bool) {
+	if cn.isInTransaction() != intxn {
+		cn.bad = true
+		errorf("unexpected transaction status %v", cn.txnStatus)
+	}
+}
+
+func (cn *conn) Begin() (_ driver.Tx, err error) {
+	return cn.begin("")
+}
+
+func (cn *conn) begin(mode string) (_ driver.Tx, err error) {
+	if cn.bad {
+		return nil, driver.ErrBadConn
+	}
+	defer cn.errRecover(&err)
+
+	cn.checkIsInTransaction(false)
+	_, commandTag, err := cn.simpleExec("BEGIN" + mode)
+	if err != nil {
+		return nil, err
+	}
+	if commandTag != "BEGIN" {
+		cn.bad = true
+		return nil, fmt.Errorf("unexpected command tag %s", commandTag)
+	}
+	if cn.txnStatus != txnStatusIdleInTransaction {
+		cn.bad = true
+		return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus)
+	}
+	return cn, nil
+}
+
+func (cn *conn) closeTxn() {
+	if finish := cn.txnFinish; finish != nil {
+		finish()
+	}
+}
+
+func (cn *conn) Commit() (err error) {
+	defer cn.closeTxn()
+	if cn.bad {
+		return driver.ErrBadConn
+	}
+	defer cn.errRecover(&err)
+
+	cn.checkIsInTransaction(true)
+	// We don't want the client to think that everything is okay if it tries
+	// to commit a failed transaction. However, no matter what we return,
+	// database/sql will release this connection back into the free connection
+	// pool so we have to abort the current transaction here. Note that you
+	// would get the same behaviour if you issued a COMMIT in a failed
+	// transaction, so it's also the least surprising thing to do here.
+ if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "COMMIT" { + cn.bad = true + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + defer cn.closeTxn() + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + return cn.rollback() +} + +func (cn *conn) rollback() (err error) { + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + // done + return + case 'E': + err = parseError(r) + case 'I': + res = emptyRows + case 'T', 'D': + // ignore any results + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res *rows, err error) { + defer cn.errRecover(&err) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.bad = true + errorf("unexpected message %q in simple query execution", t) + } + if res == nil { + res = &rows{ + cn: cn, + } + } + // Set the result and tag to the last command complete if there wasn't a + // query already run. Although queries usually return from here and cede + // control to Next, a query with zero results does not. + if t == 'C' && res.colNames == nil { + res.result, res.tag = cn.parseComplete(r.string()) + } + res.done = true + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.bad = true + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.rowsHeader = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. 
+ default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +type noRows struct{} + +var emptyRows noRows + +var _ driver.Result = noRows{} + +func (noRows) LastInsertId() (int64, error) { + return 0, errNoLastInsertID +} + +func (noRows) RowsAffected() (int64, error) { + return 0, errNoRowsAffected +} + +// Decides which column formats to use for a prepared statement. The input is +// an array of type oids, one element per result column. +func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, t := range colTyps { + switch t.OID { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. + case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + fallthrough + case oid.T_uuid: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + s, err := cn.prepareCopyIn(q) + if err == nil { + cn.inCopy = true + } + return s, err + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn *conn) Close() (err error) { + // Skip cn.bad return here because we always want to close a connection. + defer cn.errRecover(&err) + + // Ensure that cn.c.Close is always run. Since error handling is done with + // panics and cn.errRecover, the Close must be in a defer. + defer func() { + cerr := cn.c.Close() + if err == nil { + err = cerr + } + }() + + // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. 
+ return cn.sendSimpleMessage('X') +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { + return cn.query(query, args) +} + +func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + if cn.inCopy { + return nil, errCopyInProgress + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.rowsHeader = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + rowsHeader: st.rowsHeader, + }, nil +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. + st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err +} + +func (cn *conn) send(m *writeBuf) { + _, err := cn.c.Write(m.wrap()) + if err != nil { + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) error { + _, err := cn.c.Write((m.wrap())[1:]) + return err +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.bad = true + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. 
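+// Each message is framed as a one-byte type followed by a big-endian int32
+// length that includes the length field itself; for example, an idle
+// ReadyForQuery arrives as 'Z', 0x00000005, 'I'.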
+func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + switch t { + case 'E': + panic(parseError(r)) + case 'N': + // ignore + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A', 'N': + // ignore + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. All asynchronous messages are ignored, with +// the exception of ErrorResponse. +func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) error { + upgrade, err := ssl(o) + if err != nil { + return err + } + + if upgrade == nil { + // Nothing to do + return nil + } + + w := cn.writeBuf(0) + w.int32(80877103) + if err = cn.sendStartupPacket(w); err != nil { + return err + } + + b := cn.scratch[:1] + _, err = io.ReadFull(cn.c, b) + if err != nil { + return err + } + + if b[0] != 'S' { + return ErrSSLNotSupported + } + + cn.c, err = upgrade(cn.c) + return err +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. Additionally, we send over any run-time + // parameters potentially included in the connection string. If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". 
+ if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + if err := cn.sendStartupPacket(w); err != nil { + panic(err) + } + + for { + t, r := cn.recv() + switch t { + case 'K': + cn.processBackendKeyData(r) + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o["password"]) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 10: + sc := scram.NewClient(sha256.New, o["user"], o["password"]) + sc.Step(nil) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + scOut := sc.Out() + + w := cn.writeBuf('p') + w.string("SCRAM-SHA-256") + w.int32(len(scOut)) + w.bytes(scOut) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 11 { + errorf("unexpected authentication response: %q", t) + } + + nextStep := r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + scOut = sc.Out() + w = cn.writeBuf('p') + w.bytes(scOut) + cn.send(w) + + t, r = cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 12 { + errorf("unexpected authentication response: %q", t) + } + + nextStep = r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). +var colFmtDataAllBinary = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). 
+var colFmtDataAllText = []byte{0, 0} + +type stmt struct { + cn *conn + name string + rowsHeader + colFmtData []byte + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if st.cn.bad { + return driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.bad = true + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.bad = true + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + rowsHeader: st.rowsHeader, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return len(st.paramTyps) +} + +// parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. 
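// (Worked example: a single-row insert yields the tag "INSERT 0 1".
// strings.Split produces ["INSERT", "0", "1"]; the middle element is the
// deprecated oid being ignored, and the last element is the affected-row
// count parsed below.)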
+ if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.bad = true + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.bad = true + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rowsHeader struct { + colNames []string + colTyps []fieldDesc + colFmts []format +} + +type rows struct { + cn *conn + finish func() + rowsHeader + done bool + rb readBuf + result driver.Result + tag string + + next *rowsHeader +} + +func (rs *rows) Close() error { + if finish := rs.finish; finish != nil { + defer finish() + } + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row + // description, used with HasNextResultSet). We need to fetch messages until + // we hit a 'Z', which is done by waiting for done to be set. + if rs.done { + return nil + } + default: + return err + } + } +} + +func (rs *rows) Columns() []string { + return rs.colNames +} + +func (rs *rows) Result() driver.Result { + if rs.result == nil { + return emptyRows + } + return rs.result +} + +func (rs *rows) Tag() string { + return rs.tag +} + +func (rs *rows) Next(dest []driver.Value) (err error) { + if rs.done { + return io.EOF + } + + conn := rs.cn + if conn.bad { + return driver.ErrBadConn + } + defer conn.errRecover(&err) + + for { + t := conn.recv1Buf(&rs.rb) + switch t { + case 'E': + err = parseError(&rs.rb) + case 'C', 'I': + if t == 'C' { + rs.result, rs.tag = conn.parseComplete(rs.rb.string()) + } + continue + case 'Z': + conn.processReadyForQuery(&rs.rb) + rs.done = true + if err != nil { + return err + } + return io.EOF + case 'D': + n := rs.rb.int16() + if err != nil { + conn.bad = true + errorf("unexpected DataRow after error %s", err) + } + if n < len(dest) { + dest = dest[:n] + } + for i := range dest { + l := rs.rb.int32() + if l == -1 { + dest[i] = nil + continue + } + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) + } + return + case 'T': + next := parsePortalRowDescribe(&rs.rb) + rs.next = &next + return io.EOF + default: + errorf("unexpected message after execute: %q", t) + } + } +} + +func (rs *rows) HasNextResultSet() bool { + hasNext := rs.next != nil && !rs.done + return hasNext +} + +func (rs *rows) NextResultSet() error { + if rs.next == nil { + return io.EOF + } + rs.rowsHeader = *rs.next + rs.next = nil + return nil +} + +// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be +// used as part of an SQL statement. For example: +// +// tblname := "my_table" +// data := "my_data" +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) +// +// Any double quotes in name will be escaped. The quoted identifier will be +// case sensitive when used in a query. If the input string contains a zero +// byte, the result will be truncated immediately before it. 
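// For instance (illustrative), QuoteIdentifier(`my "quoted" table`) returns
// `"my ""quoted"" table"`, which can be spliced into a statement safely.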
+func QuoteIdentifier(name string) string { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + return `"` + strings.Replace(name, `"`, `""`, -1) + `"` +} + +// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal +// to DDL and other statements that do not accept parameters) to be used as part +// of an SQL statement. For example: +// +// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") +// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) +// +// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be +// replaced by two backslashes (i.e. "\\") and the C-style escape identifier +// that PostgreSQL provides ('E') will be prepended to the string. +func QuoteLiteral(literal string) string { + // This follows the PostgreSQL internal algorithm for handling quoted literals + // from libpq, which can be found in the "PQEscapeStringInternal" function, + // which is found in the libpq/fe-exec.c source file: + // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c + // + // substitute any single-quotes (') with two single-quotes ('') + literal = strings.Replace(literal, `'`, `''`, -1) + // determine if the string has any backslashes (\) in it. + // if it does, replace any backslashes (\) with two backslashes (\\) + // then, we need to wrap the entire string with a PostgreSQL + // C-style escape. Per how "PQEscapeStringInternal" handles this case, we + // also add a space before the "E" + if strings.Contains(literal, `\`) { + literal = strings.Replace(literal, `\`, `\\`, -1) + literal = ` E'` + literal + `'` + } else { + // otherwise, we can just wrap the literal with a pair of single quotes + literal = `'` + literal + `'` + } + return literal +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { + // Do one pass over the parameters to see if we're going to send any of + // them over in binary. If we are, create a paramFormats array at the + // same time. 
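// (Note: only []byte arguments are flagged as binary below; every other
// driver.Value goes through the text encoder, so a nil paramFormats means
// "no binary parameters" and is sent as a zero-length format-code list.)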
+ var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (cn *conn) processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + var minor int + _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor) + if err == nil { + cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor + } + + case "TimeZone": + cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + cn.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (cn *conn) processReadyForQuery(r *readBuf) { + cn.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.bad = true + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) processBackendKeyData(r *readBuf) { + cn.processID = r.int32() + cn.secretKey = r.int32() +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe statement response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() rowsHeader { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return rowsHeader{} + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) postExecuteWorkaround() { + // Work around a bug in 
sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. + for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.bad = true + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + if err != nil { + cn.bad = true + errorf("unexpected CommandComplete after error %s", err) + } + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + if err != nil { + cn.bad = true + errorf("unexpected %q after error %s", t, err) + } + if t == 'I' { + res = emptyRows + } + // ignore any results + default: + cn.bad = true + errorf("unknown %s response: %q", protocolState, t) + } + } +} + +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { + n := r.int16() + colNames = make([]string, n) + colTyps = make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) rowsHeader { + n := r.int16() + colNames := make([]string, n) + colFmts := make([]format, n) + colTyps := make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + colFmts[i] = format(r.int16()) + } + return rowsHeader{ + colNames: colNames, + colFmts: colFmts, + colTyps: colTyps, + } +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. 
Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). + switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". +func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go new file mode 100644 index 00000000..0fdd06a6 --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -0,0 +1,149 @@ +package pq + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "time" +) + +// Implement the "QueryerContext" interface +func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := cn.watchCancel(ctx) + r, err := cn.query(query, list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "ExecerContext" interface +func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + + return cn.Exec(query, list) +} + +// Implement the "ConnBeginTx" interface +func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + var mode string + + switch sql.IsolationLevel(opts.Isolation) { + case sql.LevelDefault: + // Don't touch mode: use the server's default + case sql.LevelReadUncommitted: + mode = " ISOLATION LEVEL READ UNCOMMITTED" + case sql.LevelReadCommitted: + mode = " ISOLATION LEVEL READ COMMITTED" + case sql.LevelRepeatableRead: + mode = " ISOLATION LEVEL REPEATABLE READ" + case sql.LevelSerializable: + mode = " ISOLATION LEVEL SERIALIZABLE" + default: + return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) + } + + if opts.ReadOnly { + mode += " READ ONLY" + } else { + mode += " READ WRITE" + } + + tx, err := cn.begin(mode) + if err != 
nil { + return nil, err + } + cn.txnFinish = cn.watchCancel(ctx) + return tx, nil +} + +func (cn *conn) Ping(ctx context.Context) error { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + rows, err := cn.simpleQuery("SELECT 'lib/pq ping test';") + if err != nil { + return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger + } + rows.Close() + return nil +} + +func (cn *conn) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}) + go func() { + select { + case <-done: + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + _ = cn.cancel(ctxCancel) + finished <- struct{}{} + case <-finished: + } + }() + return func() { + select { + case <-finished: + case finished <- struct{}{}: + } + } + } + return nil +} + +func (cn *conn) cancel(ctx context.Context) error { + c, err := dial(ctx, cn.dialer, cn.opts) + if err != nil { + return err + } + defer c.Close() + + { + can := conn{ + c: c, + } + err = can.ssl(cn.opts) + if err != nil { + return err + } + + w := can.writeBuf(0) + w.int32(80877102) // cancel request code + w.int32(cn.processID) + w.int32(cn.secretKey) + + if err := can.sendStartupPacket(w); err != nil { + return err + } + } + + // Read until EOF to ensure that the server received the cancel. + { + _, err := io.Copy(ioutil.Discard, c) + return err + } +} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go new file mode 100644 index 00000000..2f8ced67 --- /dev/null +++ b/vendor/github.com/lib/pq/connector.go @@ -0,0 +1,110 @@ +package pq + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" +) + +// Connector represents a fixed configuration for the pq driver with a given +// name. Connector satisfies the database/sql/driver Connector interface and +// can be used to create any number of DB Conn's via the database/sql OpenDB +// function. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +type Connector struct { + opts values + dialer Dialer +} + +// Connect returns a connection to the database using the fixed configuration +// of this Connector. Context is not used. +func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { + return c.open(ctx) +} + +// Driver returns the underlying driver of this Connector. +func (c *Connector) Driver() driver.Driver { + return &Driver{} +} + +// NewConnector returns a connector for the pq driver in a fixed configuration +// with the given dsn. The returned connector can be used to create any number +// of equivalent Conn's. The returned connector is intended to be used with +// database/sql.OpenDB. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB.
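// A minimal usage sketch (illustrative; DSN and error handling are
// placeholders):
//
//	connector, err := pq.NewConnector("user=pqgotest dbname=pqgotest sslmode=verify-full")
//	if err != nil {
//		log.Fatal(err)
//	}
//	db := sql.OpenDB(connector)
//	defer db.Close()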
+func NewConnector(dsn string) (*Connector, error) { + var err error + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o["host"] = "localhost" + o["port"] = "5432" + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. + o["extra_float_digits"] = "2" + for k, v := range parseEnviron(os.Environ()) { + o[k] = v + } + + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + dsn, err = ParseURL(dsn) + if err != nil { + return nil, err + } + } + + if err := parseOpts(dsn, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback, ok := o["fallback_application_name"]; ok { + if _, ok := o["application_name"]; !ok { + o["application_name"] = fallback + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. + // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o["client_encoding"] = "UTF8" + // DateStyle needs a similar treatment. + if datestyle, ok := o["datestyle"]; ok { + if datestyle != "ISO, MDY" { + return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) + } + } else { + o["datestyle"] = "ISO, MDY" + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. + if _, ok := o["user"]; !ok { + u, err := userCurrent() + if err != nil { + return nil, err + } + o["user"] = u + } + + return &Connector{opts: o, dialer: defaultDialer{}}, nil +} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go new file mode 100644 index 00000000..345c2398 --- /dev/null +++ b/vendor/github.com/lib/pq/copy.go @@ -0,0 +1,282 @@ +package pq + +import ( + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "sync" +) + +var ( + errCopyInClosed = errors.New("pq: copyin statement has already been closed") + errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") + errCopyToNotSupported = errors.New("pq: COPY TO is not supported") + errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") + errCopyInProgress = errors.New("pq: COPY in progress") +) + +// CopyIn creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). The target table should be visible in search_path. +func CopyIn(table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +// CopyInSchema creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). +func CopyInSchema(schema, table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(schema) + "." 
+ QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +type copyin struct { + cn *conn + buffer []byte + rowData chan []byte + done chan bool + + closed bool + + sync.Mutex // guards err + err error +} + +const ciBufferSize = 64 * 1024 + +// flush buffer before the buffer is filled up and needs reallocation +const ciBufferFlushSize = 63 * 1024 + +func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { + if !cn.isInTransaction() { + return nil, errCopyNotSupportedOutsideTxn + } + + ci := ©in{ + cn: cn, + buffer: make([]byte, 0, ciBufferSize), + rowData: make(chan []byte), + done: make(chan bool, 1), + } + // add CopyData identifier + 4 bytes for message length + ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + +awaitCopyInResponse: + for { + t, r := cn.recv1() + switch t { + case 'G': + if r.byte() != 0 { + err = errBinaryCopyNotSupported + break awaitCopyInResponse + } + go ci.resploop() + return ci, nil + case 'H': + err = errCopyToNotSupported + break awaitCopyInResponse + case 'E': + err = parseError(r) + case 'Z': + if err == nil { + ci.setBad() + errorf("unexpected ReadyForQuery in response to COPY") + } + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad() + errorf("unknown response for copy query: %q", t) + } + } + + // something went wrong, abort COPY before we return + b = cn.writeBuf('f') + b.string(err.Error()) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'c', 'C', 'E': + case 'Z': + // correctly aborted, we're done + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad() + errorf("unknown response for CopyFail: %q", t) + } + } +} + +func (ci *copyin) flush(buf []byte) { + // set message length (without message identifier) + binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) + + _, err := ci.cn.c.Write(buf) + if err != nil { + panic(err) + } +} + +func (ci *copyin) resploop() { + for { + var r readBuf + t, err := ci.cn.recvMessage(&r) + if err != nil { + ci.setBad() + ci.setError(err) + ci.done <- true + return + } + switch t { + case 'C': + // complete + case 'N': + // NoticeResponse + case 'Z': + ci.cn.processReadyForQuery(&r) + ci.done <- true + return + case 'E': + err := parseError(&r) + ci.setError(err) + default: + ci.setBad() + ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) + ci.done <- true + return + } + } +} + +func (ci *copyin) setBad() { + ci.Lock() + ci.cn.bad = true + ci.Unlock() +} + +func (ci *copyin) isBad() bool { + ci.Lock() + b := ci.cn.bad + ci.Unlock() + return b +} + +func (ci *copyin) isErrorSet() bool { + ci.Lock() + isSet := (ci.err != nil) + ci.Unlock() + return isSet +} + +// setError() sets ci.err if one has not been set already. Caller must not be +// holding ci.Mutex. +func (ci *copyin) setError(err error) { + ci.Lock() + if ci.err == nil { + ci.err = err + } + ci.Unlock() +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + return nil, ErrNotSupported +} + +// Exec inserts values into the COPY stream. The insert is asynchronous +// and Exec can return errors from previous Exec calls to the same +// COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. 
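// (Encoding note: Exec below writes each row in COPY text format, with values
// separated by tabs and the row terminated by a newline; appendEncodedText
// does the per-value escaping, and the buffer is flushed once it passes
// ciBufferFlushSize.)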
+func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if ci.isBad() { + return nil, driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if ci.isErrorSet() { + return nil, ci.err + } + + if len(v) == 0 { + return nil, ci.Close() + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { // Don't do anything, we're already closed + return nil + } + ci.closed = true + + if ci.isBad() { + return driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. + err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + ci.cn.inCopy = false + + if ci.isErrorSet() { + err = ci.err + return err + } + return nil +} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go new file mode 100644 index 00000000..2a60054e --- /dev/null +++ b/vendor/github.com/lib/pq/doc.go @@ -0,0 +1,245 @@ +/* +Package pq is a pure Go Postgres driver for the database/sql package. + +In most cases clients will use the database/sql package instead of +using this package directly. For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. For example: + + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) + * port - The port to bind to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. The file must contain PEM encoded data. 
+ * sslrootcert - The location of the root certificate file. The file + must contain PEM encoded data. + +Valid values for sslmode are: + + * disable - No SSL + * require - Always SSL (skip verification) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certificate presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) + +See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING +for more information about connection string parameters. + +Use single quotes for values that contain whitespace: + + "user=pqgotest password='with spaces'" + +A backslash will escape the next character in values: + + "user=space\ man password='it\'s valid'" + +Note that the connection parameter client_encoding (which sets the +text encoding for the connection) may be set but must be "UTF8", +matching the same rules as Postgres. It is an error to provide +any other value. + +In addition to the parameters listed above, any run-time parameter that can be +set at backend start time can be set in the connection string. For more +information, see +http://www.postgresql.org/docs/current/static/runtime-config.html. + +Most environment variables specified at http://www.postgresql.org/docs/current/static/libpq-envars.html +and supported by libpq are also supported by pq. If any of the environment +variables not supported by pq are set, pq will panic during connection +establishment. Environment variables have a lower precedence than explicitly +provided connection parameters. + +The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html +is supported, but on Windows PGPASSFILE must be specified explicitly. + + +Queries + + +database/sql does not dictate any specific format for parameter +markers in query strings, and pq uses the Postgres-native ordinal markers, +as shown above. The same marker can be reused for the same parameter: + + rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 + OR age BETWEEN $2 AND $2 + 3`, "orange", 64) + +pq does not support the LastInsertId() method of the Result type in database/sql. +To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres +RETURNING clause with a standard Query or QueryRow call: + + var userid int + err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) + VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) + +For more details on RETURNING, see the Postgres documentation: + + http://www.postgresql.org/docs/current/static/sql-insert.html + http://www.postgresql.org/docs/current/static/sql-update.html + http://www.postgresql.org/docs/current/static/sql-delete.html + +For additional instructions on querying see the documentation for the database/sql package. + + +Data Types + + +Parameters pass through driver.DefaultParameterConverter before they are handled +by this package. When the binary_parameters connection option is enabled, +[]byte values are sent directly to the backend as data in binary format.
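As a rough sketch of what the mappings below mean in practice (table and
column names here are illustrative):

	var id int64
	var name string
	var created time.Time
	err := db.QueryRow("SELECT id, name, created_at FROM users WHERE id = $1", 1).
		Scan(&id, &name, &created)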
+ +This package returns the following types for values from the PostgreSQL backend: + + - integer types smallint, integer, and bigint are returned as int64 + - floating-point types real and double precision are returned as float64 + - character types char, varchar, and text are returned as string + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time + - the boolean type is returned as bool + - the bytea type is returned as []byte + +All other types are returned directly from the backend as []byte values in text format. + + +Errors + + +pq may return errors of type *pq.Error which can be interrogated for error details: + + if err, ok := err.(*pq.Error); ok { + fmt.Println("pq error:", err.Code.Name()) + } + +See the pq.Error type for details. + + +Bulk imports + +You can perform bulk imports by preparing a statement returned by pq.CopyIn (or +pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement +handle can then be repeatedly "executed" to copy data into the target table. +After all data has been processed you should call Exec() once with no arguments +to flush all buffered data. Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. + +CopyIn uses COPY FROM internally. It is not possible to COPY outside of an +explicit transaction in pq. + +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. 
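A minimal sketch of that flow (channel name and reconnect intervals are
illustrative):

	listener := pq.NewListener(connStr, 10*time.Second, time.Minute, nil)
	if err := listener.Listen("events"); err != nil {
		log.Fatal(err)
	}
	for n := range listener.Notify {
		if n == nil {
			continue // connection was re-established after a loss
		}
		fmt.Println("notification on", n.Channel, "payload:", n.Extra)
	}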
+ +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +https://godoc.org/github.com/lib/pq/example/listen. + +*/ +package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go new file mode 100644 index 00000000..a6902fae --- /dev/null +++ b/vendor/github.com/lib/pq/encode.go @@ -0,0 +1,602 @@ +package pq + +import ( + "bytes" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "math" + "strconv" + "strings" + "sync" + "time" + + "github.com/lib/pq/oid" +) + +func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { + switch v := x.(type) { + case []byte: + return v + default: + return encode(parameterStatus, x, oid.T_unknown) + } +} + +func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(nil, v, 10) + case float64: + return strconv.AppendFloat(nil, v, 'f', -1, 64) + case []byte: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, v) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, []byte(v)) + } + + return []byte(v) + case bool: + return strconv.AppendBool(nil, v) + case time.Time: + return formatTs(v) + + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { + switch f { + case formatBinary: + return binaryDecode(parameterStatus, s, typ) + case formatText: + return textDecode(parameterStatus, s, typ) + default: + panic("not reached") + } +} + +func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return s + case oid.T_int8: + return int64(binary.BigEndian.Uint64(s)) + case oid.T_int4: + return int64(int32(binary.BigEndian.Uint32(s))) + case oid.T_int2: + return int64(int16(binary.BigEndian.Uint16(s))) + case oid.T_uuid: + b, err := decodeUUIDBinary(s) + if err != nil { + panic(err) + } + return b + + default: + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) + } + + panic("not reached") +} + +func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_char, oid.T_varchar, oid.T_text: + return string(s) + case oid.T_bytea: + b, err := parseBytea(s) + if err != nil { + errorf("%s", err) + } + return b + case oid.T_timestamptz: + return parseTs(parameterStatus.currentLocation, string(s)) + case oid.T_timestamp, oid.T_date: + return parseTs(nil, string(s)) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int4, oid.T_int2: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + // We always use 64 bit parsing, regardless of whether the input text is for + // a float4 or float8, because clients expect float64s for all float datatypes + // and returning a 32-bit parsed float64 produces lossy 
results. + f, err := strconv.ParseFloat(string(s), 64) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +// appendEncodedText encodes item in text format as required by COPY +// and appends to buf +func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(buf, v, 10) + case float64: + return strconv.AppendFloat(buf, v, 'f', -1, 64) + case []byte: + encodedBytea := encodeBytea(parameterStatus.serverVersion, v) + return appendEscapedText(buf, string(encodedBytea)) + case string: + return appendEscapedText(buf, v) + case bool: + return strconv.AppendBool(buf, v) + case time.Time: + return append(buf, formatTs(v)...) + case nil: + return append(buf, "\\N"...) + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) + for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // check for a 30-minute-offset timezone + if (typ == oid.T_timestamptz || typ == oid.T_timetz) && + str[len(str)-3] == ':' { + f += ":00" + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + return t +} + +var errInvalidTimestamp = errors.New("invalid timestamp") + +type timestampParser struct { + err error +} + +func (p *timestampParser) expect(str string, char byte, pos int) { + if p.err != nil { + return + } + if pos+1 > len(str) { + p.err = errInvalidTimestamp + return + } + if c := str[pos]; c != char && p.err == nil { + p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func (p *timestampParser) mustAtoi(str string, begin int, end int) int { + if p.err != nil { + return 0 + } + if begin < 0 || end < 0 || begin > end || end > len(str) { + p.err = errInvalidTimestamp + return 0 + } + result, err := strconv.Atoi(str[begin:end]) + if err != nil { + if p.err == nil { + p.err = fmt.Errorf("expected number; got '%v'", str) + } + return 0 + } + return result +} + +// The location cache caches the time zones typically used by the client. +type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. +var globalLocationCache = newLocationCache() + +func newLocationCache() *locationCache { + return &locationCache{cache: make(map[int]*time.Location)} +} + +// Returns the cached timezone for the specified offset, creating and caching +// it if necessary. 
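// (Illustrative: a timestamptz parsed with a "+05:30" offset produces a tzOff
// of 19800 seconds, and getLocation below hands back a cached
// time.FixedZone("", 19800) instead of allocating a new zone per row.)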
+func (c *locationCache) getLocation(offset int) *time.Location { + c.lock.Lock() + defer c.lock.Unlock() + + location, ok := c.cache[offset] + if !ok { + location = time.FixedZone("", offset) + c.cache[offset] = location + } + + return location +} + +var infinityTsEnabled = false +var infinityTsNegative time.Time +var infinityTsPositive time.Time + +const ( + infinityTsEnabledAlready = "pq: infinity timestamp enabled already" + infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" +) + +// EnableInfinityTs controls the handling of Postgres' "-infinity" and +// "infinity" "timestamp"s. +// +// If EnableInfinityTs is not called, "-infinity" and "infinity" will return +// []byte("-infinity") and []byte("infinity") respectively, and potentially +// cause error "sql: Scan error on column index 0: unsupported driver -> Scan +// pair: []uint8 -> *time.Time", when scanning into a time.Time value. +// +// Once EnableInfinityTs has been called, all connections created using this +// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", +// "timestamp with time zone" and "date" types to the predefined minimum and +// maximum times, respectively. When encoding time.Time values, any time which +// equals or precedes the predefined minimum time will be encoded to +// "-infinity". Any values at or past the maximum time will similarly be +// encoded to "infinity". +// +// If EnableInfinityTs is called with negative >= positive, it will panic. +// Calling EnableInfinityTs after a connection has been established results in +// undefined behavior. If EnableInfinityTs is called more than once, it will +// panic. +func EnableInfinityTs(negative time.Time, positive time.Time) { + if infinityTsEnabled { + panic(infinityTsEnabledAlready) + } + if !negative.Before(positive) { + panic(infinityTsNegativeMustBeSmaller) + } + infinityTsEnabled = true + infinityTsNegative = negative + infinityTsPositive = positive +} + +/* + * Testing might want to toggle infinityTsEnabled + */ +func disableInfinityTs() { + infinityTsEnabled = false +} + +// This is a time function specific to the Postgres default DateStyle +// setting ("ISO, MDY"), the only one we currently support. This +// accounts for the discrepancies between the parsing available with +// time.Parse and the Postgres date formatting quirks. +func parseTs(currentLocation *time.Location, str string) interface{} { + switch str { + case "-infinity": + if infinityTsEnabled { + return infinityTsNegative + } + return []byte(str) + case "infinity": + if infinityTsEnabled { + return infinityTsPositive + } + return []byte(str) + } + t, err := ParseTimestamp(currentLocation, str) + if err != nil { + panic(err) + } + return t +} + +// ParseTimestamp parses Postgres' text format. It returns a time.Time in +// currentLocation iff that time's offset agrees with the offset sent from the +// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the +// fixed offset provided by the Postgres server.
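// For example (a hedged illustration):
//
//	t, err := pq.ParseTimestamp(nil, "2001-02-03 04:05:06.000007-05")
//
// yields 2001-02-03 04:05:06.000007 in a fixed -05:00 zone, since no
// currentLocation was supplied to reconcile offsets against.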
+func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { + p := timestampParser{} + + monSep := strings.IndexRune(str, '-') + // this is Gregorian year, not ISO Year + // In Gregorian system, the year 1 BC is followed by AD 1 + year := p.mustAtoi(str, 0, monSep) + daySep := monSep + 3 + month := p.mustAtoi(str, monSep+1, daySep) + p.expect(str, '-', daySep) + timeSep := daySep + 3 + day := p.mustAtoi(str, daySep+1, timeSep) + + minLen := monSep + len("01-01") + 1 + + isBC := strings.HasSuffix(str, " BC") + if isBC { + minLen += 3 + } + + var hour, minute, second int + if len(str) > minLen { + p.expect(str, ' ', timeSep) + minSep := timeSep + 3 + p.expect(str, ':', minSep) + hour = p.mustAtoi(str, timeSep+1, minSep) + secSep := minSep + 3 + p.expect(str, ':', secSep) + minute = p.mustAtoi(str, minSep+1, secSep) + secEnd := secSep + 3 + second = p.mustAtoi(str, secSep+1, secEnd) + } + remainderIdx := monSep + len("01-01 00:00:00") + 1 + // Three optional (but ordered) sections follow: the + // fractional seconds, the time zone offset, and the BC + // designation. We set them up here and adjust the other + // offsets if the preceding sections exist. + + nanoSec := 0 + tzOff := 0 + + if remainderIdx < len(str) && str[remainderIdx] == '.' { + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+ ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { + // time zone separator is always '-' or '+' (UTC is +00) + var tzSign int + switch c := str[tzStart]; c { + case '-': + tzSign = -1 + case '+': + tzSign = +1 + default: + return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) + remainderIdx += 3 + var tzMin, tzSec int + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } + var isoYear int + + if isBC { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t, p.err +} + +// formatTs formats t into a format postgres understands. +func formatTs(t time.Time) []byte { + if infinityTsEnabled { + // t <= -infinity : ! (t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + return FormatTimestamp(t) +} + +// FormatTimestamp formats t into Postgres' text format for timestamps. 
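// (Worked example: Go numbers 1 BC as year 0, so a time.Time in year 0 is
// shifted to year 1 and rendered with a trailing " BC", matching the ISO
// convention noted below that year 0000 is 1 BC.)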
+func FormatTimestamp(t time.Time) []byte { + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) + + _, offset := t.Zone() + offset = offset % 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. +func parseBytea(s []byte) (result []byte, err error) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + return nil, err + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + return nil, fmt.Errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseInt(string(s[1:4]), 8, 9) + if err != nil { + return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. + i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result, nil +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. 
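// A minimal usage sketch (column name illustrative):
//
//	var deletedAt pq.NullTime
//	err := db.QueryRow("SELECT deleted_at FROM users WHERE id = $1", 1).Scan(&deletedAt)
//	if deletedAt.Valid {
//		fmt.Println("deleted at", deletedAt.Time)
//	}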
+func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go new file mode 100644 index 00000000..3d66ba7c --- /dev/null +++ b/vendor/github.com/lib/pq/error.go @@ -0,0 +1,515 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +// Error severities +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +// Error represents an error communicating with the server. +// +// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields +type Error struct { + Severity string + Code ErrorCode + Message string + Detail string + Hint string + Position string + InternalPosition string + InternalQuery string + Where string + Schema string + Table string + Column string + DataTypeName string + Constraint string + File string + Line string + Routine string +} + +// ErrorCode is a five-character error code. +type ErrorCode string + +// Name returns a more human friendly rendering of the error code, namely the +// "condition name". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Name() string { + return errorCodeNames[ec] +} + +// ErrorClass is only the class part of an error code. +type ErrorClass string + +// Name returns the condition name of an error class. It is equivalent to the +// condition name of the "standard" error code (i.e. the one having the last +// three characters "000"). +func (ec ErrorClass) Name() string { + return errorCodeNames[ErrorCode(ec+"000")] +} + +// Class returns the error class, e.g. "28". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Class() ErrorClass { + return ErrorClass(ec[0:2]) +} + +// errorCodeNames is a mapping between the five-character error codes and the +// human readable "condition names". 
It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics Exception + "0Z000": "diagnostics_exception", + "0Z002": "stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": "nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "2200H": "sequence_generator_limit_exceeded", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": 
"zero_length_character_string", + "22P01": "floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": "bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + "2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + "2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": "invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": "srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": 
"windowing_error", + "42P19": "invalid_recursion", + "42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": "duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": "duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": "fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": "fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": "fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": 
"fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", + // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return err +} + +// Fatal returns true if the Error Severity is fatal. +func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +// TODO(ainar-g) Rename to errorf after removing panics. +func fmterrorf(s string, args ...interface{}) error { + return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (cn *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + cn.bad = true + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + cn.bad = true + *err = v + case error: + if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + cn.bad = true + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. 
+ if *err == driver.ErrBadConn { + cn.bad = true + } +} diff --git a/vendor/github.com/lib/pq/go.mod b/vendor/github.com/lib/pq/go.mod new file mode 100644 index 00000000..edf0b343 --- /dev/null +++ b/vendor/github.com/lib/pq/go.mod @@ -0,0 +1 @@ +module github.com/lib/pq diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go new file mode 100644 index 00000000..850bb904 --- /dev/null +++ b/vendor/github.com/lib/pq/notify.go @@ -0,0 +1,797 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. +type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. + Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. +type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// NewListenerConn creates a new ListenerConn. Use NewListener instead. +func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + return newDialListenerConn(defaultDialer{}, name, notificationChan) +} + +func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { + cn, err := DialOpen(d, name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: c, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. +// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. 
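+//
+// The only transitions setState permits form the simple-query cycle
+// (editorial note, derived from the switch below):
+//
+//	connStateIdle -> connStateExpectResponse -> connStateExpectReadyForQuery -> connStateIdle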
+func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. +func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. + l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. + if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'N', 'S': + // ignore + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. + close(l.replyChan) + + // let the listener know we're done + close(l.notificationChan) + + // this ListenerConn is done +} + +// Listen sends a LISTEN query to the server. See ExecSimpleQuery. 
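+//
+// An editorial sketch of low-level usage (conninfo is an assumed connection
+// string; most applications should prefer the higher-level Listener type
+// further below):
+//
+//	notify := make(chan *Notification, 32)
+//	lc, err := NewListenerConn(conninfo, notify)
+//	if err == nil {
+//		_, _ = lc.Listen("jobs") // errors elided for brevity
+//		for n := range notify {  // channel closes when the connection dies
+//			fmt.Println(n.Channel, n.Extra)
+//		}
+//	}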
+func (l *ListenerConn) Listen(channel string) (bool, error) { + return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) +} + +// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Unlisten(channel string) (bool, error) { + return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) +} + +// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. +func (l *ListenerConn) UnlistenAll() (bool, error) { + return l.ExecSimpleQuery("UNLISTEN *") +} + +// Ping the remote server to make sure it's alive. Non-nil error means the +// connection has failed and should be abandoned. +func (l *ListenerConn) Ping() error { + sent, err := l.ExecSimpleQuery("") + if !sent { + return err + } + if err != nil { + // shouldn't happen + panic(err) + } + return nil +} + +// Attempt to send a query on the connection. Returns an error if sending the +// query failed; in that case the caller should initiate closure of this +// connection. The caller must be holding senderLock (see acquireSenderLock +// and releaseSenderLock). +func (l *ListenerConn) sendSimpleQuery(q string) (err error) { + defer errRecoverNoErrBadConn(&err) + + // must set connection state before sending the query + if !l.setState(connStateExpectResponse) { + panic("two queries running at the same time") + } + + // Can't use l.cn.writeBuf here because it uses the scratch buffer which + // might get overwritten by listenerConnLoop. + b := &writeBuf{ + buf: []byte("Q\x00\x00\x00\x00"), + pos: 1, + } + b.string(q) + l.cn.send(b) + + return nil +} + +// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable +// parameters) on the connection. The possible return values are: +// 1) "executed" is true; the query was executed to completion on the +// database server. If the query failed, err will be set to the error +// returned by the database, otherwise err will be nil. +// 2) If "executed" is false, the query could not be executed on the remote +// server. err will be non-nil. +// +// After a call to ExecSimpleQuery has returned an executed=false value, the +// connection has either been closed or will be closed shortly thereafter, and +// all subsequently executed queries will return an error. +func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { + if err = l.acquireSenderLock(); err != nil { + return false, err + } + defer l.releaseSenderLock() + + err = l.sendSimpleQuery(q) + if err != nil { + // We can't know what state the protocol is in, so we need to abandon + // this connection. + l.connectionLock.Lock() + // Set the error pointer if it hasn't been set already; see + // listenerConnMain. + if l.err == nil { + l.err = err + } + l.connectionLock.Unlock() + l.cn.c.Close() + return false, err + } + + // now we just wait for a reply... + for { + m, ok := <-l.replyChan + if !ok { + // We lost the connection to the server; don't bother waiting for + // a response. err should have been set already. + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + return false, err + } + switch m.typ { + case 'Z': + // sanity check + if m.err != nil { + panic("m.err != nil") + } + // done; err might or might not be set + return true, err + + case 'E': + // sanity check + if m.err == nil { + panic("m.err == nil") + } + // server responded with an error; ReadyForQuery to follow + err = m.err + + default: + return false, fmt.Errorf("unknown response for simple query: %q", m.typ) + } + } +} + +// Close closes the connection.
+func (l *ListenerConn) Close() error { + l.connectionLock.Lock() + if l.err != nil { + l.connectionLock.Unlock() + return errListenerConnClosed + } + l.err = errListenerConnClosed + l.connectionLock.Unlock() + // We can't send anything on the connection without holding senderLock. + // Simply close the net.Conn to wake up everyone operating on it. + return l.cn.c.Close() +} + +// Err returns the reason the connection was closed. It is not safe to call +// this function until l.Notify has been closed. +func (l *ListenerConn) Err() error { + return l.err +} + +var errListenerClosed = errors.New("pq: Listener has been closed") + +// ErrChannelAlreadyOpen is returned from Listen when a channel is already +// open. +var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") + +// ErrChannelNotOpen is returned from Unlisten when a channel is not open. +var ErrChannelNotOpen = errors.New("pq: channel is not open") + +// ListenerEventType is an enumeration of listener event types. +type ListenerEventType int + +const ( + // ListenerEventConnected is emitted only when the database connection + // has been initially initialized. The err argument of the callback + // will always be nil. + ListenerEventConnected ListenerEventType = iota + + // ListenerEventDisconnected is emitted after a database connection has + // been lost, either because of an error or because Close has been + // called. The err argument will be set to the reason the database + // connection was lost. + ListenerEventDisconnected + + // ListenerEventReconnected is emitted after a database connection has + // been re-established after connection loss. The err argument of the + // callback will always be nil. After this event has been emitted, a + // nil pq.Notification is sent on the Listener.Notify channel. + ListenerEventReconnected + + // ListenerEventConnectionAttemptFailed is emitted after a connection + // to the database was attempted, but failed. The err argument will be + // set to an error describing why the connection attempt did not + // succeed. + ListenerEventConnectionAttemptFailed +) + +// EventCallbackType is the event callback type. See also ListenerEventType +// constants' documentation. +type EventCallbackType func(event ListenerEventType, err error) + +// Listener provides an interface for listening to notifications from a +// PostgreSQL database. For general usage information, see section +// "Notifications". +// +// Listener can safely be used from concurrently running goroutines. +type Listener struct { + // Channel for receiving notifications from the database. In some cases a + // nil value will be sent. See section "Notifications" above. + Notify chan *Notification + + name string + minReconnectInterval time.Duration + maxReconnectInterval time.Duration + dialer Dialer + eventCallback EventCallbackType + + lock sync.Mutex + isClosed bool + reconnectCond *sync.Cond + cn *ListenerConn + connNotificationChan <-chan *Notification + channels map[string]struct{} +} + +// NewListener creates a new database connection dedicated to LISTEN / NOTIFY. +// +// name should be set to a connection string to be used to establish the +// database connection (see section "Connection String Parameters" above). +// +// minReconnectInterval controls the duration to wait before trying to +// re-establish the database connection after connection loss. After each +// consecutive failure this interval is doubled, until maxReconnectInterval is +// reached. 
Successfully completing the connection establishment procedure +// resets the interval back to minReconnectInterval. +// +// The last parameter eventCallback can be set to a function which will be +// called by the Listener when the state of the underlying database connection +// changes. This callback will be called by the goroutine which dispatches the +// notifications over the Notify channel, so you should try to avoid doing +// potentially time-consuming operations from the callback. +func NewListener(name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) +} + +// NewDialListener is like NewListener but it takes a Dialer. +func NewDialListener(d Dialer, + name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + + l := &Listener{ + name: name, + minReconnectInterval: minReconnectInterval, + maxReconnectInterval: maxReconnectInterval, + dialer: d, + eventCallback: eventCallback, + + channels: make(map[string]struct{}), + + Notify: make(chan *Notification, 32), + } + l.reconnectCond = sync.NewCond(&l.lock) + + go l.listenerMain() + + return l +} + +// NotificationChannel returns the notification channel for this listener. +// This is the same channel as Notify, and will not be recreated during the +// lifetime of the Listener. +func (l *Listener) NotificationChannel() <-chan *Notification { + return l.Notify +} + +// Listen starts listening for notifications on a channel. Calls to this +// function will block until an acknowledgement has been received from the +// server. Note that Listener automatically re-establishes the connection +// after connection loss, so this function may block indefinitely if the +// connection cannot be re-established. +// +// Listen will only fail in three cases: +// 1) The channel is already open. The returned error will be +// ErrChannelAlreadyOpen. +// 2) The query was executed on the remote server, but PostgreSQL returned an +// error message in response to the query. The returned error will be a +// pq.Error containing the information the server supplied. +// 3) Close is called on the Listener before the request could be completed. +// +// The channel name is case-sensitive. +func (l *Listener) Listen(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // The server allows you to issue a LISTEN on a channel which is already + // open, but it seems useful to be able to detect this case and catch + // mistakes in application logic. If the application genuinely doesn't + // care, it can check the exported error and ignore it. + _, exists := l.channels[channel] + if exists { + return ErrChannelAlreadyOpen + } + + if l.cn != nil { + // If gotResponse is true but error is set, the query was executed on + // the remote server, but resulted in an error. This should be + // relatively rare, so it's fine if we just pass the error to our + // caller. However, if gotResponse is false, we could not complete the + // query on the remote server and our underlying connection is about + // to go away, so we only add relname to l.channels, and wait for + // resync() to take care of the rest.
+ gotResponse, err := l.cn.Listen(channel) + if gotResponse && err != nil { + return err + } + } + + l.channels[channel] = struct{}{} + for l.cn == nil { + l.reconnectCond.Wait() + // we let go of the mutex for a while + if l.isClosed { + return errListenerClosed + } + } + + return nil +} + +// Unlisten removes a channel from the Listener's channel list. Returns +// ErrChannelNotOpen if the Listener is not listening on the specified channel. +// Returns immediately with no error if there is no connection. Note that you +// might still get notifications for this channel even after Unlisten has +// returned. +// +// The channel name is case-sensitive. +func (l *Listener) Unlisten(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // Similarly to LISTEN, this is not an error in Postgres, but it seems + // useful to distinguish from the normal conditions. + _, exists := l.channels[channel] + if !exists { + return ErrChannelNotOpen + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.Unlisten(channel) + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + delete(l.channels, channel) + return nil +} + +// UnlistenAll removes all channels from the Listener's channel list. Returns +// immediately with no error if there is no connection. Note that you might +// still get notifications for any of the deleted channels even after +// UnlistenAll has returned. +func (l *Listener) UnlistenAll() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.UnlistenAll() + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + l.channels = make(map[string]struct{}) + return nil +} + +// Ping the remote server to make sure it's alive. Non-nil return value means +// that there is no active connection. +func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func(notificationChan <-chan *Notification) { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). 
+ gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }(notificationChan) + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. + for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. +// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. +func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + // Unblock calls to Listen() + l.reconnectCond.Broadcast() + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. +func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(time.Until(nextReconnect)) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go new file mode 100644 index 00000000..caaede24 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/doc.go @@ -0,0 +1,6 @@ +// Package oid contains OID constants +// as defined by the Postgres server. 
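+//
+// For example (editorial note), T_int4 below is OID 23, and
+// TypeName[T_int4] yields "INT4".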
+package oid + +// Oid is a Postgres Object ID. +type Oid uint32 diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go new file mode 100644 index 00000000..ecc84c2c --- /dev/null +++ b/vendor/github.com/lib/pq/oid/types.go @@ -0,0 +1,343 @@ +// Code generated by gen.go. DO NOT EDIT. + +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_ddl_command Oid = 32 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + T_smgr Oid = 210 + T_index_am_handler Oid = 325 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + T__timestamp Oid = 1115 + T__date Oid = 1182 + T__time Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 + T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_pg_lsn Oid = 3220 + T__pg_lsn Oid = 3221 + T_tsm_handler Oid = 3310 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 + 
T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_jsonb Oid = 3802 + T__jsonb Oid = 3807 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 + T_pg_shseclabel Oid = 4066 + T_regnamespace Oid = 4089 + T__regnamespace Oid = 4090 + T_regrole Oid = 4096 + T__regrole Oid = 4097 +) + +var TypeName = map[Oid]string{ + T_bool: "BOOL", + T_bytea: "BYTEA", + T_char: "CHAR", + T_name: "NAME", + T_int8: "INT8", + T_int2: "INT2", + T_int2vector: "INT2VECTOR", + T_int4: "INT4", + T_regproc: "REGPROC", + T_text: "TEXT", + T_oid: "OID", + T_tid: "TID", + T_xid: "XID", + T_cid: "CID", + T_oidvector: "OIDVECTOR", + T_pg_ddl_command: "PG_DDL_COMMAND", + T_pg_type: "PG_TYPE", + T_pg_attribute: "PG_ATTRIBUTE", + T_pg_proc: "PG_PROC", + T_pg_class: "PG_CLASS", + T_json: "JSON", + T_xml: "XML", + T__xml: "_XML", + T_pg_node_tree: "PG_NODE_TREE", + T__json: "_JSON", + T_smgr: "SMGR", + T_index_am_handler: "INDEX_AM_HANDLER", + T_point: "POINT", + T_lseg: "LSEG", + T_path: "PATH", + T_box: "BOX", + T_polygon: "POLYGON", + T_line: "LINE", + T__line: "_LINE", + T_cidr: "CIDR", + T__cidr: "_CIDR", + T_float4: "FLOAT4", + T_float8: "FLOAT8", + T_abstime: "ABSTIME", + T_reltime: "RELTIME", + T_tinterval: "TINTERVAL", + T_unknown: "UNKNOWN", + T_circle: "CIRCLE", + T__circle: "_CIRCLE", + T_money: "MONEY", + T__money: "_MONEY", + T_macaddr: "MACADDR", + T_inet: "INET", + T__bool: "_BOOL", + T__bytea: "_BYTEA", + T__char: "_CHAR", + T__name: "_NAME", + T__int2: "_INT2", + T__int2vector: "_INT2VECTOR", + T__int4: "_INT4", + T__regproc: "_REGPROC", + T__text: "_TEXT", + T__tid: "_TID", + T__xid: "_XID", + T__cid: "_CID", + T__oidvector: "_OIDVECTOR", + T__bpchar: "_BPCHAR", + T__varchar: "_VARCHAR", + T__int8: "_INT8", + T__point: "_POINT", + T__lseg: "_LSEG", + T__path: "_PATH", + T__box: "_BOX", + T__float4: "_FLOAT4", + T__float8: "_FLOAT8", + T__abstime: "_ABSTIME", + T__reltime: "_RELTIME", + T__tinterval: "_TINTERVAL", + T__polygon: "_POLYGON", + T__oid: "_OID", + T_aclitem: "ACLITEM", + T__aclitem: "_ACLITEM", + T__macaddr: "_MACADDR", + T__inet: "_INET", + T_bpchar: "BPCHAR", + T_varchar: "VARCHAR", + T_date: "DATE", + T_time: "TIME", + T_timestamp: "TIMESTAMP", + T__timestamp: "_TIMESTAMP", + T__date: "_DATE", + T__time: "_TIME", + T_timestamptz: "TIMESTAMPTZ", + T__timestamptz: "_TIMESTAMPTZ", + T_interval: "INTERVAL", + T__interval: "_INTERVAL", + T__numeric: "_NUMERIC", + T_pg_database: "PG_DATABASE", + T__cstring: "_CSTRING", + T_timetz: "TIMETZ", + T__timetz: "_TIMETZ", + T_bit: "BIT", + T__bit: "_BIT", + T_varbit: "VARBIT", + T__varbit: "_VARBIT", + T_numeric: "NUMERIC", + T_refcursor: "REFCURSOR", + T__refcursor: "_REFCURSOR", + T_regprocedure: "REGPROCEDURE", + T_regoper: "REGOPER", + T_regoperator: "REGOPERATOR", + T_regclass: "REGCLASS", + T_regtype: "REGTYPE", + T__regprocedure: "_REGPROCEDURE", + T__regoper: "_REGOPER", + T__regoperator: "_REGOPERATOR", + T__regclass: "_REGCLASS", + T__regtype: "_REGTYPE", + T_record: "RECORD", + T_cstring: "CSTRING", + T_any: "ANY", + T_anyarray: "ANYARRAY", + T_void: "VOID", + T_trigger: "TRIGGER", + T_language_handler: "LANGUAGE_HANDLER", + T_internal: "INTERNAL", + T_opaque: "OPAQUE", + T_anyelement: 
"ANYELEMENT", + T__record: "_RECORD", + T_anynonarray: "ANYNONARRAY", + T_pg_authid: "PG_AUTHID", + T_pg_auth_members: "PG_AUTH_MEMBERS", + T__txid_snapshot: "_TXID_SNAPSHOT", + T_uuid: "UUID", + T__uuid: "_UUID", + T_txid_snapshot: "TXID_SNAPSHOT", + T_fdw_handler: "FDW_HANDLER", + T_pg_lsn: "PG_LSN", + T__pg_lsn: "_PG_LSN", + T_tsm_handler: "TSM_HANDLER", + T_anyenum: "ANYENUM", + T_tsvector: "TSVECTOR", + T_tsquery: "TSQUERY", + T_gtsvector: "GTSVECTOR", + T__tsvector: "_TSVECTOR", + T__gtsvector: "_GTSVECTOR", + T__tsquery: "_TSQUERY", + T_regconfig: "REGCONFIG", + T__regconfig: "_REGCONFIG", + T_regdictionary: "REGDICTIONARY", + T__regdictionary: "_REGDICTIONARY", + T_jsonb: "JSONB", + T__jsonb: "_JSONB", + T_anyrange: "ANYRANGE", + T_event_trigger: "EVENT_TRIGGER", + T_int4range: "INT4RANGE", + T__int4range: "_INT4RANGE", + T_numrange: "NUMRANGE", + T__numrange: "_NUMRANGE", + T_tsrange: "TSRANGE", + T__tsrange: "_TSRANGE", + T_tstzrange: "TSTZRANGE", + T__tstzrange: "_TSTZRANGE", + T_daterange: "DATERANGE", + T__daterange: "_DATERANGE", + T_int8range: "INT8RANGE", + T__int8range: "_INT8RANGE", + T_pg_shseclabel: "PG_SHSECLABEL", + T_regnamespace: "REGNAMESPACE", + T__regnamespace: "_REGNAMESPACE", + T_regrole: "REGROLE", + T__regrole: "_REGROLE", +} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go new file mode 100644 index 00000000..c6aa5b9a --- /dev/null +++ b/vendor/github.com/lib/pq/rows.go @@ -0,0 +1,93 @@ +package pq + +import ( + "math" + "reflect" + "time" + + "github.com/lib/pq/oid" +) + +const headerSize = 4 + +type fieldDesc struct { + // The object ID of the data type. + OID oid.Oid + // The data type size (see pg_type.typlen). + // Note that negative values denote variable-width types. + Len int + // The type modifier (see pg_attribute.atttypmod). + // The meaning of the modifier is type-specific. + Mod int +} + +func (fd fieldDesc) Type() reflect.Type { + switch fd.OID { + case oid.T_int8: + return reflect.TypeOf(int64(0)) + case oid.T_int4: + return reflect.TypeOf(int32(0)) + case oid.T_int2: + return reflect.TypeOf(int16(0)) + case oid.T_varchar, oid.T_text: + return reflect.TypeOf("") + case oid.T_bool: + return reflect.TypeOf(false) + case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: + return reflect.TypeOf(time.Time{}) + case oid.T_bytea: + return reflect.TypeOf([]byte(nil)) + default: + return reflect.TypeOf(new(interface{})).Elem() + } +} + +func (fd fieldDesc) Name() string { + return oid.TypeName[fd.OID] +} + +func (fd fieldDesc) Length() (length int64, ok bool) { + switch fd.OID { + case oid.T_text, oid.T_bytea: + return math.MaxInt64, true + case oid.T_varchar, oid.T_bpchar: + return int64(fd.Mod - headerSize), true + default: + return 0, false + } +} + +func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { + switch fd.OID { + case oid.T_numeric, oid.T__numeric: + mod := fd.Mod - headerSize + precision = int64((mod >> 16) & 0xffff) + scale = int64(mod & 0xffff) + return precision, scale, true + default: + return 0, 0, false + } +} + +// ColumnTypeScanType returns the value type that can be used to scan types into. +func (rs *rows) ColumnTypeScanType(index int) reflect.Type { + return rs.colTyps[index].Type() +} + +// ColumnTypeDatabaseTypeName return the database system type name. 
+func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { + return rs.colTyps[index].Name() +} + +// ColumnTypeLength returns the length of the column type if the column is a +// variable length type. If the column is not a variable length type ok +// should return false. +func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { + return rs.colTyps[index].Length() +} + +// ColumnTypePrecisionScale should return the precision and scale for decimal +// types. If not applicable, ok should be false. +func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + return rs.colTyps[index].PrecisionScale() +} diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go new file mode 100644 index 00000000..484f378a --- /dev/null +++ b/vendor/github.com/lib/pq/scram/scram.go @@ -0,0 +1,264 @@ +// Copyright (c) 2014 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. +// +// http://tools.ietf.org/html/rfc5802 +// +package scram + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" +) + +// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). +// +// A Client may be used within a SASL conversation with logic resembling: +// +// var in []byte +// var client = scram.NewClient(sha1.New, user, pass) +// for client.Step(in) { +// out := client.Out() +// // send out to server +// in := serverOut +// } +// if client.Err() != nil { +// // auth failed +// } +// +type Client struct { + newHash func() hash.Hash + + user string + pass string + step int + out bytes.Buffer + err error + + clientNonce []byte + serverNonce []byte + saltedPass []byte + authMsg bytes.Buffer +} + +// NewClient returns a new SCRAM-* client with the provided hash algorithm. 
+// +// For SCRAM-SHA-256, for example, use: +// +// client := scram.NewClient(sha256.New, user, pass) +// +func NewClient(newHash func() hash.Hash, user, pass string) *Client { + c := &Client{ + newHash: newHash, + user: user, + pass: pass, + } + c.out.Grow(256) + c.authMsg.Grow(256) + return c +} + +// Out returns the data to be sent to the server in the current step. +func (c *Client) Out() []byte { + if c.out.Len() == 0 { + return nil + } + return c.out.Bytes() +} + +// Err returns the error that occurred, or nil if there were no errors. +func (c *Client) Err() error { + return c.err +} + +// SetNonce sets the client nonce to the provided value. +// If not set, the nonce is generated automatically from crypto/rand on the first step. +func (c *Client) SetNonce(nonce []byte) { + c.clientNonce = nonce +} + +var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") + +// Step processes the incoming data from the server and makes the +// next round of data for the server available via Client.Out. +// Step returns false if there are no errors and more data is +// still expected. +func (c *Client) Step(in []byte) bool { + c.out.Reset() + if c.step > 2 || c.err != nil { + return false + } + c.step++ + switch c.step { + case 1: + c.err = c.step1(in) + case 2: + c.err = c.step2(in) + case 3: + c.err = c.step3(in) + } + return c.step > 2 || c.err != nil +} + +func (c *Client) step1(in []byte) error { + if len(c.clientNonce) == 0 { + const nonceLen = 16 + buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) + if _, err := rand.Read(buf[:nonceLen]); err != nil { + return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) + } + c.clientNonce = buf[nonceLen:] + b64.Encode(c.clientNonce, buf[:nonceLen]) + } + c.authMsg.WriteString("n=") + escaper.WriteString(&c.authMsg, c.user) + c.authMsg.WriteString(",r=") + c.authMsg.Write(c.clientNonce) + + c.out.WriteString("n,,") + c.out.Write(c.authMsg.Bytes()) + return nil +} + +var b64 = base64.StdEncoding + +func (c *Client) step2(in []byte) error { + c.authMsg.WriteByte(',') + c.authMsg.Write(in) + + fields := bytes.Split(in, []byte(",")) + if len(fields) != 3 { + return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) + } + if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) + } + if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) + } + if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + + c.serverNonce = fields[0][2:] + if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { + return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) + } + + salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) + n, err := b64.Decode(salt, fields[1][2:]) + if err != nil { + return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) + } + salt = salt[:n] + iterCount, err := strconv.Atoi(string(fields[2][2:])) + if err != nil { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + c.saltPassword(salt, iterCount) + + c.authMsg.WriteString(",c=biws,r=") + c.authMsg.Write(c.serverNonce) + + c.out.WriteString("c=biws,r=") +
c.out.Write(c.serverNonce) + c.out.WriteString(",p=") + c.out.Write(c.clientProof()) + return nil +} + +func (c *Client) step3(in []byte) error { + var isv, ise bool + var fields = bytes.Split(in, []byte(",")) + if len(fields) == 1 { + isv = bytes.HasPrefix(fields[0], []byte("v=")) + ise = bytes.HasPrefix(fields[0], []byte("e=")) + } + if ise { + return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) + } else if !isv { + return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) + } + if !bytes.Equal(c.serverSignature(), fields[0][2:]) { + return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) + } + return nil +} + +func (c *Client) saltPassword(salt []byte, iterCount int) { + mac := hmac.New(c.newHash, []byte(c.pass)) + mac.Write(salt) + mac.Write([]byte{0, 0, 0, 1}) + ui := mac.Sum(nil) + hi := make([]byte, len(ui)) + copy(hi, ui) + for i := 1; i < iterCount; i++ { + mac.Reset() + mac.Write(ui) + mac.Sum(ui[:0]) + for j, b := range ui { + hi[j] ^= b + } + } + c.saltedPass = hi +} + +func (c *Client) clientProof() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + hash := c.newHash() + hash.Write(clientKey) + storedKey := hash.Sum(nil) + mac = hmac.New(c.newHash, storedKey) + mac.Write(c.authMsg.Bytes()) + clientProof := mac.Sum(nil) + for i, b := range clientKey { + clientProof[i] ^= b + } + clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) + b64.Encode(clientProof64, clientProof) + return clientProof64 +} + +func (c *Client) serverSignature() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + + mac = hmac.New(c.newHash, serverKey) + mac.Write(c.authMsg.Bytes()) + serverSignature := mac.Sum(nil) + + encoded := make([]byte, b64.EncodedLen(len(serverSignature))) + b64.Encode(encoded, serverSignature) + return encoded +} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go new file mode 100644 index 00000000..d9020845 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl.go @@ -0,0 +1,175 @@ +package pq + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "os" + "os/user" + "path/filepath" +) + +// ssl generates a function to upgrade a net.Conn based on the "sslmode" and +// related settings. The function is nil when no upgrade should take place. +func ssl(o values) (func(net.Conn) (net.Conn, error), error) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o["sslmode"]; mode { + // "require" is the default. + case "", "require": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + + // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: + // + // Note: For backwards compatibility with earlier versions of + // PostgreSQL, if a root CA file exists, the behavior of + // sslmode=require will be the same as that of verify-ca, meaning the + // server certificate is validated against the CA. Relying on this + // behavior is discouraged, and applications that need certificate + // validation should always use verify-ca or verify-full. + if sslrootcert, ok := o["sslrootcert"]; ok { + if _, err := os.Stat(sslrootcert); err == nil { + verifyCaOnly = true + } else { + delete(o, "sslrootcert") + } + } + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. 
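+	// The CA-only check is instead performed by sslVerifyCertificateAuthority
+	// (defined below) once the TLS handshake has completed.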
+ tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o["host"] + case "disable": + return nil, nil + default: + return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) + } + + err := sslClientCertificates(&tlsConf, o) + if err != nil { + return nil, err + } + err = sslCertificateAuthority(&tlsConf, o) + if err != nil { + return nil, err + } + + // Accept renegotiation requests initiated by the backend. + // + // Renegotiation was deprecated then removed from PostgreSQL 9.5, but + // the default configuration of older versions has it enabled. Redshift + // also initiates renegotiations and cannot be reconfigured. + tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient + + return func(conn net.Conn) (net.Conn, error) { + client := tls.Client(conn, &tlsConf) + if verifyCaOnly { + err := sslVerifyCertificateAuthority(client, &tlsConf) + if err != nil { + return nil, err + } + } + return client, nil + }, nil +} + +// sslClientCertificates adds the certificate specified in the "sslcert" and +// "sslkey" settings, or if they aren't set, from the .postgresql directory +// in the user's home directory. The configured files must exist and have +// the correct permissions. +func sslClientCertificates(tlsConf *tls.Config, o values) error { + // user.Current() might fail when cross-compiling. We have to ignore the + // error and continue without home directory defaults, since we wouldn't + // know from where to load them. + user, _ := user.Current() + + // In libpq, the client certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 + sslcert := o["sslcert"] + if len(sslcert) == 0 && user != nil { + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 + if len(sslcert) == 0 { + return nil + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 + if _, err := os.Stat(sslcert); os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + + // In libpq, the ssl key is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 + sslkey := o["sslkey"] + if len(sslkey) == 0 && user != nil { + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + } + + if len(sslkey) > 0 { + if err := sslKeyPermissions(sslkey); err != nil { + return err + } + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + return err + } + + tlsConf.Certificates = []tls.Certificate{cert} + return nil +} + +// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. +func sslCertificateAuthority(tlsConf *tls.Config, o values) error { + // In libpq, the root certificate is only loaded if the setting is not blank. 
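+	// When sslrootcert is unset, tls.Config.RootCAs stays nil and Go falls back
+	// to the host's system certificate pool.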
+ // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 + if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { + tlsConf.RootCAs = x509.NewCertPool() + + cert, err := ioutil.ReadFile(sslrootcert) + if err != nil { + return err + } + + if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { + return fmterrorf("couldn't parse pem in sslrootcert") + } + } + + return nil +} + +// sslVerifyCertificateAuthority carries out a TLS handshake to the server and +// verifies the presented certificate against the CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. +func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { + err := client.Handshake() + if err != nil { + return err + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + return err +} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go new file mode 100644 index 00000000..3b7c3a2a --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -0,0 +1,20 @@ +// +build !windows + +package pq + +import "os" + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(sslkey string) error { + info, err := os.Stat(sslkey) + if err != nil { + return err + } + if info.Mode().Perm()&0077 != 0 { + return ErrSSLKeyHasWorldPermissions + } + return nil +} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go new file mode 100644 index 00000000..5d2c763c --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package pq + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go new file mode 100644 index 00000000..f4d8a7c2 --- /dev/null +++ b/vendor/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + "net" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. 
+// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"="+escaper.Replace(v)) + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go new file mode 100644 index 00000000..bf982524 --- /dev/null +++ b/vendor/github.com/lib/pq/user_posix.go @@ -0,0 +1,24 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go new file mode 100644 index 00000000..2b691267 --- /dev/null +++ b/vendor/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. +func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go new file mode 100644 index 00000000..9a1b9e07 --- /dev/null +++ b/vendor/github.com/lib/pq/uuid.go @@ -0,0 +1,23 @@ +package pq + +import ( + "encoding/hex" + "fmt" +) + +// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. 
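+// For example, the 16 bytes 0x00 0x01 ... 0x0f decode to the text form
+// "00010203-0405-0607-0809-0a0b0c0d0e0f" (8-4-4-4-12 hex digits).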
+func decodeUUIDBinary(src []byte) ([]byte, error) {
+	if len(src) != 16 {
+		return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src))
+	}
+
+	dst := make([]byte, 36)
+	dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
+	hex.Encode(dst[0:], src[0:4])
+	hex.Encode(dst[9:], src[4:6])
+	hex.Encode(dst[14:], src[6:8])
+	hex.Encode(dst[19:], src[8:10])
+	hex.Encode(dst[24:], src[10:16])
+
+	return dst, nil
+}
diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml
new file mode 100644
index 00000000..98db8f06
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+go:
+  - tip
+
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cmd/cover
+script:
+  - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw
diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE
new file mode 100644
index 00000000..91b5cef3
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md
new file mode 100644
index 00000000..56729a92
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/README.md
@@ -0,0 +1,48 @@
+# go-colorable
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
+[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
+
+Colorable writer for Windows.
+
+For example, most logger packages don't show colors on Windows. (I know we can do it with ansicon, but I don't want to.) This package makes it possible to handle ANSI color escape sequences on Windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+ +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 00000000..0b0aef83 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,29 @@ +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 00000000..3fb771dc --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,30 @@ +// +build !windows +// +build !appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. 
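+// On non-Windows builds these constructors return the underlying file or
+// os.Stdout/os.Stderr unchanged, so ANSI escape sequences pass straight through.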
+func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 00000000..1bd628f2 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1005 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. 
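+// Unlike the non-Windows build, this wraps the file in a Writer that rewrites
+// ANSI escape sequences into Windows console API calls when stderr is a terminal.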
+func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 
209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), 
uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 
2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case n == 22 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. 
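+					// 38;5;N selects a 256-color palette entry and 38;2;R;G;B a
+					// 24-bit color; both are approximated with the nearest of the
+					// 16 console attributes.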
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + 
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 00000000..ef3ca9d4 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-colorable + +require github.com/mattn/go-isatty v0.0.8 diff --git 
a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 00000000..2c12960e --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,4 @@ +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 00000000..95f2c6be --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 00000000..5597e026 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - tip + +os: + - linux + - osx + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 00000000..65dc692b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 00000000..1e69004b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 00000000..17d4f90e --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 00000000..a8ddf404 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,5 @@ +module github.com/mattn/go-isatty + +require golang.org/x/sys v0.0.0-20191008105621-543471e840be + +go 1.14 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 00000000..c141fc53 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,4 @@ +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go new file mode 100644 index 00000000..d3567cb5 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_android.go @@ -0,0 +1,23 @@ +// +build android + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. 
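+// The TCGETS ioctl succeeds only on terminal devices; this is the same check
+// that libc's isatty(3) performs.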
+func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 00000000..07e93039 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,24 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 00000000..ff714a37 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,15 @@ +// +build appengine js nacl + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 00000000..bc0a7092 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,22 @@ +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(fd) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 00000000..bdd5c79a --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,22 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. 
This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 00000000..453b025d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +// +build linux aix +// +build !appengine +// +build !android + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 00000000..1fa86915 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. 
+// Cygwin/MSYS2 PTY has a name like:
+//   \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
+func isCygwinPipeName(name string) bool {
+	token := strings.Split(name, "-")
+	if len(token) < 5 {
+		return false
+	}
+
+	if token[0] != `\msys` &&
+		token[0] != `\cygwin` &&
+		token[0] != `\Device\NamedPipe\msys` &&
+		token[0] != `\Device\NamedPipe\cygwin` {
+		return false
+	}
+
+	if token[1] == "" {
+		return false
+	}
+
+	if !strings.HasPrefix(token[2], "pty") {
+		return false
+	}
+
+	if token[3] != `from` && token[3] != `to` {
+		return false
+	}
+
+	if token[4] != "master" {
+		return false
+	}
+
+	return true
+}
+
+// getFileNameByHandle uses the undocumented ntdll NtQueryObject to get the full file name
+// from a file handle. Since GetFileInformationByHandleEx is not available before Windows
+// Vista and some systems still run Windows XP, this is a workaround for them; it also
+// works on systems from Windows Vista to 10.
+// see https://stackoverflow.com/a/18792477 for details
+func getFileNameByHandle(fd uintptr) (string, error) {
+	if procNtQueryObject == nil {
+		return "", errors.New("ntdll.dll: NtQueryObject not supported")
+	}
+
+	var buf [4 + syscall.MAX_PATH]uint16
+	var result int
+	r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
+		fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
+	if r != 0 {
+		return "", e
+	}
+	return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal.
+func IsCygwinTerminal(fd uintptr) bool {
+	if procGetFileInformationByHandleEx == nil {
+		name, err := getFileNameByHandle(fd)
+		if err != nil {
+			return false
+		}
+		return isCygwinPipeName(name)
+	}
+
+	// Cygwin/msys's pty is a pipe.
+	ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
+	if ft != fileTypePipe || e != 0 {
+		return false
+	}
+
+	var buf [2 + syscall.MAX_PATH]uint16
+	r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
+		4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
+		uintptr(len(buf)*2), 0, 0)
+	if r == 0 || e != 0 {
+		return false
+	}
+
+	l := *(*uint32)(unsafe.Pointer(&buf))
+	return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 00000000..5d8cb5b7 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. 
Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore new file mode 100644 index 00000000..e16fb946 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore @@ -0,0 +1 @@ +cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile new file mode 100644 index 00000000..81be2143 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile @@ -0,0 +1,7 @@ +all: + +cover: + go test -cover -v -coverprofile=cover.dat ./... + go tool cover -func cover.dat + +.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 00000000..258c0636 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... 
+ continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. + messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go new file mode 100644 index 00000000..c318385c --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. +package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go new file mode 100644 index 00000000..8fb59ad2 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. 
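+//
+// A minimal round-trip sketch (bytes.Buffer stands in for any stream, and
+// examplepb.Event is a hypothetical message type, not part of this package):
+//
+//	var buf bytes.Buffer
+//	if _, err := pbutil.WriteDelimited(&buf, &examplepb.Event{}); err != nil {
+//		log.Fatal(err)
+//	}
+//	var decoded examplepb.Event
+//	if _, err := pbutil.ReadDelimited(&buf, &decoded); err != nil {
+//		log.Fatal(err)
+//	}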
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/mitchellh/cli/.travis.yml b/vendor/github.com/mitchellh/cli/.travis.yml new file mode 100644 index 00000000..b8599b3a --- /dev/null +++ b/vendor/github.com/mitchellh/cli/.travis.yml @@ -0,0 +1,14 @@ +sudo: false + +language: go + +go: + - "1.8" + - "1.9" + - "1.10" + +branches: + only: + - master + +script: make updatedeps test testrace diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/mitchellh/cli/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/mitchellh/cli/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. 
“You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. 
You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile new file mode 100644 index 00000000..4874b008 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/Makefile @@ -0,0 +1,20 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code +test: + go list $(TEST) | xargs -n1 go test -timeout=60s -parallel=10 $(TESTARGS) + +# testrace runs the race checker +testrace: + go list $(TEST) | xargs -n1 go test -race $(TESTARGS) + +# updatedeps installs all the dependencies to run and build +updatedeps: + go list ./... 
\ + | xargs go list -f '{{ join .Deps "\n" }}{{ printf "\n" }}{{ join .TestImports "\n" }}' \ + | grep -v github.com/mitchellh/cli \ + | xargs go get -f -u -v + +.PHONY: test testrace updatedeps diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md new file mode 100644 index 00000000..8f02cdd0 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/README.md @@ -0,0 +1,67 @@ +# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://godoc.org/github.com/mitchellh/cli) + +cli is a library for implementing powerful command-line interfaces in Go. +cli is the library that powers the CLI for +[Packer](https://github.com/mitchellh/packer), +[Serf](https://github.com/hashicorp/serf), +[Consul](https://github.com/hashicorp/consul), +[Vault](https://github.com/hashicorp/vault), +[Terraform](https://github.com/hashicorp/terraform), and +[Nomad](https://github.com/hashicorp/nomad). + +## Features + +* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc. + +* Support for nested subcommands such as `cli foo bar`. + +* Optional support for default subcommands so `cli` does something + other than error. + +* Support for shell autocompletion of subcommands, flags, and arguments + with callbacks in Go. You don't need to write any shell code. + +* Automatic help generation for listing subcommands + +* Automatic help flag recognition of `-h`, `--help`, etc. + +* Automatic version flag recognition of `-v`, `--version`. + +* Helpers for interacting with the terminal, such as outputting information, + asking for input, etc. These are optional, you can always interact with the + terminal however you choose. + +* Use of Go interfaces/types makes augmenting various parts of the library a + piece of cake. + +## Example + +Below is a simple example of creating and running a CLI + +```go +package main + +import ( + "log" + "os" + + "github.com/mitchellh/cli" +) + +func main() { + c := cli.NewCLI("app", "1.0.0") + c.Args = os.Args[1:] + c.Commands = map[string]cli.CommandFactory{ + "foo": fooCommandFactory, + "bar": barCommandFactory, + } + + exitStatus, err := c.Run() + if err != nil { + log.Println(err) + } + + os.Exit(exitStatus) +} +``` + diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go new file mode 100644 index 00000000..3bec6258 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/autocomplete.go @@ -0,0 +1,43 @@ +package cli + +import ( + "github.com/posener/complete/cmd/install" +) + +// autocompleteInstaller is an interface to be implemented to perform the +// autocomplete installation and uninstallation with a CLI. +// +// This interface is not exported because it only exists for unit tests +// to be able to test that the installation is called properly. +type autocompleteInstaller interface { + Install(string) error + Uninstall(string) error +} + +// realAutocompleteInstaller uses the real install package to do the +// install/uninstall. +type realAutocompleteInstaller struct{} + +func (i *realAutocompleteInstaller) Install(cmd string) error { + return install.Install(cmd) +} + +func (i *realAutocompleteInstaller) Uninstall(cmd string) error { + return install.Uninstall(cmd) +} + +// mockAutocompleteInstaller is used for tests to record the install/uninstall. 
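+//
+// A test can wire the mock in before Run is called, roughly like this
+// (a sketch; autocompleteInstaller is unexported, so this only works from
+// within this package):
+//
+//	installer := new(mockAutocompleteInstaller)
+//	c := &CLI{
+//		Name:                  "foo",
+//		Autocomplete:          true,
+//		Args:                  []string{"-autocomplete-install"},
+//		autocompleteInstaller: installer,
+//	}
+//	c.Run() // installer.InstallCalled is now true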
+type mockAutocompleteInstaller struct { + InstallCalled bool + UninstallCalled bool +} + +func (i *mockAutocompleteInstaller) Install(cmd string) error { + i.InstallCalled = true + return nil +} + +func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { + i.UninstallCalled = true + return nil +} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go new file mode 100644 index 00000000..c2dbe55a --- /dev/null +++ b/vendor/github.com/mitchellh/cli/cli.go @@ -0,0 +1,720 @@ +package cli + +import ( + "fmt" + "io" + "os" + "regexp" + "sort" + "strings" + "sync" + "text/template" + + "github.com/armon/go-radix" + "github.com/posener/complete" +) + +// CLI contains the state necessary to run subcommands and parse the +// command line arguments. +// +// CLI also supports nested subcommands, such as "cli foo bar". To use +// nested subcommands, the key in the Commands mapping below contains the +// full subcommand. In this example, it would be "foo bar". +// +// If you use a CLI with nested subcommands, some semantics change due to +// ambiguities: +// +// * We use longest prefix matching to find a matching subcommand. This +// means if you register "foo bar" and the user executes "cli foo qux", +// the "foo" command will be executed with the arg "qux". It is up to +// you to handle these args. One option is to just return the special +// help return code `RunResultHelp` to display help and exit. +// +// * The help flag "-h" or "-help" will look at all args to determine +// the help function. For example: "otto apps list -h" will show the +// help for "apps list" but "otto apps -h" will show it for "apps". +// In the normal CLI, only the first subcommand is used. +// +// * The help flag will list any subcommands that a command takes +// as well as the command's help itself. If there are no subcommands, +// it will note this. If the CLI itself has no subcommands, this entire +// section is omitted. +// +// * Any parent commands that don't exist are automatically created as +// no-op commands that just show help for other subcommands. For example, +// if you only register "foo bar", then "foo" is automatically created. +// +type CLI struct { + // Args is the list of command-line arguments received excluding + // the name of the app. For example, if the command "./cli foo bar" + // was invoked, then Args should be []string{"foo", "bar"}. + Args []string + + // Commands is a mapping of subcommand names to a factory function + // for creating that Command implementation. If there is a command + // with a blank string "", then it will be used as the default command + // if no subcommand is specified. + // + // If the key has a space in it, this will create a nested subcommand. + // For example, if the key is "foo bar", then to access it our CLI + // must be accessed with "./cli foo bar". See the docs for CLI for + // notes on how this changes some other behavior of the CLI as well. + // + // The factory should be as cheap as possible, ideally only allocating + // a struct. The factory may be called multiple times in the course + // of a command execution and certain events such as help require the + // instantiation of all commands. Expensive initialization should be + // deferred to function calls within the interface implementation. + Commands map[string]CommandFactory + + // HiddenCommands is a list of commands that are "hidden". Hidden + // commands are not given to the help function callback and do not + // show up in autocomplete. 
The values in the slice should be equivalent
+	// to the keys in the command map.
+	HiddenCommands []string
+
+	// Name defines the name of the CLI.
+	Name string
+
+	// Version of the CLI.
+	Version string
+
+	// Autocomplete enables or disables subcommand auto-completion support.
+	// This is enabled by default when NewCLI is called. Otherwise, this
+	// must be enabled explicitly.
+	//
+	// Autocomplete requires the "Name" option to be set on CLI. This name
+	// should be set exactly to the binary name that is autocompleted.
+	//
+	// Autocompletion is supported via the github.com/posener/complete
+	// library. This library supports bash, zsh and fish. To add support
+	// for other shells, please see that library.
+	//
+	// AutocompleteInstall and AutocompleteUninstall are the global flag
+	// names for installing and uninstalling the autocompletion handlers
+	// for the user's shell. The flag should omit the hyphen(s) in front of
+	// the value. Both single and double hyphens will automatically be supported
+	// for the flag name. These default to `autocomplete-install` and
+	// `autocomplete-uninstall` respectively.
+	//
+	// AutocompleteNoDefaultFlags is a boolean which controls if the default auto-
+	// complete flags like -help and -version are added to the output.
+	//
+	// AutocompleteGlobalFlags are a mapping of global flags for
+	// autocompletion. The help and version flags are automatically added.
+	Autocomplete               bool
+	AutocompleteInstall        string
+	AutocompleteUninstall      string
+	AutocompleteNoDefaultFlags bool
+	AutocompleteGlobalFlags    complete.Flags
+	autocompleteInstaller      autocompleteInstaller // For tests
+
+	// HelpFunc and HelpWriter are used to output help information, if
+	// requested.
+	//
+	// HelpFunc is the function called to generate the generic help
+	// text that is shown if help must be shown for the CLI that doesn't
+	// pertain to a specific command.
+	//
+	// HelpWriter is the Writer where the help text is written to. If
+	// not specified, it will default to Stderr.
+	HelpFunc   HelpFunc
+	HelpWriter io.Writer
+
+	//---------------------------------------------------------------
+	// Internal fields set automatically
+
+	once           sync.Once
+	autocomplete   *complete.Complete
+	commandTree    *radix.Tree
+	commandNested  bool
+	commandHidden  map[string]struct{}
+	subcommand     string
+	subcommandArgs []string
+	topFlags       []string
+
+	// These are true when special global flags are set. We can/should
+	// probably use a bitset for this one day.
+	isHelp                  bool
+	isVersion               bool
+	isAutocompleteInstall   bool
+	isAutocompleteUninstall bool
+}
+
+// NewCLI returns a new CLI instance with sensible defaults.
+func NewCLI(app, version string) *CLI {
+	return &CLI{
+		Name:         app,
+		Version:      version,
+		HelpFunc:     BasicHelpFunc(app),
+		Autocomplete: true,
+	}
+}
+
+// IsHelp returns whether or not the help flag is present within the
+// arguments.
+func (c *CLI) IsHelp() bool {
+	c.once.Do(c.init)
+	return c.isHelp
+}
+
+// IsVersion returns whether or not the version flag is present within the
+// arguments.
+func (c *CLI) IsVersion() bool {
+	c.once.Do(c.init)
+	return c.isVersion
+}
+
+// Run runs the actual CLI based on the arguments given.
+func (c *CLI) Run() (int, error) {
+	c.once.Do(c.init)
+
+	// If this is an autocompletion request, satisfy it. This must be called
+	// first before anything else since it's possible to be autocompleting
+	// -help or -version or other flags and we want to show completions
+	// and not actually write the help or version.
+	if c.Autocomplete && c.autocomplete.Complete() {
+		return 0, nil
+	}
+
+	// Just show the version and exit if instructed.
+	if c.IsVersion() && c.Version != "" {
+		c.HelpWriter.Write([]byte(c.Version + "\n"))
+		return 0, nil
+	}
+
+	// Just print the help when only '-h' or '--help' is passed.
+	if c.IsHelp() && c.Subcommand() == "" {
+		c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.Subcommand())) + "\n"))
+		return 0, nil
+	}
+
+	// If we're attempting to install or uninstall autocomplete, handle it here.
+	if c.Autocomplete {
+		// Autocomplete requires the "Name" to be set so that we know what
+		// command to setup the autocomplete on.
+		if c.Name == "" {
+			return 1, fmt.Errorf(
+				"internal error: CLI.Name must be specified for autocomplete to work")
+		}
+
+		// If both install and uninstall flags are specified, then error
+		if c.isAutocompleteInstall && c.isAutocompleteUninstall {
+			return 1, fmt.Errorf(
+				"Either the autocomplete install or uninstall flag may " +
+					"be specified, but not both.")
+		}
+
+		// If the install flag is specified, perform the install
+		if c.isAutocompleteInstall {
+			if err := c.autocompleteInstaller.Install(c.Name); err != nil {
+				return 1, err
+			}
+
+			return 0, nil
+		}
+
+		// Likewise for the uninstall flag.
+		if c.isAutocompleteUninstall {
+			if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil {
+				return 1, err
+			}
+
+			return 0, nil
+		}
+	}
+
+	// Attempt to get the factory function for creating the command
+	// implementation. If the command is invalid or blank, it is an error.
+	raw, ok := c.commandTree.Get(c.Subcommand())
+	if !ok {
+		c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n"))
+		return 127, nil
+	}
+
+	command, err := raw.(CommandFactory)()
+	if err != nil {
+		return 1, err
+	}
+
+	// If we've been instructed to just print the help, then print it
+	if c.IsHelp() {
+		c.commandHelp(command)
+		return 0, nil
+	}
+
+	// If there is an invalid flag, then error
+	if len(c.topFlags) > 0 {
+		c.HelpWriter.Write([]byte(
+			"Invalid flags before the subcommand. If these flags are for\n" +
+				"the subcommand, please put them after the subcommand.\n\n"))
+		c.commandHelp(command)
+		return 1, nil
+	}
+
+	code := command.Run(c.SubcommandArgs())
+	if code == RunResultHelp {
+		// Requesting help
+		c.commandHelp(command)
+		return 1, nil
+	}
+
+	return code, nil
+}
+
+// Subcommand returns the subcommand that the CLI would execute. For
+// example, a CLI from "--version version --help" would return a Subcommand
+// of "version".
+func (c *CLI) Subcommand() string {
+	c.once.Do(c.init)
+	return c.subcommand
+}
+
+// SubcommandArgs returns the arguments that will be passed to the
+// subcommand.
+func (c *CLI) SubcommandArgs() []string {
+	c.once.Do(c.init)
+	return c.subcommandArgs
+}
+
+// subcommandParent returns the parent of this subcommand, if there is one.
+// If there isn't one, "" is returned.
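+//
+// For example, the parent of subcommand "foo bar baz" is "foo bar", and a
+// top-level subcommand such as "foo" has the root parent "".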
+func (c *CLI) subcommandParent() string {
+	// Get the subcommand; if it is "" already, just return it
+	sub := c.Subcommand()
+	if sub == "" {
+		return sub
+	}
+
+	// Clear any trailing spaces and find the last space
+	sub = strings.TrimRight(sub, " ")
+	idx := strings.LastIndex(sub, " ")
+
+	if idx == -1 {
+		// No space means our parent is root
+		return ""
+	}
+
+	return sub[:idx]
+}
+
+func (c *CLI) init() {
+	if c.HelpFunc == nil {
+		c.HelpFunc = BasicHelpFunc("app")
+
+		if c.Name != "" {
+			c.HelpFunc = BasicHelpFunc(c.Name)
+		}
+	}
+
+	if c.HelpWriter == nil {
+		c.HelpWriter = os.Stderr
+	}
+
+	// Build our hidden commands
+	if len(c.HiddenCommands) > 0 {
+		c.commandHidden = make(map[string]struct{})
+		for _, h := range c.HiddenCommands {
+			c.commandHidden[h] = struct{}{}
+		}
+	}
+
+	// Build our command tree
+	c.commandTree = radix.New()
+	c.commandNested = false
+	for k, v := range c.Commands {
+		k = strings.TrimSpace(k)
+		c.commandTree.Insert(k, v)
+		if strings.ContainsRune(k, ' ') {
+			c.commandNested = true
+		}
+	}
+
+	// Go through the keys and fill in any missing parent commands
+	if c.commandNested {
+		var walkFn radix.WalkFn
+		toInsert := make(map[string]struct{})
+		walkFn = func(k string, raw interface{}) bool {
+			idx := strings.LastIndex(k, " ")
+			if idx == -1 {
+				// If there is no space, just ignore top level commands
+				return false
+			}
+
+			// Trim up to that space so we can get the expected parent
+			k = k[:idx]
+			if _, ok := c.commandTree.Get(k); ok {
+				// Yay we have the parent!
+				return false
+			}
+
+			// We're missing the parent, so let's insert this
+			toInsert[k] = struct{}{}
+
+			// Call the walk function recursively so we check this one too
+			return walkFn(k, nil)
+		}
+
+		// Walk!
+		c.commandTree.Walk(walkFn)
+
+		// Insert any that we're missing
+		for k := range toInsert {
+			var f CommandFactory = func() (Command, error) {
+				return &MockCommand{
+					HelpText:  "This command is accessed by using one of the subcommands below.",
+					RunResult: RunResultHelp,
+				}, nil
+			}
+
+			c.commandTree.Insert(k, f)
+		}
+	}
+
+	// Setup autocomplete if we have it enabled. We have to do this after
+	// the command tree is setup so we can use the radix tree to easily find
+	// all subcommands.
+	if c.Autocomplete {
+		c.initAutocomplete()
+	}
+
+	// Process the args
+	c.processArgs()
+}
+
+func (c *CLI) initAutocomplete() {
+	if c.AutocompleteInstall == "" {
+		c.AutocompleteInstall = defaultAutocompleteInstall
+	}
+
+	if c.AutocompleteUninstall == "" {
+		c.AutocompleteUninstall = defaultAutocompleteUninstall
+	}
+
+	if c.autocompleteInstaller == nil {
+		c.autocompleteInstaller = &realAutocompleteInstaller{}
+	}
+
+	// Build the root command
+	cmd := c.initAutocompleteSub("")
+
+	// For the root, we add the global flags to the "Flags". This way
+	// they don't show up on every command.
+	if !c.AutocompleteNoDefaultFlags {
+		cmd.Flags = map[string]complete.Predictor{
+			"-" + c.AutocompleteInstall:   complete.PredictNothing,
+			"-" + c.AutocompleteUninstall: complete.PredictNothing,
+			"-help":    complete.PredictNothing,
+			"-version": complete.PredictNothing,
+		}
+	}
+	cmd.GlobalFlags = c.AutocompleteGlobalFlags
+
+	c.autocomplete = complete.New(c.Name, cmd)
+}
+
+// initAutocompleteSub creates the complete.Command for a subcommand with
+// the given prefix. This will continue recursively for all subcommands.
+// The prefix "" (empty string) can be used for the root command.
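+//
+// For example (a sketch of the recursion): with registered commands
+// "foo", "foo bar", and "foo bar baz", initAutocompleteSub("foo") returns
+// a complete.Command whose Sub map contains the single key "bar", whose
+// own Sub map in turn contains "baz".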
+func (c *CLI) initAutocompleteSub(prefix string) complete.Command {
+	var cmd complete.Command
+	walkFn := func(k string, raw interface{}) bool {
+		// Ignore the empty key which can be present for default commands.
+		if k == "" {
+			return false
+		}
+
+		// Keep track of the full key so that we can nest further if necessary
+		fullKey := k
+
+		if len(prefix) > 0 {
+			// If we have a prefix, trim the prefix + 1 (for the space)
+			// Example: turns "sub one" to "one" with prefix "sub"
+			k = k[len(prefix)+1:]
+		}
+
+		if idx := strings.Index(k, " "); idx >= 0 {
+			// If there is a space, we trim up to the space. This turns
+			// "sub sub2 sub3" into "sub". The prefix trim above will
+			// trim our current depth properly.
+			k = k[:idx]
+		}
+
+		if _, ok := cmd.Sub[k]; ok {
+			// If we already tracked this subcommand then ignore
+			return false
+		}
+
+		// If the command is hidden, don't record it at all
+		if _, ok := c.commandHidden[fullKey]; ok {
+			return false
+		}
+
+		if cmd.Sub == nil {
+			cmd.Sub = complete.Commands(make(map[string]complete.Command))
+		}
+		subCmd := c.initAutocompleteSub(fullKey)
+
+		// Instantiate the command so that we can check if the command is
+		// a CommandAutocomplete implementation. If there is an error
+		// creating the command, we just ignore it since that will be caught
+		// later.
+		impl, err := raw.(CommandFactory)()
+		if err != nil {
+			impl = nil
+		}
+
+		// Check if it implements CommandAutocomplete. If so, set up the autocomplete
+		if c, ok := impl.(CommandAutocomplete); ok {
+			subCmd.Args = c.AutocompleteArgs()
+			subCmd.Flags = c.AutocompleteFlags()
+		}
+
+		cmd.Sub[k] = subCmd
+		return false
+	}
+
+	walkPrefix := prefix
+	if walkPrefix != "" {
+		walkPrefix += " "
+	}
+
+	c.commandTree.WalkPrefix(walkPrefix, walkFn)
+	return cmd
+}
+
+func (c *CLI) commandHelp(command Command) {
+	// Get the template to use
+	tpl := strings.TrimSpace(defaultHelpTemplate)
+	if t, ok := command.(CommandHelpTemplate); ok {
+		tpl = t.HelpTemplate()
+	}
+	if !strings.HasSuffix(tpl, "\n") {
+		tpl += "\n"
+	}
+
+	// Parse it
+	t, err := template.New("root").Parse(tpl)
+	if err != nil {
+		t = template.Must(template.New("root").Parse(fmt.Sprintf(
+			"Internal error!
Failed to parse command help template: %s\n", err))) + } + + // Template data + data := map[string]interface{}{ + "Name": c.Name, + "Help": command.Help(), + } + + // Build subcommand list if we have it + var subcommandsTpl []map[string]interface{} + if c.commandNested { + // Get the matching keys + subcommands := c.helpCommands(c.Subcommand()) + keys := make([]string, 0, len(subcommands)) + for k := range subcommands { + keys = append(keys, k) + } + + // Sort the keys + sort.Strings(keys) + + // Figure out the padding length + var longest int + for _, k := range keys { + if v := len(k); v > longest { + longest = v + } + } + + // Go through and create their structures + subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands)) + for _, k := range keys { + // Get the command + raw, ok := subcommands[k] + if !ok { + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Error getting subcommand %q", k))) + } + sub, err := raw() + if err != nil { + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Error instantiating %q: %s", k, err))) + } + + // Find the last space and make sure we only include that last part + name := k + if idx := strings.LastIndex(k, " "); idx > -1 { + name = name[idx+1:] + } + + subcommandsTpl = append(subcommandsTpl, map[string]interface{}{ + "Name": name, + "NameAligned": name + strings.Repeat(" ", longest-len(k)), + "Help": sub.Help(), + "Synopsis": sub.Synopsis(), + }) + } + } + data["Subcommands"] = subcommandsTpl + + // Write + err = t.Execute(c.HelpWriter, data) + if err == nil { + return + } + + // An error, just output... + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Internal error rendering help: %s", err))) +} + +// helpCommands returns the subcommands for the HelpFunc argument. +// This will only contain immediate subcommands. +func (c *CLI) helpCommands(prefix string) map[string]CommandFactory { + // If our prefix isn't empty, make sure it ends in ' ' + if prefix != "" && prefix[len(prefix)-1] != ' ' { + prefix += " " + } + + // Get all the subkeys of this command + var keys []string + c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { + // Ignore any sub-sub keys, i.e. "foo bar baz" when we want "foo bar" + if !strings.Contains(k[len(prefix):], " ") { + keys = append(keys, k) + } + + return false + }) + + // For each of the keys return that in the map + result := make(map[string]CommandFactory, len(keys)) + for _, k := range keys { + raw, ok := c.commandTree.Get(k) + if !ok { + // We just got it via WalkPrefix above, so we just panic + panic("not found: " + k) + } + + // If this is a hidden command, don't show it + if _, ok := c.commandHidden[k]; ok { + continue + } + + result[k] = raw.(CommandFactory) + } + + return result +} + +func (c *CLI) processArgs() { + for i, arg := range c.Args { + if arg == "--" { + break + } + + // Check for help flags. + if arg == "-h" || arg == "-help" || arg == "--help" { + c.isHelp = true + continue + } + + // Check for autocomplete flags + if c.Autocomplete { + if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall { + c.isAutocompleteInstall = true + continue + } + + if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall { + c.isAutocompleteUninstall = true + continue + } + } + + if c.subcommand == "" { + // Check for version flags if not in a subcommand. + if arg == "-v" || arg == "-version" || arg == "--version" { + c.isVersion = true + continue + } + + if arg != "" && arg[0] == '-' { + // Record the arg... 
+				c.topFlags = append(c.topFlags, arg)
+			}
+		}
+
+		// If we didn't find a subcommand yet and this is the first non-flag
+		// argument, then this is our subcommand.
+		if c.subcommand == "" && arg != "" && arg[0] != '-' {
+			c.subcommand = arg
+			if c.commandNested {
+				// If the command has a space in it, then it is invalid.
+				// Set a blank command so that it fails.
+				if strings.ContainsRune(arg, ' ') {
+					c.subcommand = ""
+					return
+				}
+
+				// Determine the argument we look at to end subcommands.
+				// We look at all arguments until one has a space. This
+				// disallows commands like: ./cli foo "bar baz". An argument
+				// with a space is always an argument.
+				j := 0
+				for k, v := range c.Args[i:] {
+					if strings.ContainsRune(v, ' ') {
+						break
+					}
+
+					j = i + k + 1
+				}
+
+				// Nested CLI, the subcommand is actually the entire
+				// arg list up to a flag that is still a valid subcommand.
+				searchKey := strings.Join(c.Args[i:j], " ")
+				k, _, ok := c.commandTree.LongestPrefix(searchKey)
+				if ok {
+					// k could be a prefix that doesn't contain the full
+					// command such as "foo" instead of "foobar", so we
+					// need to verify that we have an entire key. To do that,
+					// we look for an ending in a space or an end of string.
+					reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`)
+					if reVerify.MatchString(searchKey) {
+						c.subcommand = k
+						i += strings.Count(k, " ")
+					}
+				}
+			}
+
+			// The remaining args are the subcommand arguments
+			c.subcommandArgs = c.Args[i+1:]
+		}
+	}
+
+	// If we never found a subcommand and support a default command, then
+	// switch to using that.
+	if c.subcommand == "" {
+		if _, ok := c.Commands[""]; ok {
+			args := c.topFlags
+			args = append(args, c.subcommandArgs...)
+			c.topFlags = nil
+			c.subcommandArgs = args
+		}
+	}
+}
+
+// defaultAutocompleteInstall and defaultAutocompleteUninstall are the
+// default values for the autocomplete install and uninstall flags.
+const defaultAutocompleteInstall = "autocomplete-install"
+const defaultAutocompleteUninstall = "autocomplete-uninstall"
+
+const defaultHelpTemplate = `
+{{.Help}}{{if gt (len .Subcommands) 0}}
+
+Subcommands:
+{{- range $value := .Subcommands }}
+    {{ $value.NameAligned }}    {{ $value.Synopsis }}{{ end }}
+{{- end }}
+`
diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go
new file mode 100644
index 00000000..bed11faf
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/command.go
@@ -0,0 +1,67 @@
+package cli
+
+import (
+	"github.com/posener/complete"
+)
+
+const (
+	// RunResultHelp is a value that can be returned from Run to signal
+	// to the CLI to render the help output.
+	RunResultHelp = -18511
+)
+
+// A Command is a runnable sub-command of a CLI.
+type Command interface {
+	// Help should return long-form help text that includes the command-line
+	// usage, a brief few sentences explaining the function of the command,
+	// and the complete list of flags the command accepts.
+	Help() string
+
+	// Run should run the actual command with the given CLI instance and
+	// command-line arguments. It should return the exit status when it is
+	// finished.
+	//
+	// There are a handful of special exit codes this can return documented
+	// above that change behavior.
+	Run(args []string) int
+
+	// Synopsis should return a one-line, short synopsis of the command.
+	// This should be less than 50 characters ideally.
+	Synopsis() string
+}
+
+// CommandAutocomplete is an extension of Command that enables fine-grained
+// autocompletion.
Subcommand autocompletion will work even if this interface +// is not implemented. By implementing this interface, more advanced +// autocompletion is enabled. +type CommandAutocomplete interface { + // AutocompleteArgs returns the argument predictor for this command. + // If argument completion is not supported, this should return + // complete.PredictNothing. + AutocompleteArgs() complete.Predictor + + // AutocompleteFlags returns a mapping of supported flags and autocomplete + // options for this command. The map key for the Flags map should be the + // complete flag such as "-foo" or "--foo". + AutocompleteFlags() complete.Flags +} + +// CommandHelpTemplate is an extension of Command that also has a function +// for returning a template for the help rather than the help itself. In +// this scenario, both Help and HelpTemplate should be implemented. +// +// If CommandHelpTemplate isn't implemented, the Help is output as-is. +type CommandHelpTemplate interface { + // HelpTemplate is the template in text/template format to use for + // displaying the Help. The keys available are: + // + // * ".Help" - The help text itself + // * ".Subcommands" + // + HelpTemplate() string +} + +// CommandFactory is a type of function that is a factory for commands. +// We need a factory because we may need to setup some state on the +// struct that implements the command itself. +type CommandFactory func() (Command, error) diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go new file mode 100644 index 00000000..7a584b7e --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command_mock.go @@ -0,0 +1,63 @@ +package cli + +import ( + "github.com/posener/complete" +) + +// MockCommand is an implementation of Command that can be used for tests. +// It is publicly exported from this package in case you want to use it +// externally. +type MockCommand struct { + // Settable + HelpText string + RunResult int + SynopsisText string + + // Set by the command + RunCalled bool + RunArgs []string +} + +func (c *MockCommand) Help() string { + return c.HelpText +} + +func (c *MockCommand) Run(args []string) int { + c.RunCalled = true + c.RunArgs = args + + return c.RunResult +} + +func (c *MockCommand) Synopsis() string { + return c.SynopsisText +} + +// MockCommandAutocomplete is an implementation of CommandAutocomplete. +type MockCommandAutocomplete struct { + MockCommand + + // Settable + AutocompleteArgsValue complete.Predictor + AutocompleteFlagsValue complete.Flags +} + +func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor { + return c.AutocompleteArgsValue +} + +func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags { + return c.AutocompleteFlagsValue +} + +// MockCommandHelpTemplate is an implementation of CommandHelpTemplate. 
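// Editorial sketch (not part of the upstream file): a MockCommand plugs into
// a CLI through a CommandFactory like any other command; the app name and the
// "deploy" subcommand are hypothetical:
//
//	c := cli.NewCLI("app", "1.0.0")
//	c.Args = []string{"deploy"}
//	c.Commands = map[string]cli.CommandFactory{
//		"deploy": func() (cli.Command, error) {
//			return &cli.MockCommand{SynopsisText: "Deploy things"}, nil
//		},
//	}
//	exitStatus, err := c.Run()
//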
+type MockCommandHelpTemplate struct { + MockCommand + + // Settable + HelpTemplateText string +} + +func (c *MockCommandHelpTemplate) HelpTemplate() string { + return c.HelpTemplateText +} diff --git a/vendor/github.com/mitchellh/cli/go.mod b/vendor/github.com/mitchellh/cli/go.mod new file mode 100644 index 00000000..675325ff --- /dev/null +++ b/vendor/github.com/mitchellh/cli/go.mod @@ -0,0 +1,12 @@ +module github.com/mitchellh/cli + +require ( + github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 + github.com/bgentry/speakeasy v0.1.0 + github.com/fatih/color v1.7.0 + github.com/hashicorp/go-multierror v1.0.0 // indirect + github.com/mattn/go-colorable v0.0.9 // indirect + github.com/mattn/go-isatty v0.0.3 + github.com/posener/complete v1.1.1 + golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc // indirect +) diff --git a/vendor/github.com/mitchellh/cli/go.sum b/vendor/github.com/mitchellh/cli/go.sum new file mode 100644 index 00000000..03708752 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/go.sum @@ -0,0 +1,22 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357 h1:Rem2+U35z1QtPQc6r+WolF7yXiefXqDKyk+lN2pE164= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0 h1:j30noezaCfvNLcdMYSvHLv81DxYRSt1grlpseG67vhU= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc h1:MeuS1UDyZyFH++6vVy44PuufTeFF0d0nfI6XB87YGSk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go new file mode 100644 index 00000000..f5ca58f5 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/help.go @@ -0,0 +1,79 @@ +package cli + +import ( + "bytes" + "fmt" + "log" + "sort" + "strings" +) + +// HelpFunc is the type of the function that is responsible 
for generating +// the help output when the CLI must show the general help text. +type HelpFunc func(map[string]CommandFactory) string + +// BasicHelpFunc generates some basic help output that is usually good enough +// for most CLI applications. +func BasicHelpFunc(app string) HelpFunc { + return func(commands map[string]CommandFactory) string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf( + "Usage: %s [--version] [--help] []\n\n", + app)) + buf.WriteString("Available commands are:\n") + + // Get the list of keys so we can sort them, and also get the maximum + // key length so they can be aligned properly. + keys := make([]string, 0, len(commands)) + maxKeyLen := 0 + for key := range commands { + if len(key) > maxKeyLen { + maxKeyLen = len(key) + } + + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + commandFunc, ok := commands[key] + if !ok { + // This should never happen since we JUST built the list of + // keys. + panic("command not found: " + key) + } + + command, err := commandFunc() + if err != nil { + log.Printf("[ERR] cli: Command '%s' failed to load: %s", + key, err) + continue + } + + key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key))) + buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis())) + } + + return buf.String() + } +} + +// FilteredHelpFunc will filter the commands to only include the keys +// in the include parameter. +func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc { + return func(commands map[string]CommandFactory) string { + set := make(map[string]struct{}) + for _, k := range include { + set[k] = struct{}{} + } + + filtered := make(map[string]CommandFactory) + for k, f := range commands { + if _, ok := set[k]; ok { + filtered[k] = f + } + } + + return f(filtered) + } +} diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go new file mode 100644 index 00000000..a2d6f94f --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui.go @@ -0,0 +1,187 @@ +package cli + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/signal" + "strings" + + "github.com/bgentry/speakeasy" + "github.com/mattn/go-isatty" +) + +// Ui is an interface for interacting with the terminal, or "interface" +// of a CLI. This abstraction doesn't have to be used, but helps provide +// a simple, layerable way to manage user interactions. +type Ui interface { + // Ask asks the user for input using the given query. The response is + // returned as the given string, or an error. + Ask(string) (string, error) + + // AskSecret asks the user for input using the given query, but does not echo + // the keystrokes to the terminal. + AskSecret(string) (string, error) + + // Output is called for normal standard output. + Output(string) + + // Info is called for information related to the previous output. + // In general this may be the exact same as Output, but this gives + // Ui implementors some flexibility with output formats. + Info(string) + + // Error is used for any error messages that might appear on standard + // error. + Error(string) + + // Warn is used for any warning messages that might appear on standard + // error. + Warn(string) +} + +// BasicUi is an implementation of Ui that just outputs to the given +// writer. This UI is not threadsafe by default, but you can wrap it +// in a ConcurrentUi to make it safe. 
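// A minimal construction sketch (editorial addition; assumes the standard
// os streams, which the type itself does not require):
//
//	ui := &cli.BasicUi{
//		Reader:      os.Stdin,
//		Writer:      os.Stdout,
//		ErrorWriter: os.Stderr,
//	}
//	ui.Info("starting up")
//	answer, err := ui.Ask("Continue? (y/n)")
//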
+type BasicUi struct { + Reader io.Reader + Writer io.Writer + ErrorWriter io.Writer +} + +func (u *BasicUi) Ask(query string) (string, error) { + return u.ask(query, false) +} + +func (u *BasicUi) AskSecret(query string) (string, error) { + return u.ask(query, true) +} + +func (u *BasicUi) ask(query string, secret bool) (string, error) { + if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { + return "", err + } + + // Register for interrupts so that we can catch it and immediately + // return... + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer signal.Stop(sigCh) + + // Ask for input in a go-routine so that we can ignore it. + errCh := make(chan error, 1) + lineCh := make(chan string, 1) + go func() { + var line string + var err error + if secret && isatty.IsTerminal(os.Stdin.Fd()) { + line, err = speakeasy.Ask("") + } else { + r := bufio.NewReader(u.Reader) + line, err = r.ReadString('\n') + } + if err != nil { + errCh <- err + return + } + + lineCh <- strings.TrimRight(line, "\r\n") + }() + + select { + case err := <-errCh: + return "", err + case line := <-lineCh: + return line, nil + case <-sigCh: + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(u.Writer) + + return "", errors.New("interrupted") + } +} + +func (u *BasicUi) Error(message string) { + w := u.Writer + if u.ErrorWriter != nil { + w = u.ErrorWriter + } + + fmt.Fprint(w, message) + fmt.Fprint(w, "\n") +} + +func (u *BasicUi) Info(message string) { + u.Output(message) +} + +func (u *BasicUi) Output(message string) { + fmt.Fprint(u.Writer, message) + fmt.Fprint(u.Writer, "\n") +} + +func (u *BasicUi) Warn(message string) { + u.Error(message) +} + +// PrefixedUi is an implementation of Ui that prefixes messages. +type PrefixedUi struct { + AskPrefix string + AskSecretPrefix string + OutputPrefix string + InfoPrefix string + ErrorPrefix string + WarnPrefix string + Ui Ui +} + +func (u *PrefixedUi) Ask(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskPrefix, query) + } + + return u.Ui.Ask(query) +} + +func (u *PrefixedUi) AskSecret(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) + } + + return u.Ui.AskSecret(query) +} + +func (u *PrefixedUi) Error(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) + } + + u.Ui.Error(message) +} + +func (u *PrefixedUi) Info(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.InfoPrefix, message) + } + + u.Ui.Info(message) +} + +func (u *PrefixedUi) Output(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.OutputPrefix, message) + } + + u.Ui.Output(message) +} + +func (u *PrefixedUi) Warn(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.WarnPrefix, message) + } + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go new file mode 100644 index 00000000..b0ec4484 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_colored.go @@ -0,0 +1,73 @@ +package cli + +import ( + "github.com/fatih/color" +) + +const ( + noColor = -1 +) + +// UiColor is a posix shell color code to use. +type UiColor struct { + Code int + Bold bool +} + +// A list of colors that are useful. These are all non-bolded by default. 
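// Editorial sketch of wrapping another Ui with colors (not part of the
// upstream file; the message text is illustrative):
//
//	ui := &cli.ColoredUi{
//		ErrorColor: cli.UiColorRed,
//		WarnColor:  cli.UiColorYellow,
//		Ui:         &cli.BasicUi{Writer: os.Stdout, ErrorWriter: os.Stderr},
//	}
//	ui.Error("rendered in red on terminals that support it")
//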
+var ( + UiColorNone UiColor = UiColor{noColor, false} + UiColorRed = UiColor{int(color.FgHiRed), false} + UiColorGreen = UiColor{int(color.FgHiGreen), false} + UiColorYellow = UiColor{int(color.FgHiYellow), false} + UiColorBlue = UiColor{int(color.FgHiBlue), false} + UiColorMagenta = UiColor{int(color.FgHiMagenta), false} + UiColorCyan = UiColor{int(color.FgHiCyan), false} +) + +// ColoredUi is a Ui implementation that colors its output according +// to the given color schemes for the given type of output. +type ColoredUi struct { + OutputColor UiColor + InfoColor UiColor + ErrorColor UiColor + WarnColor UiColor + Ui Ui +} + +func (u *ColoredUi) Ask(query string) (string, error) { + return u.Ui.Ask(u.colorize(query, u.OutputColor)) +} + +func (u *ColoredUi) AskSecret(query string) (string, error) { + return u.Ui.AskSecret(u.colorize(query, u.OutputColor)) +} + +func (u *ColoredUi) Output(message string) { + u.Ui.Output(u.colorize(message, u.OutputColor)) +} + +func (u *ColoredUi) Info(message string) { + u.Ui.Info(u.colorize(message, u.InfoColor)) +} + +func (u *ColoredUi) Error(message string) { + u.Ui.Error(u.colorize(message, u.ErrorColor)) +} + +func (u *ColoredUi) Warn(message string) { + u.Ui.Warn(u.colorize(message, u.WarnColor)) +} + +func (u *ColoredUi) colorize(message string, uc UiColor) string { + if uc.Code == noColor { + return message + } + + attr := []color.Attribute{color.Attribute(uc.Code)} + if uc.Bold { + attr = append(attr, color.Bold) + } + + return color.New(attr...).SprintFunc()(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go new file mode 100644 index 00000000..b4f4dbfa --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_concurrent.go @@ -0,0 +1,54 @@ +package cli + +import ( + "sync" +) + +// ConcurrentUi is a wrapper around a Ui interface (and implements that +// interface) making the underlying Ui concurrency safe. +type ConcurrentUi struct { + Ui Ui + l sync.Mutex +} + +func (u *ConcurrentUi) Ask(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.Ask(query) +} + +func (u *ConcurrentUi) AskSecret(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.AskSecret(query) +} + +func (u *ConcurrentUi) Error(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Error(message) +} + +func (u *ConcurrentUi) Info(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Info(message) +} + +func (u *ConcurrentUi) Output(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Output(message) +} + +func (u *ConcurrentUi) Warn(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go new file mode 100644 index 00000000..0bfe0a19 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_mock.go @@ -0,0 +1,111 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "sync" +) + +// NewMockUi returns a fully initialized MockUi instance +// which is safe for concurrent use. +func NewMockUi() *MockUi { + m := new(MockUi) + m.once.Do(m.init) + return m +} + +// MockUi is a mock UI that is used for tests and is exported publicly +// for use in external tests if needed as well. Do not instantite this +// directly since the buffers will be initialized on the first write. If +// there is no write then you will get a nil panic. Please use the +// NewMockUi() constructor function instead. 
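// For example, in a consumer's test (editorial sketch; the message and the
// assertion are illustrative):
//
//	ui := cli.NewMockUi()
//	ui.Error("boom")
//	if got := ui.ErrorWriter.String(); got != "boom\n" {
//		t.Fatalf("unexpected stderr: %q", got)
//	}
//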
You can fix your code with +// +// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go +type MockUi struct { + InputReader io.Reader + ErrorWriter *syncBuffer + OutputWriter *syncBuffer + + once sync.Once +} + +func (u *MockUi) Ask(query string) (string, error) { + u.once.Do(u.init) + + var result string + fmt.Fprint(u.OutputWriter, query) + if _, err := fmt.Fscanln(u.InputReader, &result); err != nil { + return "", err + } + + return result, nil +} + +func (u *MockUi) AskSecret(query string) (string, error) { + return u.Ask(query) +} + +func (u *MockUi) Error(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) Info(message string) { + u.Output(message) +} + +func (u *MockUi) Output(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.OutputWriter, message) + fmt.Fprint(u.OutputWriter, "\n") +} + +func (u *MockUi) Warn(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) init() { + u.ErrorWriter = new(syncBuffer) + u.OutputWriter = new(syncBuffer) +} + +type syncBuffer struct { + sync.RWMutex + b bytes.Buffer +} + +func (b *syncBuffer) Write(data []byte) (int, error) { + b.Lock() + defer b.Unlock() + return b.b.Write(data) +} + +func (b *syncBuffer) Read(data []byte) (int, error) { + b.RLock() + defer b.RUnlock() + return b.b.Read(data) +} + +func (b *syncBuffer) Reset() { + b.Lock() + b.b.Reset() + b.Unlock() +} + +func (b *syncBuffer) String() string { + return string(b.Bytes()) +} + +func (b *syncBuffer) Bytes() []byte { + b.RLock() + data := b.b.Bytes() + b.RUnlock() + return data +} diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go new file mode 100644 index 00000000..1e1db3cf --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_writer.go @@ -0,0 +1,18 @@ +package cli + +// UiWriter is an io.Writer implementation that can be used with +// loggers that writes every line of log output data to a Ui at the +// Info level. +type UiWriter struct { + Ui Ui +} + +func (w *UiWriter) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + p = p[:n-1] + } + + w.Ui.Info(string(p)) + return n, nil +} diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml new file mode 100644 index 00000000..d7b9589a --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.7 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 00000000..22985159 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 00000000..bcb8c8d2 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 00000000..db6a6aa1 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. + return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 00000000..14043525 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,548 @@ +package copystructure + +import ( + "errors" + "reflect" + "sync" + + "github.com/mitchellh/reflectwalk" +) + +// Copy returns a deep copy of v. +func Copy(v interface{}) (interface{}, error) { + return Config{}.Copy(v) +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. 
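// For instance (editorial sketch):
//
//	orig := map[string][]int{"a": {1, 2}}
//	dup := Must(Copy(orig)).(map[string][]int)
//	dup["a"][0] = 99 // orig is left unchanged
//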
+func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. + result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. +func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + +type walker struct { + Result interface{} + + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. Store the interface + // types here, indexed by both the walk depth and the number of pointers + // already seen at that depth. Use ifaceKey to calculate the proper uint64 + // value. + ifaceTypes map[uint64]reflect.Type + + // any locks we've taken, indexed by depth + locks []sync.Locker + // take locks while walking the structure + useLocks bool +} + +func (w *walker) Enter(l reflectwalk.Location) error { + w.depth++ + + // ensure we have enough elements to index via w.depth + for w.depth >= len(w.locks) { + w.locks = append(w.locks, nil) + } + + for len(w.ps) < w.depth+1 { + w.ps = append(w.ps, 0) + } + + return nil +} + +func (w *walker) Exit(l reflectwalk.Location) error { + locker := w.locks[w.depth] + w.locks[w.depth] = nil + if locker != nil { + defer locker.Unlock() + } + + // clear out pointers and interfaces as we exit the stack + w.ps[w.depth] = 0 + + for k := range w.ifaceTypes { + mask := uint64(^uint32(0)) + if k&mask == uint64(w.depth) { + delete(w.ifaceTypes, k) + } + } + + w.depth-- + if w.ignoreDepth > w.depth { + w.ignoreDepth = 0 + } + + if w.ignoring() { + return nil + } + + switch l { + case reflectwalk.Array: + fallthrough + case reflectwalk.Map: + fallthrough + case reflectwalk.Slice: + w.replacePointerMaybe() + + // Pop map off our container + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + // Pop off the key and value + mv := w.valPop() + mk := w.valPop() + m := w.cs[len(w.cs)-1] + + // If mv is the zero value, SetMapIndex deletes the key form the map, + // or in this case never adds it. We need to create a properly typed + // zero value so that this key can be set. 
+ if !mv.IsValid() { + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } + } + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + se := s.Elem().Index(i) + if se.CanSet() { + se.Set(v) + } + } + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + + if sf.CanSet() { + sf.Set(v) + } + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(m) + + // Create the map. If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.New(m.Type()) + } else { + newMap = wrapPtr(reflect.MakeMap(m.Type())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if v { + w.ps[w.depth]++ + } + return nil +} + +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. 
+ w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := Copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. + if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[w.depth] > 0 +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. + if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. + // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. 
+ // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. + if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} + +// wrapPtr is a helper that takes v and always make it *v. 
copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/copystructure/go.mod b/vendor/github.com/mitchellh/copystructure/go.mod new file mode 100644 index 00000000..d0186430 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.mod @@ -0,0 +1,3 @@ +module github.com/mitchellh/copystructure + +require github.com/mitchellh/reflectwalk v1.0.0 diff --git a/vendor/github.com/mitchellh/copystructure/go.sum b/vendor/github.com/mitchellh/copystructure/go.sum new file mode 100644 index 00000000..be572456 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.sum @@ -0,0 +1,2 @@ +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml new file mode 100644 index 00000000..928d000e --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.8 + - 1.x + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 00000000..a3866a29 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md new file mode 100644 index 00000000..26781bba --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/README.md @@ -0,0 +1,52 @@ +# go-testing-interface + +go-testing-interface is a Go library that exports an interface that +`*testing.T` implements as well as a runtime version you can use in its +place. + +The purpose of this library is so that you can export test helpers as a +public API without depending on the "testing" package, since you can't +create a `*testing.T` struct manually. This lets you, for example, use the +public testing APIs to generate mock data at runtime, rather than just at +test time. 
+ +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). + +Given a test helper written using `go-testing-interface` like this: + + import "github.com/mitchellh/go-testing-interface" + + func TestHelper(t testing.T) { + t.Fatal("I failed") + } + +You can call the test helper in a real test easily: + + import "testing" + + func TestThing(t *testing.T) { + TestHelper(t) + } + +You can also call the test helper at runtime if needed: + + import "github.com/mitchellh/go-testing-interface" + + func main() { + TestHelper(&testing.RuntimeT{}) + } + +## Why?! + +**Why would I call a test helper that takes a *testing.T at runtime?** + +You probably shouldn't. The only use case I've seen (and I've had) for this +is to implement a "dev mode" for a service where the test helpers are used +to populate mock data, create a mock DB, perhaps run service dependencies +in-memory, etc. + +Outside of a "dev mode", I've never seen a use case for this and I think +there shouldn't be one since the point of the `testing.T` interface is that +you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/go.mod b/vendor/github.com/mitchellh/go-testing-interface/go.mod new file mode 100644 index 00000000..062796de --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-testing-interface diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 00000000..204afb42 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,84 @@ +// +build !go1.9 + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. Name and Skip methods are +// unimplemented noops. 
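// For example (editorial sketch of runtime use, mirroring the README above):
//
//	rt := new(RuntimeT)
//	rt.Errorf("seed data missing: %s", "users")
//	if rt.Failed() {
//		// handle the failure; Fatal/FailNow would have panicked instead
//	}
//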
+type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Skip(args ...interface{}) {} +func (t *RuntimeT) SkipNow() {} +func (t *RuntimeT) Skipf(format string, args ...interface{}) {} +func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go new file mode 100644 index 00000000..31b42cad --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -0,0 +1,108 @@ +// +build go1.9 + +// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition +// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC +// and is set for release shortly. We'll support this on master as the default +// as soon as 1.9 is released. + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. +type RuntimeT struct { + skipped bool + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Printf(format, args...) + t.Fail() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) 
+ t.FailNow() +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 00000000..ac82cd2e --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. 
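As a brief illustration (an editorial sketch: the `stringCollector` type and the sample data are hypothetical, while `Walk` and `PrimitiveWalker` are defined in reflectwalk.go below), a walker that collects every string it encounters:

```
package main

import (
	"fmt"
	"log"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// stringCollector implements reflectwalk.PrimitiveWalker and records every
// string primitive it is shown, including map keys.
type stringCollector struct {
	found []string
}

func (c *stringCollector) Primitive(v reflect.Value) error {
	if v.Kind() == reflect.String {
		c.found = append(c.found, v.String())
	}
	return nil
}

func main() {
	data := map[string]interface{}{
		"name": "example",
		"tags": []interface{}{"a", "b"},
	}

	c := &stringCollector{}
	if err := reflectwalk.Walk(data, c); err != nil {
		log.Fatal(err)
	}

	// Keys and values alike, e.g. [name example tags a b]; map iteration
	// order is not guaranteed.
	fmt.Println(c.found)
}
```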
diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod new file mode 100644 index 00000000..52bb7c46 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/reflectwalk diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 00000000..6a7f1761 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 00000000..70760cf4 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 00000000..3a93a0b1 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,402 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. 
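// For illustration (editorial sketch; fieldLister is hypothetical), a minimal
// StructWalker that prints every field name it visits:
//
//	type fieldLister struct{}
//
//	func (fieldLister) Struct(v reflect.Value) error { return nil }
//
//	func (fieldLister) StructField(f reflect.StructField, v reflect.Value) error {
//		fmt.Println(f.Name)
//		return nil
//	}
//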
+type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. 
+ originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = 
walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore new file mode 100644 index 00000000..a1338d68 --- /dev/null +++ b/vendor/github.com/oklog/run/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml new file mode 100644 index 00000000..362bdd41 --- /dev/null +++ b/vendor/github.com/oklog/run/.travis.yml @@ -0,0 +1,12 @@ +language: go +sudo: false +go: + - 1.x + - tip +install: + - go get -v github.com/golang/lint/golint + - go build ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md new file mode 100644 index 00000000..a7228cd9 --- /dev/null +++ b/vendor/github.com/oklog/run/README.md @@ -0,0 +1,73 @@ +# run + +[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) +[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) +[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) +[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) + +run.Group is a universal mechanism to manage goroutine lifecycles. + +Create a zero-value run.Group, and then add actors to it. 
Actors are defined as
+a pair of functions: an **execute** function, which should run synchronously;
+and an **interrupt** function, which, when invoked, should cause the execute
+function to return. Finally, invoke Run, which blocks until the first actor
+returns. This general-purpose API allows callers to model pretty much any
+runnable task, and achieve well-defined lifecycle semantics for the group.
+
+run.Group was written to manage component lifecycles in func main for
+[OK Log](https://github.com/oklog/oklog).
+But it's useful in any circumstance where you need to orchestrate multiple
+goroutines as a unified whole.
+[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a
+video of a talk where run.Group is described.
+
+## Examples
+
+### context.Context
+
+```go
+ctx, cancel := context.WithCancel(context.Background())
+g.Add(func() error {
+	return myProcess(ctx, ...)
+}, func(error) {
+	cancel()
+})
+```
+
+### net.Listener
+
+```go
+ln, _ := net.Listen("tcp", ":8080")
+g.Add(func() error {
+	return http.Serve(ln, nil)
+}, func(error) {
+	ln.Close()
+})
+```
+
+### io.ReadCloser
+
+```go
+var conn io.ReadCloser = ...
+g.Add(func() error {
+	s := bufio.NewScanner(conn)
+	for s.Scan() {
+		println(s.Text())
+	}
+	return s.Err()
+}, func(error) {
+	conn.Close()
+})
+```
+
+## Comparisons
+
+Package run is somewhat similar to package
+[errgroup](https://godoc.org/golang.org/x/sync/errgroup),
+except it doesn't require actor goroutines to understand context semantics.
+
+It's somewhat similar to package
+[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or
+[tomb.v2](https://godoc.org/gopkg.in/tomb.v2),
+except it has a much smaller API surface, delegating e.g. staged shutdown of
+goroutines to the caller.
diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go
new file mode 100644
index 00000000..832d47dd
--- /dev/null
+++ b/vendor/github.com/oklog/run/group.go
@@ -0,0 +1,62 @@
+// Package run implements an actor-runner with deterministic teardown. It is
+// somewhat similar to package errgroup, except it does not require actor
+// goroutines to understand context semantics. This makes it suitable for use in
+// more circumstances; for example, goroutines which are handling connections
+// from net.Listeners, or scanning input from a closable io.Reader.
+package run
+
+// Group collects actors (functions) and runs them concurrently.
+// When one actor (function) returns, all actors are interrupted.
+// The zero value of a Group is useful.
+type Group struct {
+	actors []actor
+}
+
+// Add an actor (function) to the group. Each actor must be pre-emptable by an
+// interrupt function. That is, if interrupt is invoked, execute should return.
+// Also, it must be safe to call interrupt even after execute has returned.
+//
+// The first actor (function) to return interrupts all running actors.
+// The error is passed to the interrupt functions, and is returned by Run.
+func (g *Group) Add(execute func() error, interrupt func(error)) {
+	g.actors = append(g.actors, actor{execute, interrupt})
+}
+
+// Run all actors (functions) concurrently.
+// When the first actor returns, all others are interrupted.
+// Run only returns when all actors have exited.
+// Run returns the error returned by the first exiting actor.
+func (g *Group) Run() error {
+	if len(g.actors) == 0 {
+		return nil
+	}
+
+	// Run each actor.
+ errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. + return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/vendor/github.com/patrickmn/go-cache/CONTRIBUTORS b/vendor/github.com/patrickmn/go-cache/CONTRIBUTORS new file mode 100644 index 00000000..2b16e997 --- /dev/null +++ b/vendor/github.com/patrickmn/go-cache/CONTRIBUTORS @@ -0,0 +1,9 @@ +This is a list of people who have contributed code to go-cache. They, or their +employers, are the copyright holders of the contributed code. Contributed code +is subject to the license restrictions listed in LICENSE (as they were when the +code was contributed.) + +Dustin Sallings +Jason Mooberry +Sergey Shepelev +Alex Edwards diff --git a/vendor/github.com/patrickmn/go-cache/LICENSE b/vendor/github.com/patrickmn/go-cache/LICENSE new file mode 100644 index 00000000..db9903c7 --- /dev/null +++ b/vendor/github.com/patrickmn/go-cache/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/patrickmn/go-cache/README.md b/vendor/github.com/patrickmn/go-cache/README.md new file mode 100644 index 00000000..c5789cc6 --- /dev/null +++ b/vendor/github.com/patrickmn/go-cache/README.md @@ -0,0 +1,83 @@ +# go-cache + +go-cache is an in-memory key:value store/cache similar to memcached that is +suitable for applications running on a single machine. Its major advantage is +that, being essentially a thread-safe `map[string]interface{}` with expiration +times, it doesn't need to serialize or transmit its contents over the network. + +Any object can be stored, for a given duration or forever, and the cache can be +safely used by multiple goroutines. + +Although go-cache isn't meant to be used as a persistent datastore, the entire +cache can be saved to and loaded from a file (using `c.Items()` to retrieve the +items map to serialize, and `NewFrom()` to create a cache from a deserialized +one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.) 
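+
+As an illustrative sketch (not part of the package API; the in-memory buffer,
+durations, and keys here are assumptions for the example), a save/restore
+round-trip built on `c.Items()` and `NewFrom()` might look like this:
+
+```go
+// Assumes imports: "bytes", "encoding/gob", "log", "time", and this package.
+c := cache.New(5*time.Minute, 10*time.Minute)
+c.Set("greeting", "hello", cache.DefaultExpiration)
+
+// Snapshot the items and register their concrete types so gob can encode
+// the interface{} payloads.
+items := c.Items()
+for _, it := range items {
+	gob.Register(it.Object)
+}
+var buf bytes.Buffer
+if err := gob.NewEncoder(&buf).Encode(items); err != nil {
+	log.Fatal(err)
+}
+
+// Decode the snapshot (the same types must be registered on this side too)
+// and build a fresh cache around it.
+restored := make(map[string]cache.Item)
+if err := gob.NewDecoder(&buf).Decode(&restored); err != nil {
+	log.Fatal(err)
+}
+c2 := cache.NewFrom(5*time.Minute, 10*time.Minute, restored)
+_ = c2
+```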
+
+### Installation
+
+`go get github.com/patrickmn/go-cache`
+
+### Usage
+
+```go
+import (
+	"fmt"
+	"github.com/patrickmn/go-cache"
+	"time"
+)
+
+func main() {
+	// Create a cache with a default expiration time of 5 minutes, and which
+	// purges expired items every 10 minutes
+	c := cache.New(5*time.Minute, 10*time.Minute)
+
+	// Set the value of the key "foo" to "bar", with the default expiration time
+	c.Set("foo", "bar", cache.DefaultExpiration)
+
+	// Set the value of the key "baz" to 42, with no expiration time
+	// (the item won't be removed until it is re-set, or removed using
+	// c.Delete("baz"))
+	c.Set("baz", 42, cache.NoExpiration)
+
+	// Get the string associated with the key "foo" from the cache
+	foo, found := c.Get("foo")
+	if found {
+		fmt.Println(foo)
+	}
+
+	// Since Go is statically typed, and cache values can be anything, type
+	// assertion is needed when values are being passed to functions that don't
+	// take arbitrary types (i.e. interface{}). The simplest way to do this for
+	// values which will only be used once--e.g. for passing to another
+	// function--is:
+	foo, found = c.Get("foo")
+	if found {
+		MyFunction(foo.(string))
+	}
+
+	// This gets tedious if the value is used several times in the same function.
+	// You might do either of the following instead:
+	if x, found := c.Get("foo"); found {
+		foo := x.(string)
+		// ...
+	}
+	// or
+	var s string
+	if x, found := c.Get("foo"); found {
+		s = x.(string)
+	}
+	// ...
+	// s can then be passed around freely as a string
+
+	// Want performance? Store pointers!
+	c.Set("foo", &MyStruct{}, cache.DefaultExpiration)
+	if x, found := c.Get("foo"); found {
+		foo := x.(*MyStruct)
+		// ...
+	}
+}
+```
+
+### Reference
+
+`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache)
diff --git a/vendor/github.com/patrickmn/go-cache/cache.go b/vendor/github.com/patrickmn/go-cache/cache.go
new file mode 100644
index 00000000..db88d2f2
--- /dev/null
+++ b/vendor/github.com/patrickmn/go-cache/cache.go
@@ -0,0 +1,1161 @@
+package cache
+
+import (
+	"encoding/gob"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sync"
+	"time"
+)
+
+type Item struct {
+	Object     interface{}
+	Expiration int64
+}
+
+// Returns true if the item has expired.
+func (item Item) Expired() bool {
+	if item.Expiration == 0 {
+		return false
+	}
+	return time.Now().UnixNano() > item.Expiration
+}
+
+const (
+	// For use with functions that take an expiration time.
+	NoExpiration time.Duration = -1
+	// For use with functions that take an expiration time. Equivalent to
+	// passing in the same expiration duration as was given to New() or
+	// NewFrom() when the cache was created (e.g. 5 minutes.)
+	DefaultExpiration time.Duration = 0
+)
+
+type Cache struct {
+	*cache
+	// If this is confusing, see the comment at the bottom of New()
+}
+
+type cache struct {
+	defaultExpiration time.Duration
+	items             map[string]Item
+	mu                sync.RWMutex
+	onEvicted         func(string, interface{})
+	janitor           *janitor
+}
+
+// Add an item to the cache, replacing any existing item. If the duration is 0
+// (DefaultExpiration), the cache's default expiration time is used. If it is -1
+// (NoExpiration), the item never expires.
+func (c *cache) Set(k string, x interface{}, d time.Duration) { + // "Inlining" of set + var e int64 + if d == DefaultExpiration { + d = c.defaultExpiration + } + if d > 0 { + e = time.Now().Add(d).UnixNano() + } + c.mu.Lock() + c.items[k] = Item{ + Object: x, + Expiration: e, + } + // TODO: Calls to mu.Unlock are currently not deferred because defer + // adds ~200 ns (as of go1.) + c.mu.Unlock() +} + +func (c *cache) set(k string, x interface{}, d time.Duration) { + var e int64 + if d == DefaultExpiration { + d = c.defaultExpiration + } + if d > 0 { + e = time.Now().Add(d).UnixNano() + } + c.items[k] = Item{ + Object: x, + Expiration: e, + } +} + +// Add an item to the cache, replacing any existing item, using the default +// expiration. +func (c *cache) SetDefault(k string, x interface{}) { + c.Set(k, x, DefaultExpiration) +} + +// Add an item to the cache only if an item doesn't already exist for the given +// key, or if the existing item has expired. Returns an error otherwise. +func (c *cache) Add(k string, x interface{}, d time.Duration) error { + c.mu.Lock() + _, found := c.get(k) + if found { + c.mu.Unlock() + return fmt.Errorf("Item %s already exists", k) + } + c.set(k, x, d) + c.mu.Unlock() + return nil +} + +// Set a new value for the cache key only if it already exists, and the existing +// item hasn't expired. Returns an error otherwise. +func (c *cache) Replace(k string, x interface{}, d time.Duration) error { + c.mu.Lock() + _, found := c.get(k) + if !found { + c.mu.Unlock() + return fmt.Errorf("Item %s doesn't exist", k) + } + c.set(k, x, d) + c.mu.Unlock() + return nil +} + +// Get an item from the cache. Returns the item or nil, and a bool indicating +// whether the key was found. +func (c *cache) Get(k string) (interface{}, bool) { + c.mu.RLock() + // "Inlining" of get and Expired + item, found := c.items[k] + if !found { + c.mu.RUnlock() + return nil, false + } + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + c.mu.RUnlock() + return nil, false + } + } + c.mu.RUnlock() + return item.Object, true +} + +// GetWithExpiration returns an item and its expiration time from the cache. +// It returns the item or nil, the expiration time if one is set (if the item +// never expires a zero value for time.Time is returned), and a bool indicating +// whether the key was found. +func (c *cache) GetWithExpiration(k string) (interface{}, time.Time, bool) { + c.mu.RLock() + // "Inlining" of get and Expired + item, found := c.items[k] + if !found { + c.mu.RUnlock() + return nil, time.Time{}, false + } + + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + c.mu.RUnlock() + return nil, time.Time{}, false + } + + // Return the item and the expiration time + c.mu.RUnlock() + return item.Object, time.Unix(0, item.Expiration), true + } + + // If expiration <= 0 (i.e. no expiration time set) then return the item + // and a zeroed time.Time + c.mu.RUnlock() + return item.Object, time.Time{}, true +} + +func (c *cache) get(k string) (interface{}, bool) { + item, found := c.items[k] + if !found { + return nil, false + } + // "Inlining" of Expired + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + return nil, false + } + } + return item.Object, true +} + +// Increment an item of type int, int8, int16, int32, int64, uintptr, uint, +// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the +// item's value is not an integer, if it was not found, or if it is not +// possible to increment it by n. 
To retrieve the incremented value, use one +// of the specialized methods, e.g. IncrementInt64. +func (c *cache) Increment(k string, n int64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case int: + v.Object = v.Object.(int) + int(n) + case int8: + v.Object = v.Object.(int8) + int8(n) + case int16: + v.Object = v.Object.(int16) + int16(n) + case int32: + v.Object = v.Object.(int32) + int32(n) + case int64: + v.Object = v.Object.(int64) + n + case uint: + v.Object = v.Object.(uint) + uint(n) + case uintptr: + v.Object = v.Object.(uintptr) + uintptr(n) + case uint8: + v.Object = v.Object.(uint8) + uint8(n) + case uint16: + v.Object = v.Object.(uint16) + uint16(n) + case uint32: + v.Object = v.Object.(uint32) + uint32(n) + case uint64: + v.Object = v.Object.(uint64) + uint64(n) + case float32: + v.Object = v.Object.(float32) + float32(n) + case float64: + v.Object = v.Object.(float64) + float64(n) + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s is not an integer", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Increment an item of type float32 or float64 by n. Returns an error if the +// item's value is not floating point, if it was not found, or if it is not +// possible to increment it by n. Pass a negative number to decrement the +// value. To retrieve the incremented value, use one of the specialized methods, +// e.g. IncrementFloat64. +func (c *cache) IncrementFloat(k string, n float64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case float32: + v.Object = v.Object.(float32) + float32(n) + case float64: + v.Object = v.Object.(float64) + n + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s does not have type float32 or float64", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Increment an item of type int by n. Returns an error if the item's value is +// not an int, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt(k string, n int) (int, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int8 by n. Returns an error if the item's value is +// not an int8, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt8(k string, n int8) (int8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int8", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int16 by n. Returns an error if the item's value is +// not an int16, or if it was not found. If there is no error, the incremented +// value is returned. 
+func (c *cache) IncrementInt16(k string, n int16) (int16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int16", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int32 by n. Returns an error if the item's value is +// not an int32, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt32(k string, n int32) (int32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int64 by n. Returns an error if the item's value is +// not an int64, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt64(k string, n int64) (int64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint by n. Returns an error if the item's value is +// not an uint, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementUint(k string, n uint) (uint, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uintptr by n. Returns an error if the item's value +// is not an uintptr, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUintptr(k string, n uintptr) (uintptr, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uintptr) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uintptr", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint8 by n. Returns an error if the item's value +// is not an uint8, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint8(k string, n uint8) (uint8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint8", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint16 by n. Returns an error if the item's value +// is not an uint16, or if it was not found. 
If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint16(k string, n uint16) (uint16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint16", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint32 by n. Returns an error if the item's value +// is not an uint32, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint32(k string, n uint32) (uint32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint64 by n. Returns an error if the item's value +// is not an uint64, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint64(k string, n uint64) (uint64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type float32 by n. Returns an error if the item's value +// is not an float32, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementFloat32(k string, n float32) (float32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type float64 by n. Returns an error if the item's value +// is not an float64, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementFloat64(k string, n float64) (float64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int, int8, int16, int32, int64, uintptr, uint, +// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the +// item's value is not an integer, if it was not found, or if it is not +// possible to decrement it by n. To retrieve the decremented value, use one +// of the specialized methods, e.g. DecrementInt64. +func (c *cache) Decrement(k string, n int64) error { + // TODO: Implement Increment and Decrement more cleanly. + // (Cannot do Increment(k, n*-1) for uints.) 
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s not found", k)
+	}
+	switch v.Object.(type) {
+	case int:
+		v.Object = v.Object.(int) - int(n)
+	case int8:
+		v.Object = v.Object.(int8) - int8(n)
+	case int16:
+		v.Object = v.Object.(int16) - int16(n)
+	case int32:
+		v.Object = v.Object.(int32) - int32(n)
+	case int64:
+		v.Object = v.Object.(int64) - n
+	case uint:
+		v.Object = v.Object.(uint) - uint(n)
+	case uintptr:
+		v.Object = v.Object.(uintptr) - uintptr(n)
+	case uint8:
+		v.Object = v.Object.(uint8) - uint8(n)
+	case uint16:
+		v.Object = v.Object.(uint16) - uint16(n)
+	case uint32:
+		v.Object = v.Object.(uint32) - uint32(n)
+	case uint64:
+		v.Object = v.Object.(uint64) - uint64(n)
+	case float32:
+		v.Object = v.Object.(float32) - float32(n)
+	case float64:
+		v.Object = v.Object.(float64) - float64(n)
+	default:
+		c.mu.Unlock()
+		return fmt.Errorf("The value for %s is not an integer", k)
+	}
+	c.items[k] = v
+	c.mu.Unlock()
+	return nil
+}
+
+// Decrement an item of type float32 or float64 by n. Returns an error if the
+// item's value is not floating point, if it was not found, or if it is not
+// possible to decrement it by n. Pass a negative number to increment the
+// value. To retrieve the decremented value, use one of the specialized methods,
+// e.g. DecrementFloat64.
+func (c *cache) DecrementFloat(k string, n float64) error {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s not found", k)
+	}
+	switch v.Object.(type) {
+	case float32:
+		v.Object = v.Object.(float32) - float32(n)
+	case float64:
+		v.Object = v.Object.(float64) - n
+	default:
+		c.mu.Unlock()
+		return fmt.Errorf("The value for %s does not have type float32 or float64", k)
+	}
+	c.items[k] = v
+	c.mu.Unlock()
+	return nil
+}
+
+// Decrement an item of type int by n. Returns an error if the item's value is
+// not an int, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt(k string, n int) (int, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int8 by n. Returns an error if the item's value is
+// not an int8, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt8(k string, n int8) (int8, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int8)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int8", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int16 by n. Returns an error if the item's value is
+// not an int16, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt16(k string, n int16) (int16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int16", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int32 by n. Returns an error if the item's value is +// not an int32, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt32(k string, n int32) (int32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int64 by n. Returns an error if the item's value is +// not an int64, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt64(k string, n int64) (int64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint by n. Returns an error if the item's value is +// not an uint, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementUint(k string, n uint) (uint, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uintptr by n. Returns an error if the item's value +// is not an uintptr, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUintptr(k string, n uintptr) (uintptr, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uintptr) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uintptr", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint8 by n. Returns an error if the item's value is +// not an uint8, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementUint8(k string, n uint8) (uint8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint8", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint16 by n. Returns an error if the item's value +// is not an uint16, or if it was not found. 
If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint16(k string, n uint16) (uint16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint16", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint32 by n. Returns an error if the item's value +// is not an uint32, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint32(k string, n uint32) (uint32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint64 by n. Returns an error if the item's value +// is not an uint64, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint64(k string, n uint64) (uint64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type float32 by n. Returns an error if the item's value +// is not an float32, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementFloat32(k string, n float32) (float32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type float64 by n. Returns an error if the item's value +// is not an float64, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementFloat64(k string, n float64) (float64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Delete an item from the cache. Does nothing if the key is not in the cache. +func (c *cache) Delete(k string) { + c.mu.Lock() + v, evicted := c.delete(k) + c.mu.Unlock() + if evicted { + c.onEvicted(k, v) + } +} + +func (c *cache) delete(k string) (interface{}, bool) { + if c.onEvicted != nil { + if v, found := c.items[k]; found { + delete(c.items, k) + return v.Object, true + } + } + delete(c.items, k) + return nil, false +} + +type keyAndValue struct { + key string + value interface{} +} + +// Delete all expired items from the cache. 
+func (c *cache) DeleteExpired() { + var evictedItems []keyAndValue + now := time.Now().UnixNano() + c.mu.Lock() + for k, v := range c.items { + // "Inlining" of expired + if v.Expiration > 0 && now > v.Expiration { + ov, evicted := c.delete(k) + if evicted { + evictedItems = append(evictedItems, keyAndValue{k, ov}) + } + } + } + c.mu.Unlock() + for _, v := range evictedItems { + c.onEvicted(v.key, v.value) + } +} + +// Sets an (optional) function that is called with the key and value when an +// item is evicted from the cache. (Including when it is deleted manually, but +// not when it is overwritten.) Set to nil to disable. +func (c *cache) OnEvicted(f func(string, interface{})) { + c.mu.Lock() + c.onEvicted = f + c.mu.Unlock() +} + +// Write the cache's items (using Gob) to an io.Writer. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) Save(w io.Writer) (err error) { + enc := gob.NewEncoder(w) + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("Error registering item types with Gob library") + } + }() + c.mu.RLock() + defer c.mu.RUnlock() + for _, v := range c.items { + gob.Register(v.Object) + } + err = enc.Encode(&c.items) + return +} + +// Save the cache's items to the given filename, creating the file if it +// doesn't exist, and overwriting it if it does. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) SaveFile(fname string) error { + fp, err := os.Create(fname) + if err != nil { + return err + } + err = c.Save(fp) + if err != nil { + fp.Close() + return err + } + return fp.Close() +} + +// Add (Gob-serialized) cache items from an io.Reader, excluding any items with +// keys that already exist (and haven't expired) in the current cache. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) Load(r io.Reader) error { + dec := gob.NewDecoder(r) + items := map[string]Item{} + err := dec.Decode(&items) + if err == nil { + c.mu.Lock() + defer c.mu.Unlock() + for k, v := range items { + ov, found := c.items[k] + if !found || ov.Expired() { + c.items[k] = v + } + } + } + return err +} + +// Load and add cache items from the given filename, excluding any items with +// keys that already exist in the current cache. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) LoadFile(fname string) error { + fp, err := os.Open(fname) + if err != nil { + return err + } + err = c.Load(fp) + if err != nil { + fp.Close() + return err + } + return fp.Close() +} + +// Copies all unexpired items in the cache into a new map and returns it. +func (c *cache) Items() map[string]Item { + c.mu.RLock() + defer c.mu.RUnlock() + m := make(map[string]Item, len(c.items)) + now := time.Now().UnixNano() + for k, v := range c.items { + // "Inlining" of Expired + if v.Expiration > 0 { + if now > v.Expiration { + continue + } + } + m[k] = v + } + return m +} + +// Returns the number of items in the cache. This may include items that have +// expired, but have not yet been cleaned up. +func (c *cache) ItemCount() int { + c.mu.RLock() + n := len(c.items) + c.mu.RUnlock() + return n +} + +// Delete all items from the cache. 
+func (c *cache) Flush() { + c.mu.Lock() + c.items = map[string]Item{} + c.mu.Unlock() +} + +type janitor struct { + Interval time.Duration + stop chan bool +} + +func (j *janitor) Run(c *cache) { + ticker := time.NewTicker(j.Interval) + for { + select { + case <-ticker.C: + c.DeleteExpired() + case <-j.stop: + ticker.Stop() + return + } + } +} + +func stopJanitor(c *Cache) { + c.janitor.stop <- true +} + +func runJanitor(c *cache, ci time.Duration) { + j := &janitor{ + Interval: ci, + stop: make(chan bool), + } + c.janitor = j + go j.Run(c) +} + +func newCache(de time.Duration, m map[string]Item) *cache { + if de == 0 { + de = -1 + } + c := &cache{ + defaultExpiration: de, + items: m, + } + return c +} + +func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item) *Cache { + c := newCache(de, m) + // This trick ensures that the janitor goroutine (which--granted it + // was enabled--is running DeleteExpired on c forever) does not keep + // the returned C object from being garbage collected. When it is + // garbage collected, the finalizer stops the janitor goroutine, after + // which c can be collected. + C := &Cache{c} + if ci > 0 { + runJanitor(c, ci) + runtime.SetFinalizer(C, stopJanitor) + } + return C +} + +// Return a new cache with a given default expiration duration and cleanup +// interval. If the expiration duration is less than one (or NoExpiration), +// the items in the cache never expire (by default), and must be deleted +// manually. If the cleanup interval is less than one, expired items are not +// deleted from the cache before calling c.DeleteExpired(). +func New(defaultExpiration, cleanupInterval time.Duration) *Cache { + items := make(map[string]Item) + return newCacheWithJanitor(defaultExpiration, cleanupInterval, items) +} + +// Return a new cache with a given default expiration duration and cleanup +// interval. If the expiration duration is less than one (or NoExpiration), +// the items in the cache never expire (by default), and must be deleted +// manually. If the cleanup interval is less than one, expired items are not +// deleted from the cache before calling c.DeleteExpired(). +// +// NewFrom() also accepts an items map which will serve as the underlying map +// for the cache. This is useful for starting from a deserialized cache +// (serialized using e.g. gob.Encode() on c.Items()), or passing in e.g. +// make(map[string]Item, 500) to improve startup performance when the cache +// is expected to reach a certain minimum size. +// +// Only the cache's methods synchronize access to this map, so it is not +// recommended to keep any references to the map around after creating a cache. +// If need be, the map can be accessed at a later point using c.Items() (subject +// to the same caveat.) +// +// Note regarding serialization: When using e.g. gob, make sure to +// gob.Register() the individual types stored in the cache before encoding a +// map retrieved with c.Items(), and to register those same types before +// decoding a blob containing an items map. 
+func NewFrom(defaultExpiration, cleanupInterval time.Duration, items map[string]Item) *Cache { + return newCacheWithJanitor(defaultExpiration, cleanupInterval, items) +} diff --git a/vendor/github.com/patrickmn/go-cache/sharded.go b/vendor/github.com/patrickmn/go-cache/sharded.go new file mode 100644 index 00000000..bcc0538b --- /dev/null +++ b/vendor/github.com/patrickmn/go-cache/sharded.go @@ -0,0 +1,192 @@ +package cache + +import ( + "crypto/rand" + "math" + "math/big" + insecurerand "math/rand" + "os" + "runtime" + "time" +) + +// This is an experimental and unexported (for now) attempt at making a cache +// with better algorithmic complexity than the standard one, namely by +// preventing write locks of the entire cache when an item is added. As of the +// time of writing, the overhead of selecting buckets results in cache +// operations being about twice as slow as for the standard cache with small +// total cache sizes, and faster for larger ones. +// +// See cache_test.go for a few benchmarks. + +type unexportedShardedCache struct { + *shardedCache +} + +type shardedCache struct { + seed uint32 + m uint32 + cs []*cache + janitor *shardedJanitor +} + +// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead. +func djb33(seed uint32, k string) uint32 { + var ( + l = uint32(len(k)) + d = 5381 + seed + l + i = uint32(0) + ) + // Why is all this 5x faster than a for loop? + if l >= 4 { + for i < l-4 { + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + d = (d * 33) ^ uint32(k[i+2]) + d = (d * 33) ^ uint32(k[i+3]) + i += 4 + } + } + switch l - i { + case 1: + case 2: + d = (d * 33) ^ uint32(k[i]) + case 3: + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + case 4: + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + d = (d * 33) ^ uint32(k[i+2]) + } + return d ^ (d >> 16) +} + +func (sc *shardedCache) bucket(k string) *cache { + return sc.cs[djb33(sc.seed, k)%sc.m] +} + +func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) { + sc.bucket(k).Set(k, x, d) +} + +func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error { + return sc.bucket(k).Add(k, x, d) +} + +func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error { + return sc.bucket(k).Replace(k, x, d) +} + +func (sc *shardedCache) Get(k string) (interface{}, bool) { + return sc.bucket(k).Get(k) +} + +func (sc *shardedCache) Increment(k string, n int64) error { + return sc.bucket(k).Increment(k, n) +} + +func (sc *shardedCache) IncrementFloat(k string, n float64) error { + return sc.bucket(k).IncrementFloat(k, n) +} + +func (sc *shardedCache) Decrement(k string, n int64) error { + return sc.bucket(k).Decrement(k, n) +} + +func (sc *shardedCache) Delete(k string) { + sc.bucket(k).Delete(k) +} + +func (sc *shardedCache) DeleteExpired() { + for _, v := range sc.cs { + v.DeleteExpired() + } +} + +// Returns the items in the cache. This may include items that have expired, +// but have not yet been cleaned up. If this is significant, the Expiration +// fields of the items should be checked. Note that explicit synchronization +// is needed to use a cache and its corresponding Items() return values at +// the same time, as the maps are shared. 
+func (sc *shardedCache) Items() []map[string]Item { + res := make([]map[string]Item, len(sc.cs)) + for i, v := range sc.cs { + res[i] = v.Items() + } + return res +} + +func (sc *shardedCache) Flush() { + for _, v := range sc.cs { + v.Flush() + } +} + +type shardedJanitor struct { + Interval time.Duration + stop chan bool +} + +func (j *shardedJanitor) Run(sc *shardedCache) { + j.stop = make(chan bool) + tick := time.Tick(j.Interval) + for { + select { + case <-tick: + sc.DeleteExpired() + case <-j.stop: + return + } + } +} + +func stopShardedJanitor(sc *unexportedShardedCache) { + sc.janitor.stop <- true +} + +func runShardedJanitor(sc *shardedCache, ci time.Duration) { + j := &shardedJanitor{ + Interval: ci, + } + sc.janitor = j + go j.Run(sc) +} + +func newShardedCache(n int, de time.Duration) *shardedCache { + max := big.NewInt(0).SetUint64(uint64(math.MaxUint32)) + rnd, err := rand.Int(rand.Reader, max) + var seed uint32 + if err != nil { + os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. Continuing with an insecure seed.\n")) + seed = insecurerand.Uint32() + } else { + seed = uint32(rnd.Uint64()) + } + sc := &shardedCache{ + seed: seed, + m: uint32(n), + cs: make([]*cache, n), + } + for i := 0; i < n; i++ { + c := &cache{ + defaultExpiration: de, + items: map[string]Item{}, + } + sc.cs[i] = c + } + return sc +} + +func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache { + if defaultExpiration == 0 { + defaultExpiration = -1 + } + sc := newShardedCache(shards, defaultExpiration) + SC := &unexportedShardedCache{sc} + if cleanupInterval > 0 { + runShardedJanitor(sc, cleanupInterval) + runtime.SetFinalizer(SC, stopShardedJanitor) + } + return SC +} diff --git a/vendor/github.com/posener/complete/.gitignore b/vendor/github.com/posener/complete/.gitignore new file mode 100644 index 00000000..293955f9 --- /dev/null +++ b/vendor/github.com/posener/complete/.gitignore @@ -0,0 +1,4 @@ +.idea +coverage.txt +gocomplete/gocomplete +example/self/self diff --git a/vendor/github.com/posener/complete/.travis.yml b/vendor/github.com/posener/complete/.travis.yml new file mode 100644 index 00000000..2fae9454 --- /dev/null +++ b/vendor/github.com/posener/complete/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.11 + - 1.10.x + - 1.9 + - 1.8 + +before_install: + - go get -u -t ./... + +script: + - GO111MODULE=on ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt new file mode 100644 index 00000000..16249b4a --- /dev/null +++ b/vendor/github.com/posener/complete/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2017 Eyal Posener + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go new file mode 100644 index 00000000..17ab2c6d --- /dev/null +++ b/vendor/github.com/posener/complete/args.go @@ -0,0 +1,111 @@ +package complete + +import ( + "os" + "path/filepath" + "strings" + "unicode" +) + +// Args describes command line arguments +type Args struct { + // All lists of all arguments in command line (not including the command itself) + All []string + // Completed lists of all completed arguments in command line, + // If the last one is still being typed - no space after it, + // it won't appear in this list of arguments. + Completed []string + // Last argument in command line, the one being typed, if the last + // character in the command line is a space, this argument will be empty, + // otherwise this would be the last word. + Last string + // LastCompleted is the last argument that was fully typed. + // If the last character in the command line is space, this would be the + // last word, otherwise, it would be the word before that. + LastCompleted string +} + +// Directory gives the directory of the current written +// last argument if it represents a file name being written. +// in case that it is not, we fall back to the current directory. +func (a Args) Directory() string { + if info, err := os.Stat(a.Last); err == nil && info.IsDir() { + return fixPathForm(a.Last, a.Last) + } + dir := filepath.Dir(a.Last) + if info, err := os.Stat(dir); err != nil || !info.IsDir() { + return "./" + } + return fixPathForm(a.Last, dir) +} + +func newArgs(line string) Args { + var ( + all []string + completed []string + ) + parts := splitFields(line) + if len(parts) > 0 { + all = parts[1:] + completed = removeLast(parts[1:]) + } + return Args{ + All: all, + Completed: completed, + Last: last(parts), + LastCompleted: last(completed), + } +} + +// splitFields returns a list of fields from the given command line. +// If the last character is space, it appends an empty field in the end +// indicating that the field before it was completed. +// If the last field is of the form "a=b", it splits it to two fields: "a", "b", +// So it can be completed. +func splitFields(line string) []string { + parts := strings.Fields(line) + + // Add empty field if the last field was completed. + if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) { + parts = append(parts, "") + } + + // Treat the last field if it is of the form "a=b" + parts = splitLastEqual(parts) + return parts +} + +func splitLastEqual(line []string) []string { + if len(line) == 0 { + return line + } + parts := strings.Split(line[len(line)-1], "=") + return append(line[:len(line)-1], parts...) 
+}
+
+func (a Args) from(i int) Args {
+	if i > len(a.All) {
+		i = len(a.All)
+	}
+	a.All = a.All[i:]
+
+	if i > len(a.Completed) {
+		i = len(a.Completed)
+	}
+	a.Completed = a.Completed[i:]
+	return a
+}
+
+func removeLast(a []string) []string {
+	if len(a) > 0 {
+		return a[:len(a)-1]
+	}
+	return a
+}
+
+func last(args []string) string {
+	if len(args) == 0 {
+		return ""
+	}
+	return args[len(args)-1]
+}
diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go
new file mode 100644
index 00000000..b99fe529
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/cmd.go
@@ -0,0 +1,128 @@
+// Package cmd is used for command line options for the complete tool
+package cmd
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/posener/complete/cmd/install"
+)
+
+// CLI for command line
+type CLI struct {
+	Name          string
+	InstallName   string
+	UninstallName string
+
+	install   bool
+	uninstall bool
+	yes       bool
+}
+
+const (
+	defaultInstallName   = "install"
+	defaultUninstallName = "uninstall"
+)
+
+// Run is used when running complete in command line mode.
+// This is used when complete is not completing words, but is being
+// asked to install or uninstall the completion.
+func (f *CLI) Run() bool {
+	err := f.validate()
+	if err != nil {
+		os.Stderr.WriteString(err.Error() + "\n")
+		os.Exit(1)
+	}
+
+	switch {
+	case f.install:
+		f.prompt()
+		err = install.Install(f.Name)
+	case f.uninstall:
+		f.prompt()
+		err = install.Uninstall(f.Name)
+	default:
+		// none of the action flags matched,
+		// returning false should make the real program execute
+		return false
+	}
+
+	if err != nil {
+		fmt.Printf("%s failed! %s\n", f.action(), err)
+		os.Exit(3)
+	}
+	fmt.Println("Done!")
+	return true
+}
+
+// prompt asks the user for approval and
+// exits if approval was not given
+func (f *CLI) prompt() {
+	defer fmt.Println(f.action() + "ing...")
+	if f.yes {
+		return
+	}
+	fmt.Printf("%s completion for %s? ", f.action(), f.Name)
+	var answer string
+	fmt.Scanln(&answer)
+
+	switch strings.ToLower(answer) {
+	case "y", "yes":
+		return
+	default:
+		fmt.Println("Cancelling...")
+		os.Exit(1)
+	}
+}
+
+// AddFlags adds the CLI flags to the flag set.
+// If flags is nil, the default command line flags will be taken.
+// Pass non-empty strings as installName and uninstallName to override the default
+// flag names.
+func (f *CLI) AddFlags(flags *flag.FlagSet) {
+	if flags == nil {
+		flags = flag.CommandLine
+	}
+
+	if f.InstallName == "" {
+		f.InstallName = defaultInstallName
+	}
+	if f.UninstallName == "" {
+		f.UninstallName = defaultUninstallName
+	}
+
+	if flags.Lookup(f.InstallName) == nil {
+		flags.BoolVar(&f.install, f.InstallName, false,
+			fmt.Sprintf("Install completion for %s command", f.Name))
+	}
+	if flags.Lookup(f.UninstallName) == nil {
+		flags.BoolVar(&f.uninstall, f.UninstallName, false,
+			fmt.Sprintf("Uninstall completion for %s command", f.Name))
+	}
+	if flags.Lookup("y") == nil {
+		flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes' when installing completion")
+	}
+}
+
+// validate the CLI
+func (f *CLI) validate() error {
+	if f.install && f.uninstall {
+		return errors.New("Install and uninstall are mutually exclusive")
+	}
+	return nil
+}
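The CLI type above is normally driven through the complete package's public entry point: `Run` adds the `-install`, `-uninstall` and `-y` flags, then returns true when the binary was invoked to install, uninstall, or complete rather than to do its real work. A minimal sketch, assuming a hypothetical command named `self`:

```go
package main

import "github.com/posener/complete"

func main() {
	cmd := complete.Command{
		Flags: complete.Flags{
			// invented flag for illustration
			"-verbose": complete.PredictNothing,
		},
	}
	// Run parses the flags itself; when it returns true the process was
	// acting as a completer or installer and should exit immediately.
	if complete.New("self", cmd).Run() {
		return
	}
	// ... the real program logic runs here when not completing ...
}
```

Running `self -install` then appends a line of the form `complete -C /path/to/self self` to the user's shell rc file, which is exactly what the bash/zsh installers below generate.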
+
+// action name according to the CLI values.
+func (f *CLI) action() string {
+	switch {
+	case f.install:
+		return "Install"
+	case f.uninstall:
+		return "Uninstall"
+	default:
+		return "unknown"
+	}
+}
diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go
new file mode 100644
index 00000000..a287f998
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/install/bash.go
@@ -0,0 +1,32 @@
+package install
+
+import "fmt"
+
+// (un)install in bash
+// basically adds/removes from .bashrc:
+//
+// complete -C </path/to/completion/command> <command>
+type bash struct {
+	rc string
+}
+
+func (b bash) Install(cmd, bin string) error {
+	completeCmd := b.cmd(cmd, bin)
+	if lineInFile(b.rc, completeCmd) {
+		return fmt.Errorf("already installed in %s", b.rc)
+	}
+	return appendToFile(b.rc, completeCmd)
+}
+
+func (b bash) Uninstall(cmd, bin string) error {
+	completeCmd := b.cmd(cmd, bin)
+	if !lineInFile(b.rc, completeCmd) {
+		return fmt.Errorf("not installed in %s", b.rc)
+	}
+
+	return removeFromFile(b.rc, completeCmd)
+}
+
+func (bash) cmd(cmd, bin string) string {
+	return fmt.Sprintf("complete -C %s %s", bin, cmd)
+}
diff --git a/vendor/github.com/posener/complete/cmd/install/fish.go b/vendor/github.com/posener/complete/cmd/install/fish.go
new file mode 100644
index 00000000..6467196b
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/install/fish.go
@@ -0,0 +1,56 @@
+package install
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"text/template"
+)
+
+// (un)install in fish
+
+type fish struct {
+	configDir string
+}
+
+func (f fish) Install(cmd, bin string) error {
+	completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd))
+	completeCmd, err := f.cmd(cmd, bin)
+	if err != nil {
+		return err
+	}
+	if _, err := os.Stat(completionFile); err == nil {
+		return fmt.Errorf("already installed at %s", completionFile)
+	}
+
+	return createFile(completionFile, completeCmd)
+}
+
+func (f fish) Uninstall(cmd, bin string) error {
+	completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd))
+	if _, err := os.Stat(completionFile); err != nil {
+		return fmt.Errorf("not installed in %s", f.configDir)
+	}
+
+	return os.Remove(completionFile)
+}
+
+func (f fish) cmd(cmd, bin string) (string, error) {
+	var buf bytes.Buffer
+	params := struct{ Cmd, Bin string }{cmd, bin}
+	tmpl := template.Must(template.New("cmd").Parse(`
+function __complete_{{.Cmd}}
+    set -lx COMP_LINE (string join ' ' (commandline -o))
+    test (commandline -ct) = ""
+    and set COMP_LINE "$COMP_LINE "
+    {{.Bin}}
+end
+complete -c {{.Cmd}} -a "(__complete_{{.Cmd}})"
+`))
+	err := tmpl.Execute(&buf, params)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go
new file mode 100644
index 00000000..dfa1963b
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/install/install.go
@@ -0,0 +1,119 @@
+package install
+
+import (
+	"errors"
+	"os"
+	"os/user"
+	"path/filepath"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+type installer interface {
+	Install(cmd, bin string) error
+	Uninstall(cmd, bin string) error
+}
+
+// Install complete command given:
+// cmd: is the command name
+func Install(cmd string) error {
+	is := installers()
+	if len(is) == 0 {
+		return errors.New("Did not find any shells to install")
+	}
+	bin, err := getBinaryPath()
+	if err != nil {
+		return err
+	}
+
+	for _, i := range is {
+		errI := i.Install(cmd, bin)
+		if errI != nil {
+			err = multierror.Append(err, errI)
+		}
+	}
+
+	return err
+}
+
+// Uninstall complete command given:
+// cmd: is the command name
+func Uninstall(cmd string) error {
+	is := installers()
+	if len(is) == 0 {
+		return errors.New("Did not find any shells to uninstall")
+	}
+	bin, err := getBinaryPath()
+	if err != nil {
+		return err
+	}
+
+	for _, i := range is {
+		errI := i.Uninstall(cmd, bin)
+		if errI != nil {
+			err = multierror.Append(err, errI)
+		}
+	}
+
+	return err
+}
+
+func installers() (i []installer) {
+	for _, rc := range [...]string{".bashrc", ".bash_profile", ".bash_login", ".profile"} {
+		if f := rcFile(rc); f != "" {
+			i = append(i, bash{f})
+			break
+		}
+	}
+	if f := rcFile(".zshrc"); f != "" {
+		i = append(i, zsh{f})
+	}
+	if d := fishConfigDir(); d != "" {
+		i = append(i, fish{d})
+	}
+	return
+}
+
+func fishConfigDir() string {
+	configDir := filepath.Join(getConfigHomePath(), "fish")
+	if configDir == "" {
+		return ""
+	}
+	if info, err := os.Stat(configDir); err != nil || !info.IsDir() {
+		return ""
+	}
+	return configDir
+}
+
+func getConfigHomePath() string {
+	u, err := user.Current()
+	if err != nil {
+		return ""
+	}
+
+	configHome := os.Getenv("XDG_CONFIG_HOME")
+	if configHome == "" {
+		return filepath.Join(u.HomeDir, ".config")
+	}
+	return configHome
+}
+
+func getBinaryPath() (string, error) {
+	bin, err := os.Executable()
+	if err != nil {
+		return "", err
+	}
+	return filepath.Abs(bin)
+}
+
+func rcFile(name string) string {
+	u, err := user.Current()
+	if err != nil {
+		return ""
+	}
+	path := filepath.Join(u.HomeDir, name)
+	if _, err := os.Stat(path); err != nil {
+		return ""
+	}
+	return path
+}
diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go
new file mode 100644
index 00000000..d34ac8ca
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/install/utils.go
@@ -0,0 +1,140 @@
+package install
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+func lineInFile(name string, lookFor string) bool {
+	f, err := os.Open(name)
+	if err != nil {
+		return false
+	}
+	defer f.Close()
+	r := bufio.NewReader(f)
+	prefix := []byte{}
+	for {
+		line, isPrefix, err := r.ReadLine()
+		if err == io.EOF {
+			return false
+		}
+		if err != nil {
+			return false
+		}
+		if isPrefix {
+			prefix = append(prefix, line...)
+			continue
+		}
+		line = append(prefix, line...)
+		if string(line) == lookFor {
+			return true
+		}
+		prefix = prefix[:0]
+	}
+}
+
+func createFile(name string, content string) error {
+	// make sure file directory exists
+	if err := os.MkdirAll(filepath.Dir(name), 0775); err != nil {
+		return err
+	}
+
+	// create the file
+	f, err := os.Create(name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// write file content
+	_, err = f.WriteString(fmt.Sprintf("%s\n", content))
+	return err
+}
+
+func appendToFile(name string, content string) error {
+	f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = f.WriteString(fmt.Sprintf("\n%s\n", content))
+	return err
+}
+
+func removeFromFile(name string, content string) error {
+	backup := name + ".bck"
+	err := copyFile(name, backup)
+	if err != nil {
+		return err
+	}
+	temp, err := removeContentToTempFile(name, content)
+	if err != nil {
+		return err
+	}
+
+	err = copyFile(temp, name)
+	if err != nil {
+		return err
+	}
+
+	return os.Remove(backup)
+}
+
+func removeContentToTempFile(name, content string) (string, error) {
+	rf, err := os.Open(name)
+	if err != nil {
+		return "", err
+	}
+	defer rf.Close()
+	wf, err := ioutil.TempFile("/tmp", "complete-")
+	if err != nil {
+		return "", err
+	}
+	defer wf.Close()
+
+	r := bufio.NewReader(rf)
+	prefix := []byte{}
+	for {
+		line, isPrefix, err := r.ReadLine()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return "", err
+		}
+		if isPrefix {
+			prefix = append(prefix, line...)
+			continue
+		}
+		line = append(prefix, line...)
+		str := string(line)
+		if str == content {
+			continue
+		}
+		_, err = wf.WriteString(str + "\n")
+		if err != nil {
+			return "", err
+		}
+		prefix = prefix[:0]
+	}
+	return wf.Name(), nil
+}
+
+func copyFile(src string, dst string) error {
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer in.Close()
+	out, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer out.Close()
+	_, err = io.Copy(out, in)
+	return err
+}
diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go
new file mode 100644
index 00000000..a625f53c
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/install/zsh.go
@@ -0,0 +1,39 @@
+package install
+
+import "fmt"
+
+// (un)install in zsh
+// basically adds/removes from .zshrc:
+//
+// autoload -U +X bashcompinit && bashcompinit
+// complete -o nospace -C </path/to/completion/command> <command>
+type zsh struct {
+	rc string
+}
+
+func (z zsh) Install(cmd, bin string) error {
+	completeCmd := z.cmd(cmd, bin)
+	if lineInFile(z.rc, completeCmd) {
+		return fmt.Errorf("already installed in %s", z.rc)
+	}
+
+	bashCompInit := "autoload -U +X bashcompinit && bashcompinit"
+	if !lineInFile(z.rc, bashCompInit) {
+		completeCmd = bashCompInit + "\n" + completeCmd
+	}
+
+	return appendToFile(z.rc, completeCmd)
+}
+
+func (z zsh) Uninstall(cmd, bin string) error {
+	completeCmd := z.cmd(cmd, bin)
+	if !lineInFile(z.rc, completeCmd) {
+		return fmt.Errorf("not installed in %s", z.rc)
+	}
+
+	return removeFromFile(z.rc, completeCmd)
+}
+
+func (zsh) cmd(cmd, bin string) string {
+	return fmt.Sprintf("complete -o nospace -C %s %s", bin, cmd)
+}
diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go
new file mode 100644
index 00000000..82d37d52
--- /dev/null
+++ b/vendor/github.com/posener/complete/command.go
@@ -0,0 +1,111 @@
+package complete
+
+// Command represents a command line
+// It holds the data that enables auto completion of command line
+// Command can also be a sub command. +type Command struct { + // Sub is map of sub commands of the current command + // The key refer to the sub command name, and the value is it's + // Command descriptive struct. + Sub Commands + + // Flags is a map of flags that the command accepts. + // The key is the flag name, and the value is it's predictions. + Flags Flags + + // GlobalFlags is a map of flags that the command accepts. + // Global flags that can appear also after a sub command. + GlobalFlags Flags + + // Args are extra arguments that the command accepts, those who are + // given without any flag before. + Args Predictor +} + +// Predict returns all possible predictions for args according to the command struct +func (c *Command) Predict(a Args) []string { + options, _ := c.predict(a) + return options +} + +// Commands is the type of Sub member, it maps a command name to a command struct +type Commands map[string]Command + +// Predict completion of sub command names names according to command line arguments +func (c Commands) Predict(a Args) (prediction []string) { + for sub := range c { + prediction = append(prediction, sub) + } + return +} + +// Flags is the type Flags of the Flags member, it maps a flag name to the flag predictions. +type Flags map[string]Predictor + +// Predict completion of flags names according to command line arguments +func (f Flags) Predict(a Args) (prediction []string) { + for flag := range f { + // If the flag starts with a hyphen, we avoid emitting the prediction + // unless the last typed arg contains a hyphen as well. + flagHyphenStart := len(flag) != 0 && flag[0] == '-' + lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-' + if flagHyphenStart && !lastHyphenStart { + continue + } + prediction = append(prediction, flag) + } + return +} + +// predict options +// only is set to true if no more options are allowed to be returned +// those are in cases of special flag that has specific completion arguments, +// and other flags or sub commands can't come after it. +func (c *Command) predict(a Args) (options []string, only bool) { + + // search sub commands for predictions first + subCommandFound := false + for i, arg := range a.Completed { + if cmd, ok := c.Sub[arg]; ok { + subCommandFound = true + + // recursive call for sub command + options, only = cmd.predict(a.from(i)) + if only { + return + } + + // We matched so stop searching. Continuing to search can accidentally + // match a subcommand with current set of commands, see issue #46. + break + } + } + + // if last completed word is a global flag that we need to complete + if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to global flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.GlobalFlags.Predict(a)...) + + // if a sub command was entered, we won't add the parent command + // completions and we return here. + if subCommandFound { + return + } + + // if last completed word is a command flag that we need to complete + if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.Sub.Predict(a)...) + options = append(options, c.Flags.Predict(a)...) + if c.Args != nil { + options = append(options, c.Args.Predict(a)...) 
+ } + + return +} diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go new file mode 100644 index 00000000..725c4deb --- /dev/null +++ b/vendor/github.com/posener/complete/complete.go @@ -0,0 +1,109 @@ +// Package complete provides a tool for bash writing bash completion in go. +// +// Writing bash completion scripts is a hard work. This package provides an easy way +// to create bash completion scripts for any command, and also an easy way to install/uninstall +// the completion of the command. +package complete + +import ( + "flag" + "fmt" + "io" + "os" + "strconv" + + "github.com/posener/complete/cmd" + "github.com/posener/complete/match" +) + +const ( + envLine = "COMP_LINE" + envPoint = "COMP_POINT" + envDebug = "COMP_DEBUG" +) + +// Complete structs define completion for a command with CLI options +type Complete struct { + Command Command + cmd.CLI + Out io.Writer +} + +// New creates a new complete command. +// name is the name of command we want to auto complete. +// IMPORTANT: it must be the same name - if the auto complete +// completes the 'go' command, name must be equal to "go". +// command is the struct of the command completion. +func New(name string, command Command) *Complete { + return &Complete{ + Command: command, + CLI: cmd.CLI{Name: name}, + Out: os.Stdout, + } +} + +// Run runs the completion and add installation flags beforehand. +// The flags are added to the main flag CommandLine variable. +func (c *Complete) Run() bool { + c.AddFlags(nil) + flag.Parse() + return c.Complete() +} + +// Complete a command from completion line in environment variable, +// and print out the complete options. +// returns success if the completion ran or if the cli matched +// any of the given flags, false otherwise +// For installation: it assumes that flags were added and parsed before +// it was called. +func (c *Complete) Complete() bool { + line, point, ok := getEnv() + if !ok { + // make sure flags parsed, + // in case they were not added in the main program + return c.CLI.Run() + } + + if point >= 0 && point < len(line) { + line = line[:point] + } + + Log("Completing phrase: %s", line) + a := newArgs(line) + Log("Completing last field: %s", a.Last) + options := c.Command.Predict(a) + Log("Options: %s", options) + + // filter only options that match the last argument + matches := []string{} + for _, option := range options { + if match.Prefix(option, a.Last) { + matches = append(matches, option) + } + } + Log("Matches: %s", matches) + c.output(matches) + return true +} + +func getEnv() (line string, point int, ok bool) { + line = os.Getenv(envLine) + if line == "" { + return + } + point, err := strconv.Atoi(os.Getenv(envPoint)) + if err != nil { + // If failed parsing point for some reason, set it to point + // on the end of the line. 
+ Log("Failed parsing point %s: %v", os.Getenv(envPoint), err) + point = len(line) + } + return line, point, true +} + +func (c *Complete) output(options []string) { + // stdout of program defines the complete options + for _, option := range options { + fmt.Fprintln(c.Out, option) + } +} diff --git a/vendor/github.com/posener/complete/go.mod b/vendor/github.com/posener/complete/go.mod new file mode 100644 index 00000000..fef0c440 --- /dev/null +++ b/vendor/github.com/posener/complete/go.mod @@ -0,0 +1,3 @@ +module github.com/posener/complete + +require github.com/hashicorp/go-multierror v1.0.0 diff --git a/vendor/github.com/posener/complete/go.sum b/vendor/github.com/posener/complete/go.sum new file mode 100644 index 00000000..d2f13301 --- /dev/null +++ b/vendor/github.com/posener/complete/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go new file mode 100644 index 00000000..c3029556 --- /dev/null +++ b/vendor/github.com/posener/complete/log.go @@ -0,0 +1,22 @@ +package complete + +import ( + "io/ioutil" + "log" + "os" +) + +// Log is used for debugging purposes +// since complete is running on tab completion, it is nice to +// have logs to the stderr (when writing your own completer) +// to write logs, set the COMP_DEBUG environment variable and +// use complete.Log in the complete program +var Log = getLogger() + +func getLogger() func(format string, args ...interface{}) { + var logfile = ioutil.Discard + if os.Getenv(envDebug) != "" { + logfile = os.Stderr + } + return log.New(logfile, "complete ", log.Flags()).Printf +} diff --git a/vendor/github.com/posener/complete/match/file.go b/vendor/github.com/posener/complete/match/file.go new file mode 100644 index 00000000..051171e8 --- /dev/null +++ b/vendor/github.com/posener/complete/match/file.go @@ -0,0 +1,19 @@ +package match + +import "strings" + +// File returns true if prefix can match the file +func File(file, prefix string) bool { + // special case for current directory completion + if file == "./" && (prefix == "." || prefix == "") { + return true + } + if prefix == "." && strings.HasPrefix(file, ".") { + return true + } + + file = strings.TrimPrefix(file, "./") + prefix = strings.TrimPrefix(prefix, "./") + + return strings.HasPrefix(file, prefix) +} diff --git a/vendor/github.com/posener/complete/match/match.go b/vendor/github.com/posener/complete/match/match.go new file mode 100644 index 00000000..812fcac9 --- /dev/null +++ b/vendor/github.com/posener/complete/match/match.go @@ -0,0 +1,6 @@ +package match + +// Match matches two strings +// it is used for comparing a term to the last typed +// word, the prefix, and see if it is a possible auto complete option. 
+type Match func(term, prefix string) bool
diff --git a/vendor/github.com/posener/complete/match/prefix.go b/vendor/github.com/posener/complete/match/prefix.go
new file mode 100644
index 00000000..9a01ba63
--- /dev/null
+++ b/vendor/github.com/posener/complete/match/prefix.go
@@ -0,0 +1,9 @@
+package match
+
+import "strings"
+
+// Prefix is a simple Matcher: if the word is its prefix, there is a match
+// Prefix returns true if long has prefix as a prefix
+func Prefix(long, prefix string) bool {
+	return strings.HasPrefix(long, prefix)
+}
diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go
new file mode 100644
index 00000000..82070632
--- /dev/null
+++ b/vendor/github.com/posener/complete/predict.go
@@ -0,0 +1,41 @@
+package complete
+
+// Predictor implements a Predict method, which, given command
+// line arguments, returns a list of options it predicts.
+type Predictor interface {
+	Predict(Args) []string
+}
+
+// PredictOr unions multiple predictors, so that the resulting predictor
+// returns the union of their predictions
+func PredictOr(predictors ...Predictor) Predictor {
+	return PredictFunc(func(a Args) (prediction []string) {
+		for _, p := range predictors {
+			if p == nil {
+				continue
+			}
+			prediction = append(prediction, p.Predict(a)...)
+		}
+		return
+	})
+}
+
+// PredictFunc determines what terms can follow a command or a flag.
+// It is used for auto completion: given last - the last word already typed
+// in the command line - what words can complete it.
+type PredictFunc func(Args) []string
+
+// Predict invokes the predict function and implements the Predictor interface
+func (p PredictFunc) Predict(a Args) []string {
+	if p == nil {
+		return nil
+	}
+	return p(a)
+}
+
+// PredictNothing does not expect anything after.
+var PredictNothing Predictor
+
+// PredictAnything expects something, but nothing particular, such as a number
+// or arbitrary name.
+var PredictAnything = PredictFunc(func(Args) []string { return nil })
diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go
new file mode 100644
index 00000000..c8adf7e8
--- /dev/null
+++ b/vendor/github.com/posener/complete/predict_files.go
@@ -0,0 +1,108 @@
+package complete
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/posener/complete/match"
+)
+
+// PredictDirs will search for directories in the path that has started to be
+// typed; if no path was started to be typed, it will complete to directories
+// in the current working directory.
+func PredictDirs(pattern string) Predictor {
+	return files(pattern, false)
+}
+
+// PredictFiles will search for files matching the given pattern in the path that
+// has started to be typed; if no path was started to be typed, it will complete
+// to files that match the pattern in the current working directory.
+// To match any file, use "*" as pattern. To match go files use "*.go", and so on.
+func PredictFiles(pattern string) Predictor {
+	return files(pattern, true)
+}
+
+func files(pattern string, allowFiles bool) PredictFunc {
+
+	// search for files according to arguments,
+	// if only one directory has matched the result, search recursively into
+	// this directory to give more results.
+	return func(a Args) (prediction []string) {
+		prediction = predictFiles(a, pattern, allowFiles)
+
+		// if the number of predictions is not 1, we either have many results or
+		// have no results, so we return it.
+		if len(prediction) != 1 {
+			return
+		}
+
+		// only try deeper, if the one item is a directory
+		if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() {
+			return
+		}
+
+		a.Last = prediction[0]
+		return predictFiles(a, pattern, allowFiles)
+	}
+}
+
+func predictFiles(a Args, pattern string, allowFiles bool) []string {
+	if strings.HasSuffix(a.Last, "/..") {
+		return nil
+	}
+
+	dir := a.Directory()
+	files := listFiles(dir, pattern, allowFiles)
+
+	// add dir if match
+	files = append(files, dir)
+
+	return PredictFilesSet(files).Predict(a)
+}
+
+// PredictFilesSet predicts according to file rules from a given set of file names
+func PredictFilesSet(files []string) PredictFunc {
+	return func(a Args) (prediction []string) {
+		// add all matching files to prediction
+		for _, f := range files {
+			f = fixPathForm(a.Last, f)
+
+			// test matching of file to the argument
+			if match.File(f, a.Last) {
+				prediction = append(prediction, f)
+			}
+		}
+		return
+	}
+}
+
+func listFiles(dir, pattern string, allowFiles bool) []string {
+	// set of all file names
+	m := map[string]bool{}
+
+	// list files
+	if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil {
+		for _, f := range files {
+			if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles {
+				m[f] = true
+			}
+		}
+	}
+
+	// list directories
+	if dirs, err := ioutil.ReadDir(dir); err == nil {
+		for _, d := range dirs {
+			if d.IsDir() {
+				m[filepath.Join(dir, d.Name())] = true
+			}
+		}
+	}
+
+	list := make([]string, 0, len(m))
+	for k := range m {
+		list = append(list, k)
+	}
+	return list
+}
diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go
new file mode 100644
index 00000000..fa4a34ae
--- /dev/null
+++ b/vendor/github.com/posener/complete/predict_set.go
@@ -0,0 +1,12 @@
+package complete
+
+// PredictSet expects a specific set of terms, given in the options argument.
+func PredictSet(options ...string) Predictor {
+	return predictSet(options)
+}
+
+type predictSet []string
+
+func (p predictSet) Predict(a Args) []string {
+	return p
+}
diff --git a/vendor/github.com/posener/complete/readme.md b/vendor/github.com/posener/complete/readme.md
new file mode 100644
index 00000000..6d757ef8
--- /dev/null
+++ b/vendor/github.com/posener/complete/readme.md
@@ -0,0 +1,118 @@
+# complete
+
+A tool for writing bash completion in go, and bash completion for the go command line.
+
+[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete)
+[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete)
+[![golangci](https://golangci.com/badges/github.com/posener/complete.svg)](https://golangci.com/r/github.com/posener/complete)
+[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete)
+[![Go Report Card](https://goreportcard.com/badge/github.com/posener/complete)](https://goreportcard.com/report/github.com/posener/complete)
+
+Writing bash completion scripts is hard work. This package provides an easy way
+to create bash completion scripts for any command, and also an easy way to install/uninstall
+the completion of the command.
+
+## go command bash completion
+
+In [gocomplete](./gocomplete) there is an example for bash completion for the `go` command line.
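A compact sketch composing the predictors defined above; the tool and flag names here are invented for illustration:

```go
package main

import "github.com/posener/complete"

func main() {
	cmd := complete.Command{
		Flags: complete.Flags{
			// a fixed vocabulary of values
			"-format": complete.PredictSet("json", "yaml", "table"),
			// files matching a glob, or any directory
			"-config": complete.PredictOr(
				complete.PredictFiles("*.yaml"),
				complete.PredictDirs("*"),
			),
			// expects a value, but nothing we can usefully predict
			"-count": complete.PredictAnything,
		},
	}
	complete.New("mytool", cmd).Run()
}
```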
+ +This is an example that uses the `complete` package on the `go` command - the `complete` package +can also be used to implement any completions, see [Usage](#usage). + +### Install + +1. Type in your shell: +``` +go get -u github.com/posener/complete/gocomplete +gocomplete -install +``` + +2. Restart your shell + +Uninstall by `gocomplete -uninstall` + +### Features + +- Complete `go` command, including sub commands and all flags. +- Complete packages names or `.go` files when necessary. +- Complete test names after `-run` flag. + +## complete package + +Supported shells: + +- [x] bash +- [x] zsh +- [x] fish + +### Usage + +Assuming you have program called `run` and you want to have bash completion +for it, meaning, if you type `run` then space, then press the `Tab` key, +the shell will suggest relevant complete options. + +In that case, we will create a program called `runcomplete`, a go program, +with a `func main()` and so, that will make the completion of the `run` +program. Once the `runcomplete` will be in a binary form, we could +`runcomplete -install` and that will add to our shell all the bash completion +options for `run`. + +So here it is: + +```go +import "github.com/posener/complete" + +func main() { + + // create a Command object, that represents the command we want + // to complete. + run := complete.Command{ + + // Sub defines a list of sub commands of the program, + // this is recursive, since every command is of type command also. + Sub: complete.Commands{ + + // add a build sub command + "build": complete.Command { + + // define flags of the build sub command + Flags: complete.Flags{ + // build sub command has a flag '-cpus', which + // expects number of cpus after it. in that case + // anything could complete this flag. + "-cpus": complete.PredictAnything, + }, + }, + }, + + // define flags of the 'run' main command + Flags: complete.Flags{ + // a flag -o, which expects a file ending with .out after + // it, the tab completion will auto complete for files matching + // the given pattern. + "-o": complete.PredictFiles("*.out"), + }, + + // define global flags of the 'run' main command + // those will show up also when a sub command was entered in the + // command line + GlobalFlags: complete.Flags{ + + // a flag '-h' which does not expects anything after it + "-h": complete.PredictNothing, + }, + } + + // run the command completion, as part of the main() function. + // this triggers the autocompletion when needed. + // name must be exactly as the binary that we want to complete. + complete.New("run", run).Run() +} +``` + +### Self completing program + +In case that the program that we want to complete is written in go we +can make it self completing. + +Here is an [example](./example/self/main.go) diff --git a/vendor/github.com/posener/complete/test.sh b/vendor/github.com/posener/complete/test.sh new file mode 100644 index 00000000..56bfcf15 --- /dev/null +++ b/vendor/github.com/posener/complete/test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... 
| grep -v vendor); do + go test -v -race -coverprofile=profile.out -covermode=atomic $d + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done \ No newline at end of file diff --git a/vendor/github.com/posener/complete/utils.go b/vendor/github.com/posener/complete/utils.go new file mode 100644 index 00000000..58b8b792 --- /dev/null +++ b/vendor/github.com/posener/complete/utils.go @@ -0,0 +1,46 @@ +package complete + +import ( + "os" + "path/filepath" + "strings" +) + +// fixPathForm changes a file name to a relative name +func fixPathForm(last string, file string) string { + // get wording directory for relative name + workDir, err := os.Getwd() + if err != nil { + return file + } + + abs, err := filepath.Abs(file) + if err != nil { + return file + } + + // if last is absolute, return path as absolute + if filepath.IsAbs(last) { + return fixDirPath(abs) + } + + rel, err := filepath.Rel(workDir, abs) + if err != nil { + return file + } + + // fix ./ prefix of path + if rel != "." && strings.HasPrefix(last, ".") { + rel = "./" + rel + } + + return fixDirPath(rel) +} + +func fixDirPath(path string) string { + info, err := os.Stat(path) + if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") { + path += "/" + } + return path +} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 00000000..dd878a30 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). 
+https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore new file mode 100644 index 00000000..3460f034 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore @@ -0,0 +1 @@ +command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md new file mode 100644 index 00000000..44986bff --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md @@ -0,0 +1 @@ +See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 00000000..1e839650 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,120 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. + // + // It is valid if one and the same Collector sends duplicate + // descriptors. Those duplicates are simply ignored. However, two + // different Collectors must not send duplicate descriptors. + // + // Sending no descriptor at all marks the Collector as “unchecked”, + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + // + // This method idempotently sends the same descriptors throughout the + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. 
+ // + // If a Collector encounters an error while executing this method, it + // must send an invalid descriptor (created with NewInvalidDesc) to + // signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by Describe + // (unless the Collector is unchecked, see above). Returned metrics that + // share the same descriptor must differ in their variable label + // values. + // + // This method may be called concurrently and must therefore be + // implemented in a concurrency safe way. Blocking occurs at the expense + // of total performance of rendering all registered metrics. Ideally, + // Collector implementations support concurrent readers. + Collect(chan<- Metric) +} + +// DescribeByCollect is a helper to implement the Describe method of a custom +// Collector. It collects the metrics from the provided Collector and sends +// their descriptors to the provided channel. +// +// If a Collector collects the same metrics throughout its lifetime, its +// Describe method can simply be implemented as: +// +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } +// +// However, this will not work if the metrics collected change dynamically over +// the lifetime of the Collector in a way that their combined set of descriptors +// changes as well. The shortcut implementation will then violate the contract +// of the Describe method. If a Collector sometimes collects no metrics at all +// (for example vectors like CounterVec, GaugeVec, etc., which only collect +// metrics after a metric with a fully specified label set has been accessed), +// it might even get registered as an unchecked Collector (cf. the Register +// method of the Registerer interface). Hence, only use this shortcut +// implementation of Describe if you are certain to fulfill the contract. +// +// The Collector example demonstrates a use of DescribeByCollect. +func DescribeByCollect(c Collector, descs chan<- *Desc) { + metrics := make(chan Metric) + go func() { + c.Collect(metrics) + close(metrics) + }() + for m := range metrics { + descs <- m.Desc() + } +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. 
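As a concrete illustration of the Collector contract and the DescribeByCollect shortcut documented above, a minimal custom collector might look like the following; the `queue_depth` metric and its constant reading are invented, and the shortcut is valid here only because this collector always collects the same single metric:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

type queueCollector struct {
	depth *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		depth: prometheus.NewDesc("queue_depth", "Current queue depth.", nil, nil),
	}
}

// Describe delegates to DescribeByCollect, which runs Collect once and
// forwards the descriptors of the collected metrics.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

// Collect emits one gauge reading per scrape.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// 42 stands in for a real measurement taken at scrape time.
	ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, 42)
}

func main() {
	prometheus.MustRegister(newQueueCollector())
}
```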
+func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 00000000..d463e36d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,277 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. 
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair +} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + return populateMetric(CounterValue, val, c.labelPairs, out) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *metricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) 
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//     myVec.WithLabelValues("404", "GET").Add(42)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+	c, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+	c, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &CounterVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+	Metric
+	Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently.
If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 00000000..1d034f87 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. 
Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	d := &Desc{
+		fqName:         fqName,
+		help:           help,
+		variableLabels: variableLabels,
+	}
+	if !model.IsValidMetricName(model.LabelValue(fqName)) {
+		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+		return d
+	}
+	// labelValues contains the label values of const labels (in order of
+	// their sorted label names) plus the fqName (at position 0).
+	labelValues := make([]string, 1, len(constLabels)+1)
+	labelValues[0] = fqName
+	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+	labelNameSet := map[string]struct{}{}
+	// First add only the const label names and sort them...
+	for labelName := range constLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+			return d
+		}
+		labelNames = append(labelNames, labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	sort.Strings(labelNames)
+	// ... so that we can now add const label values in the order of their names.
+	for _, labelName := range labelNames {
+		labelValues = append(labelValues, constLabels[labelName])
+	}
+	// Validate the const label values. They can't have a wrong cardinality, so
+	// use len(labelValues) as expectedNumberOfValues.
+	if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+		d.err = err
+		return d
+	}
+	// Now add the variable label names, but prefix them with something that
+	// cannot be in a regular label name. That prevents matching the label
+	// dimension with a different mix between preset and variable labels.
+	for _, labelName := range variableLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+			return d
+		}
+		labelNames = append(labelNames, "$"+labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	if len(labelNames) != len(labelNameSet) {
+		d.err = errors.New("duplicate label names")
+		return d
+	}
+
+	vh := hashNew()
+	for _, val := range labelValues {
+		vh = hashAdd(vh, val)
+		vh = hashAddByte(vh, separatorByte)
+	}
+	d.id = vh
+	// Sort labelNames so that order doesn't matter for the hash.
+	sort.Strings(labelNames)
+	// Now hash together (in this order) the help string and the sorted
+	// label names.
+	lh := hashNew()
+	lh = hashAdd(lh, help)
+	lh = hashAddByte(lh, separatorByte)
+	for _, labelName := range labelNames {
+		lh = hashAdd(lh, labelName)
+		lh = hashAddByte(lh, separatorByte)
+	}
+	d.dimHash = lh
+
+	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+	for n, v := range constLabels {
+		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+			Name:  proto.String(n),
+			Value: proto.String(v),
+		})
+	}
+	sort.Sort(labelPairSorter(d.constLabelPairs))
+	return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error.
NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 00000000..1e0d578e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,201 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. +// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. 
However, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. Alternatively, you +// could return no Desc at all, which will mark the Collector “unchecked”. No +// checks are performed at registration time, but metric consistency will still +// be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// errors. Thus, with unchecked Collectors, the responsibility to not collect +// metrics that lead to inconsistencies in the total scrape result lies with the +// implementer of the Collector. 
While this is not a desirable state, it is +// sometimes necessary. The typical use case is a situation where the exact +// metrics to be returned by a Collector cannot be predicted at registration +// time, but the implementer has sufficient knowledge of the whole system to +// guarantee metric consistency. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegisterer comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// (The top-level functions in the prometheus package are deprecated.) +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. 
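The custom-registry workflow described in the comments above is compact enough to sketch. The following is an illustrative example only (not part of the vendored files in this diff); the metric name queue_depth and the handle reg are placeholders, while NewRegistry, Register, and promhttp.HandlerFor are the APIs the documentation refers to:

	package main

	import (
		"log"
		"net/http"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	func main() {
		// A custom registry avoids the global state of DefaultRegisterer and
		// starts empty: no Go runtime or process collectors are pre-registered.
		reg := prometheus.NewRegistry()

		queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "queue_depth",
			Help: "Current number of items waiting in the queue.",
		})
		// Register returns an error instead of panicking, so duplicate or
		// inconsistent registrations can be handled explicitly.
		if err := reg.Register(queueDepth); err != nil {
			log.Fatal(err)
		}

		// HandlerFor exposes exactly the metrics gathered by this registry.
		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
		log.Fatal(http.ListenAndServe(":8080", nil))
	}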
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 00000000..18a99d5f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"encoding/json"
+	"expvar"
+)
+
+type expvarCollector struct {
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+	return &expvarCollector{
+		exports: exports,
+	}
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+	for _, desc := range e.exports {
+		ch <- desc
+	}
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+	for name, desc := range e.exports {
+		var m Metric
+		expVar := expvar.Get(name)
+		if expVar == nil {
+			continue
+		}
+		var v interface{}
+		labels := make([]string, len(desc.variableLabels))
+		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+			ch <- NewInvalidMetric(desc, err)
+			continue
+		}
+		var processValue func(v interface{}, i int)
+		processValue = func(v interface{}, i int) {
+			if i >= len(labels) {
+				copiedLabels := append(make([]string, 0, len(labels)), labels...)
+				switch v := v.(type) {
+				case float64:
+					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+				case bool:
+					if v {
+						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+					} else {
+						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+					}
+				default:
+					return
+				}
+				ch <- m
+				return
+			}
+			vm, ok := v.(map[string]interface{})
+			if !ok {
+				return
+			}
+			for lv, val := range vm {
+				labels[i] = lv
+				processValue(val, i+1)
+			}
+		}
+		processValue(v, 0)
+	}
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 00000000..3d383a73
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 00000000..71d406bd
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,286 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. + Inc() + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. + Dec() + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). 
Create instances with NewGaugeVec. +type GaugeVec struct { + *metricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) 
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+	g, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &GaugeVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+	Metric
+	Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), GaugeValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 00000000..ba3b9333
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,301 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
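As a concrete illustration of the GaugeFunc pattern defined just above, a minimal sketch follows. It is not part of the vendored code: the Queue type, its Len method, and the metric name are hypothetical stand-ins; the concurrency-safety the documentation demands is provided here by the mutex inside Len.

	package queue

	import (
		"sync"

		"github.com/prometheus/client_golang/prometheus"
	)

	// Queue is a stand-in for any existing value worth mirroring into a metric.
	type Queue struct {
		mu    sync.Mutex
		items []string
	}

	// Len reports the current depth under the lock, making it safe to call
	// from concurrent metric collections.
	func (q *Queue) Len() int {
		q.mu.Lock()
		defer q.mu.Unlock()
		return len(q.items)
	}

	// NewDepthGaugeFunc mirrors q's length into a GaugeFunc. The callback runs
	// at collect time, which may happen concurrently, so it must be
	// concurrency-safe.
	func NewDepthGaugeFunc(q *Queue) prometheus.GaugeFunc {
		return prometheus.NewGaugeFunc(prometheus.GaugeOpts{
			Name: "queue_depth",
			Help: "Current number of items in the queue.",
		}, func() float64 {
			return float64(q.Len())
		})
	}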
+ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This causes a stop-the-world, which is very short with Go1.9+ +// (~25µs). However, with older Go versions, the stop-the-world duration depends +// on the heap size and can be quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: 
func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. 
+func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.goInfoDesc + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 00000000..20ee1afb --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,614 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). 
+const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. 
Each
+	// element in the slice is the upper inclusive bound of a bucket. The
+	// values must be sorted in strictly increasing order. There is no need
+	// to add a highest bucket with +Inf bound, it will be added
+	// implicitly. The default value is DefBuckets.
+	Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+	return newHistogram(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+	if len(desc.variableLabels) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+	}
+
+	for _, n := range desc.variableLabels {
+		if n == bucketLabel {
+			panic(errBucketLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == bucketLabel {
+			panic(errBucketLabelNotAllowed)
+		}
+	}
+
+	if len(opts.Buckets) == 0 {
+		opts.Buckets = DefBuckets
+	}
+
+	h := &histogram{
+		desc:        desc,
+		upperBounds: opts.Buckets,
+		labelPairs:  makeLabelPairs(desc, labelValues),
+		counts:      [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
+	}
+	for i, upperBound := range h.upperBounds {
+		if i < len(h.upperBounds)-1 {
+			if upperBound >= h.upperBounds[i+1] {
+				panic(fmt.Errorf(
+					"histogram buckets must be in increasing order: %f >= %f",
+					upperBound, h.upperBounds[i+1],
+				))
+			}
+		} else {
+			if math.IsInf(upperBound, +1) {
+				// The +Inf bucket is implicit. Remove it here.
+				h.upperBounds = h.upperBounds[:i]
+			}
+		}
+	}
+	// Finally we know the final length of h.upperBounds and can make buckets
+	// for both counts:
+	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
+	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+
+	h.init(h) // Init self-collection.
+	return h
+}
+
+type histogramCounts struct {
+	// sumBits contains the bits of the float64 representing the sum of all
+	// observations. sumBits and count have to go first in the struct to
+	// guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	sumBits uint64
+	count   uint64
+	buckets []uint64
+}
+
+type histogram struct {
+	// countAndHotIdx is a complicated one. For lock-free yet atomic
+	// observations, we need to save the total count of observations again,
+	// combined with the index of the currently-hot counts struct, so that
+	// we can perform the operation on both values atomically. The least
+	// significant bit defines the hot counts struct. The remaining 63 bits
+	// represent the total count of observations. This happens under the
+	// assumption that the 63bit count will never overflow. Rationale: An
+	// observation takes about 30ns. Let's assume it could happen in
+	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
+	// which is about 3000 years.
+	//
+	// This has to be first in the struct for 64bit alignment. See
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
+
+	upperBounds []float64
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric.
It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*histogramCounts + hotIdx int // Index of currently-hot counts. Only used within Write. + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + + // We increment h.countAndHotIdx by 2 so that the counter in the upper + // 63 bits gets incremented by 1. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 2) + hotCounts := h.counts[n%2] + + if i < len(h.upperBounds) { + atomic.AddUint64(&hotCounts.buckets[i], 1) + } + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (h *histogram) Write(out *dto.Metric) error { + var ( + his = &dto.Histogram{} + buckets = make([]*dto.Bucket, len(h.upperBounds)) + hotCounts, coldCounts *histogramCounts + count uint64 + ) + + // For simplicity, we mutex the rest of this method. It is not in the + // hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it. + h.writeMtx.Lock() + defer h.writeMtx.Unlock() + + // This is a bit arcane, which is why the following spells out this if + // clause in English: + // + // If the currently-hot counts struct is #0, we atomically increment + // h.countAndHotIdx by 1 so that from now on Observe will use the counts + // struct #1. Furthermore, the atomic increment gives us the new value, + // which, in its most significant 63 bits, tells us the count of + // observations done so far up to and including currently ongoing + // observations still using the counts struct just changed from hot to + // cold. To have a normal uint64 for the count, we bitshift by 1 and + // save the result in count. We also set h.hotIdx to 1 for the next + // Write call, and we will refer to counts #1 as hotCounts and to counts + // #0 as coldCounts. + // + // If the currently-hot counts struct is #1, we do the corresponding + // things the other way round. We have to _decrement_ h.countAndHotIdx + // (which is a bit arcane in itself, as we have to express -1 with an + // unsigned int...). + if h.hotIdx == 0 { + count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1 + h.hotIdx = 1 + hotCounts = h.counts[1] + coldCounts = h.counts[0] + } else { + count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. + h.hotIdx = 0 + hotCounts = h.counts[0] + coldCounts = h.counts[1] + } + + // Now we have to wait for the now-declared-cold counts to actually cool + // down, i.e. wait for all observations still using it to finish. 
That's + // the case once the count in the cold counts struct is the same as the + // one atomically retrieved from the upper 63bits of h.countAndHotIdx. + for { + if count == atomic.LoadUint64(&coldCounts.count) { + break + } + runtime.Gosched() // Let observations get work done. + } + + his.SampleCount = proto.Uint64(count) + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) + var cumCount uint64 + for i, upperBound := range h.upperBounds { + cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(cumCount), + UpperBound: proto.Float64(upperBound), + } + } + + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *metricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. 
For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+	h, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// With works as GetMetricWith but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+	h, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &HistogramVec{vec}, err
+	}
+	return nil, err
+}
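
A short sketch of the vector API documented above, including currying. The metric name is hypothetical and the snippet assumes the usual prometheus import:

    reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
        Name: "example_http_request_duration_seconds", // hypothetical name
        Help: "Request latency, partitioned by status code and method.",
    }, []string{"code", "method"})
    prometheus.MustRegister(reqDur)

    // Positional and map-based access are equivalent here:
    reqDur.WithLabelValues("404", "GET").Observe(0.021)
    reqDur.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(0.019)

    // Currying pre-binds one label; the curried vector needs only the rest.
    getOnly := reqDur.MustCurryWith(prometheus.Labels{"method": "GET"})
    getOnly.WithLabelValues("200").Observe(0.004)
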
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+type constHistogram struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	buckets    map[float64]uint64
+	labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+	his := &dto.Histogram{}
+	buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+	his.SampleCount = proto.Uint64(h.count)
+	his.SampleSum = proto.Float64(h.sum)
+
+	for upperBound, count := range h.buckets {
+		buckets = append(buckets, &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(upperBound),
+		})
+	}
+
+	if len(buckets) > 0 {
+		sort.Sort(buckSort(buckets))
+	}
+	his.Bucket = buckets
+
+	out.Histogram = his
+	out.Label = h.labelPairs
+
+	return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+	return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+	return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 00000000..0fa339ae
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,503 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "compress/gzip" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" +) + +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using promhttp.Handler instead. +func Handler() http.Handler { + return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{}) +// instead. See there for further documentation. +func UninstrumentedHandler() http.Handler { + return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + mfs, err := DefaultGatherer.Gather() + if err != nil { + httpError(rsp, err) + return + } + + contentType := expfmt.Negotiate(req.Header) + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + httpError(rsp, err) + return + } + } + }) +} + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +// +// Deprecated: InstrumentHandler has several issues. Use the tooling provided in +// package promhttp instead. The issues are the following: (1) It uses Summaries +// rather than Histograms. Summaries are not useful if aggregation across +// multiple instances is required. (2) It uses microseconds as unit, which is +// deprecated and should be replaced by seconds. (3) The size of the request is +// calculated in a separate goroutine. 
Since this calculator requires access to +// the request header, it creates a race with any writes to the header performed +// during request handling. httputil.ReverseProxy is a prominent example for a +// handler performing such writes. (4) It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. Use the tooling provided in package promhttp instead. 
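
Because every comment here points away from this file and towards promhttp, a migration sketch may be useful. It assumes the promhttp subpackage is vendored alongside this one; the metric name and handler are hypothetical:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // Histogram in seconds replaces the deprecated Summary in microseconds.
        dur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
            Name: "example_http_request_duration_seconds", // hypothetical name
            Help: "Request latency in seconds.",
        }, []string{"code", "method"})
        prometheus.MustRegister(dur)

        work := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("ok"))
        })

        // promhttp middleware replaces InstrumentHandler/InstrumentHandlerFunc,
        // and promhttp.Handler replaces the deprecated Handler above.
        http.Handle("/work", promhttp.InstrumentHandlerDuration(dur, work))
        http.Handle("/metrics", promhttp.Handler())
        http.ListenAndServe(":8080", nil)
    }
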
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + if err := Register(reqCnt); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + panic(err) + } + } + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + if err := Register(reqDur); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + if err := Register(reqSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + if err := Register(resSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + resSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := computeApproximateRequestSize(r) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + reqCnt.WithLabelValues(method, code).Inc() + reqDur.Observe(elapsed) + resSz.Observe(float64(delegate.written)) + reqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request) <-chan int { + // Get URL length in current goroutine for avoiding a race condition. + // HandlerFunc that runs in parallel may modify the URL. + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + out := make(chan int, 1) + + go func() { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
+
+		if r.ContentLength != -1 {
+			s += int(r.ContentLength)
+		}
+		out <- s
+		close(out)
+	}()
+
+	return out
+}
+
+type responseWriterDelegator struct {
+	http.ResponseWriter
+
+	status      int
+	written     int64
+	wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+	r.status = code
+	r.wroteHeader = true
+	r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+	if !r.wroteHeader {
+		r.WriteHeader(http.StatusOK)
+	}
+	n, err := r.ResponseWriter.Write(b)
+	r.written += int64(n)
+	return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+	*responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+	return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+	f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+	if !f.wroteHeader {
+		f.WriteHeader(http.StatusOK)
+	}
+	n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+	f.written += n
+	return n, err
+}
+
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+func sanitizeCode(s int) string {
+	switch s {
+	case 100:
+		return "100"
+	case 101:
+		return "101"
+
+	case 200:
+		return "200"
+	case 201:
+		return "201"
+	case 202:
+		return "202"
+	case 203:
+		return "203"
+	case 204:
+		return "204"
+	case 205:
+		return "205"
+	case 206:
+		return "206"
+
+	case 300:
+		return "300"
+	case 301:
+		return "301"
+	case 302:
+		return "302"
+	case 304:
+		return "304"
+	case 305:
+		return "305"
+	case 307:
+		return "307"
+
+	case 400:
+		return "400"
+	case 401:
+		return "401"
+	case 402:
+		return "402"
+	case 403:
+		return "403"
+	case 404:
+		return "404"
+	case 405:
+		return "405"
+	case 406:
+		return "406"
+	case 407:
+		return "407"
+	case 408:
+		return "408"
+	case 409:
+		return "409"
+	case 410:
+		return "410"
+	case 411:
+		return "411"
+	case 412:
+		return "412"
+	case 413:
+		return "413"
+	case 414:
+		return "414"
+	case 415:
+		return "415"
+	case 416:
+		return "416"
+	case 417:
+		return "417"
+	case 418:
+		return "418"
+
+	case 500:
+		return "500"
+	case 501:
+		return "501"
+	case 502:
+		return "502"
+	case 503:
+		return "503"
+	case 504:
+		return "504"
+	case 505:
+		return "505"
+
+	case 428:
+		return "428"
+	case 429:
+		return "429"
+	case 431:
+		return "431"
+	case 511:
+		return "511"
+
+	default:
+		return strconv.Itoa(s)
+	}
+}
+
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+	a := header.Get(acceptEncodingHeader)
+	parts := strings.Split(a, ",")
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return true
+		}
+	}
+	return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerError. Error contents are
+// supposed to be uncompressed plain text.
However, same as with a plain +// http.Error, any header settings will be void if the header has already been +// sent. The error message will still be written to the writer, but it will +// probably be of limited use. +func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 00000000..351c26e1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "sort" + + dto "github.com/prometheus/client_model/go" +) + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// NormalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. 
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 00000000..2744443a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. 
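
To illustrate the reserved "__" prefix defined below from the caller's side: constructing a metric with such a label name does not panic, but its descriptor carries an error that surfaces at registration. Names here are hypothetical and the snippet assumes the standard log package:

    c := prometheus.NewCounterVec(prometheus.CounterOpts{
        Name: "example_events_total", // hypothetical name
        Help: "Example counter.",
    }, []string{"__reserved"}) // label names starting with "__" are not legal
    if err := prometheus.Register(c); err != nil {
        log.Printf("registration rejected: %v", err) // "descriptor ... is invalid"
    }
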
+const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%s: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 00000000..55e6d86d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,174 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + "time" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. 
+	//
+	// While populating dto.Metric, it is the responsibility of the
+	// implementation to ensure validity of the Metric protobuf (like valid
+	// UTF-8 strings or syntactically valid metric and label names). It is
+	// recommended to sort labels lexicographically. Callers of Write should
+	// still make sure of sorting if they depend on it.
+	Write(*dto.Metric) error
+	// TODO(beorn7): The original rationale of passing in a pre-allocated
+	// dto.Metric protobuf to save allocations has disappeared. The
+	// signature of this method should be changed to "Write() (*dto.Metric,
+	// error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type Opts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Metric (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the metric must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this metric.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+	ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+	if name == "" {
+		return ""
+	}
+	switch {
+	case namespace != "" && subsystem != "":
+		return strings.Join([]string{namespace, subsystem, name}, "_")
+	case namespace != "":
+		return strings.Join([]string{namespace, name}, "_")
+	case subsystem != "":
+		return strings.Join([]string{subsystem, name}, "_")
+	}
+	return name
+}
+
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
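
A few worked cases for BuildFQName above (component names are hypothetical):

    fmt.Println(prometheus.BuildFQName("daytona", "vault", "renewals_total")) // daytona_vault_renewals_total
    fmt.Println(prometheus.BuildFQName("", "vault", "renewals_total"))        // vault_renewals_total
    fmt.Println(prometheus.BuildFQName("daytona", "", "renewals_total"))      // daytona_renewals_total
    fmt.Println(prometheus.BuildFQName("daytona", "vault", ""))               // "" (an empty name always wins)
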
+type labelPairSorter []*dto.LabelPair + +func (s labelPairSorter) Len() int { + return len(s) +} + +func (s labelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s labelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) error { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a +// way that it has an explicit timestamp set to the provided Time. This is only +// useful in rare cases as the timestamp of a Prometheus metric should usually +// be set by the Prometheus server during scraping. Exceptions include mirroring +// metrics with given timestamps from other metric +// sources. +// +// NewMetricWithTimestamp works best with MustNewConstMetric, +// MustNewConstHistogram, and MustNewConstSummary, see example. +// +// Currently, the exposition formats used by Prometheus are limited to +// millisecond resolution. Thus, the provided time will be rounded down to the +// next full millisecond value. +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 00000000..5806cd09 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,52 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. 
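
A sketch of the Gauge-as-Observer pattern the comment above describes, assuming the Timer helper from this package (timer.go) is vendored alongside; the metric name is hypothetical:

    var lastRun = prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "example_last_run_duration_seconds", // hypothetical name
        Help: "Duration of the last run.",
    })

    func doRun() {
        // Gauge.Set matches ObserverFunc's signature, so the Timer records
        // into a Gauge rather than a Histogram or Summary.
        timer := prometheus.NewTimer(prometheus.ObserverFunc(lastRun.Set))
        defer timer.ObserveDuration()
        // ... the timed work ...
    }
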
+type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 00000000..55176d58 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,204 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "os" + + "github.com/prometheus/procfs" +) + +type processCollector struct { + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc +} + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// Currently, the collector depends on a Linux-style proc filesystem and +// therefore only exports metrics for Linux. 
+// +// Note: An older version of this function had the following signature: +// +// NewProcessCollector(pid int, namespace string) Collector +// +// Most commonly, it was called as +// +// NewProcessCollector(os.Getpid(), "") +// +// The following call of the current version is equivalent to the above: +// +// NewProcessCollector(ProcessCollectorOpts{}) +func NewProcessCollector(opts ProcessCollectorOpts) Collector { + ns := "" + if len(opts.Namespace) > 0 { + ns = opts.Namespace + "_" + } + + c := &processCollector{ + reportErrors: opts.ReportErrors, + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + maxVsize: NewDesc( + ns+"process_virtual_memory_max_bytes", + "Maximum amount of virtual memory available in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + if opts.PidFn == nil { + pid := os.Getpid() + c.pidFn = func() (int, error) { return pid, nil } + } else { + c.pidFn = opts.PidFn + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } else { + c.collectFn = func(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) + } + } + + return c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. 
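
For illustration, registering an additional, namespaced process collector next to the default one; the prefix is hypothetical and makes the metrics appear as daytona_process_cpu_seconds_total and so on:

    prometheus.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
        Namespace:    "daytona", // hypothetical prefix
        ReportErrors: true,      // export an invalid metric instead of dropping errors
    }))
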
+func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.NewStat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.NewLimits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} + +func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { + if !c.reportErrors { + return + } + if desc == nil { + desc = NewInvalidDesc(err) + } + ch <- NewInvalidMetric(desc, err) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 00000000..f2fb67ae --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,937 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. 
Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
+var (
+	defaultRegistry              = NewRegistry()
+	DefaultRegisterer Registerer = defaultRegistry
+	DefaultGatherer   Gatherer   = defaultRegistry
+)
+
+func init() {
+	MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
+	MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+	return &Registry{
+		collectorsByID:  map[uint64]Collector{},
+		descIDs:         map[uint64]struct{}{},
+		dimHashesByName: map[string]uint64{},
+	}
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+	r := NewRegistry()
+	r.pedanticChecksEnabled = true
+	return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer implementations
+// (e.g. for testing purposes).
+type Registerer interface {
+	// Register registers a new Collector to be included in metrics
+	// collection. It returns an error if the descriptors provided by the
+	// Collector are invalid or if they — in combination with descriptors of
+	// already registered Collectors — do not fulfill the consistency and
+	// uniqueness criteria described in the documentation of metric.Desc.
+	//
+	// If the provided Collector is equal to a Collector already registered
+	// (which includes the case of re-registering the same Collector), the
+	// returned error is an instance of AlreadyRegisteredError, which
+	// contains the previously registered Collector.
+	//
+	// A Collector whose Describe method does not yield any Desc is treated
+	// as unchecked. Registration will always succeed. No check for
+	// re-registering (see previous paragraph) is performed. Thus, the
+	// caller is responsible for not double-registering the same unchecked
+	// Collector, and for providing a Collector that will not cause
+	// inconsistent metrics on collection. (This would lead to scrape
+	// errors.)
+	Register(Collector) error
+	// MustRegister works like Register but registers any number of
+	// Collectors and panics upon the first registration that causes an
+	// error.
+	MustRegister(...Collector)
+	// Unregister unregisters the Collector that equals the Collector passed
+	// in as an argument. (Two Collectors are considered equal if their
+	// Describe method yields the same set of descriptors.) The function
+	// returns whether a Collector was unregistered. Note that an unchecked
+	// Collector cannot be unregistered (as its Describe method does not
+	// yield any descriptor).
+ // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. 
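
The same recovery idiom that the vendored http.go uses for its request counter, sketched here for application code (metric name hypothetical):

    requests := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "example_requests_total", // hypothetical name
        Help: "Example counter.",
    })
    if err := prometheus.Register(requests); err != nil {
        if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
            // Reuse whatever equal Collector was registered first.
            requests = are.ExistingCollector.(prometheus.Counter)
        } else {
            panic(err)
        }
    }
    requests.Inc()
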
This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+	ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+	return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+	if len(errs) == 0 {
+		return ""
+	}
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+	for _, err := range errs {
+		fmt.Fprintf(buf, "\n* %s", err)
+	}
+	return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+	if err != nil {
+		*errs = append(*errs, err)
+	}
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errs
+	}
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+	mtx                   sync.RWMutex
+	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs               map[uint64]struct{}
+	dimHashesByName       map[string]uint64
+	uncheckedCollectors   []Collector
+	pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+	var (
+		descChan           = make(chan *Desc, capDescChan)
+		newDescIDs         = map[uint64]struct{}{}
+		newDimHashesByName = map[string]uint64{}
+		collectorID        uint64 // Just a sum of all desc IDs.
+		duplicateDescErr   error
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	r.mtx.Lock()
+	defer func() {
+		// Drain channel in case of premature return to not leak a goroutine.
+		for range descChan {
+		}
+		r.mtx.Unlock()
+	}()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
+		if _, exists := r.descIDs[desc.id]; exists {
+			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+		}
+		// If it is not a duplicate desc in this collector, add it to
+		// the collectorID. (We allow duplicate descs within the same
+		// collector, but their existence must be a no-op.)
+		if _, exists := newDescIDs[desc.id]; !exists {
+			newDescIDs[desc.id] = struct{}{}
+			collectorID += desc.id
+		}
+
+		// Are all the label names and the help string consistent with
+		// previous descriptors of the same name?
+		// First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // A Collector yielding no Desc at all is considered unchecked. + if len(newDescIDs) == 0 { + r.uncheckedCollectors = append(r.uncheckedCollectors, c) + return nil + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return AlreadyRegisteredError{ + ExistingCollector: existing, + NewCollector: c, + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + checkedMetricChan = make(chan Metric, capMetricChan) + uncheckedMetricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + checkedCollectors := make(chan Collector, len(r.collectorsByID)) + uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) + for _, collector := range r.collectorsByID { + checkedCollectors <- collector + } + for _, collector := range r.uncheckedCollectors { + uncheckedCollectors <- collector + } + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. 
+	if r.pedanticChecksEnabled {
+		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+		for id := range r.descIDs {
+			registeredDescIDs[id] = struct{}{}
+		}
+	}
+	r.mtx.RUnlock()
+
+	wg.Add(goroutineBudget)
+
+	collectWorker := func() {
+		for {
+			select {
+			case collector := <-checkedCollectors:
+				collector.Collect(checkedMetricChan)
+			case collector := <-uncheckedCollectors:
+				collector.Collect(uncheckedMetricChan)
+			default:
+				return
+			}
+			wg.Done()
+		}
+	}
+
+	// Start the first worker now to make sure at least one is running.
+	go collectWorker()
+	goroutineBudget--
+
+	// Close checkedMetricChan and uncheckedMetricChan once all collectors
+	// are collected.
+	go func() {
+		wg.Wait()
+		close(checkedMetricChan)
+		close(uncheckedMetricChan)
+	}()
+
+	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
+	defer func() {
+		if checkedMetricChan != nil {
+			for range checkedMetricChan {
+			}
+		}
+		if uncheckedMetricChan != nil {
+			for range uncheckedMetricChan {
+			}
+		}
+	}()
+
+	// Copy the channel references so we can nil them out later to remove
+	// them from the select statements below.
+	cmc := checkedMetricChan
+	umc := uncheckedMetricChan
+
+	for {
+		select {
+		case metric, ok := <-cmc:
+			if !ok {
+				cmc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				registeredDescIDs,
+			))
+		case metric, ok := <-umc:
+			if !ok {
+				umc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				nil,
+			))
+		default:
+			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+				// All collectors are already being worked on or
+				// we have already as many goroutines started as
+				// there are collectors. Do the same as above,
+				// just without the default.
+				select {
+				case metric, ok := <-cmc:
+					if !ok {
+						cmc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						registeredDescIDs,
+					))
+				case metric, ok := <-umc:
+					if !ok {
+						umc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						nil,
+					))
+				}
+				break
+			}
+			// Start more workers.
+			go collectWorker()
+			goroutineBudget--
+			runtime.Gosched()
+		}
+		// Once both checkedMetricChan and uncheckedMetricChan are closed
+		// and drained, the contraption above will nil out cmc and umc,
+		// and then we can leave the collect loop here.
+		if cmc == nil && umc == nil {
+			break
+		}
+	}
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
+// Prometheus text format, and writes it to a temporary file. Upon success, the
+// temporary file is renamed to the provided filename.
+//
+// This is intended for use with the textfile collector of the node exporter.
+// Note that the node exporter expects the filename to be suffixed with ".prom".
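+//
+// A minimal usage sketch (the path is illustrative):
+//  if err := WriteToTextfile("data/app_metrics.prom", DefaultGatherer); err != nil {
+//      log.Fatal(err)
+//  }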
+func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + // Wrapped metrics collected by an unchecked Collector can have an + // invalid Desc. + if desc.err != nil { + return desc.err + } + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { // Existing name. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { // New name. + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { + return err + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? 
+ if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calls are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + errs MultiError // The collected errors to return in the end. + ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { + errs = append(errs, err) + continue + } + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// checkSuffixCollisions checks for collisions with the “magic” suffixes the +// Prometheus text format and the internal metric representation of the +// Prometheus server add while flattening Summaries and Histograms. 
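+//
+// For example, if a summary or histogram named "x" has already been collected,
+// a later metric named "x_count" or "x_sum" collides with its flattened
+// representation, and vice versa. For histograms, the same applies to
+// "x_bucket".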
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { + var ( + newName = mf.GetName() + newType = mf.GetType() + newNameWithoutSuffix = "" + ) + switch { + case strings.HasSuffix(newName, "_count"): + newNameWithoutSuffix = newName[:len(newName)-6] + case strings.HasSuffix(newName, "_sum"): + newNameWithoutSuffix = newName[:len(newName)-4] + case strings.HasSuffix(newName, "_bucket"): + newNameWithoutSuffix = newName[:len(newName)-7] + } + if newNameWithoutSuffix != "" { + if existingMF, ok := mfs[newNameWithoutSuffix]; ok { + switch existingMF.GetType() { + case dto.MetricType_SUMMARY: + if !strings.HasSuffix(newName, "_bucket") { + return fmt.Errorf( + "collected metric named %q collides with previously collected summary named %q", + newName, newNameWithoutSuffix, + ) + } + case dto.MetricType_HISTOGRAM: + return fmt.Errorf( + "collected metric named %q collides with previously collected histogram named %q", + newName, newNameWithoutSuffix, + ) + } + } + } + if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_count"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_count", + ) + } + if _, ok := mfs[newName+"_sum"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_sum", + ) + } + } + if newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_bucket"]; ok { + return fmt.Errorf( + "collected histogram named %q collides with previously collected metric named %q", + newName, newName+"_bucket", + ) + } + } + return nil +} + +// checkMetricConsistency checks if the provided Metric is consistent with the +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. +func checkMetricConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + metricHashes map[uint64]struct{}, +) error { + name := metricFamily.GetName() + + // Type consistency with metric family. 
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %q { %s} is not a %s", + name, dtoMetric, metricFamily.GetType(), + ) + } + + previousLabelName := "" + for _, labelPair := range dtoMetric.GetLabel() { + labelName := labelPair.GetName() + if labelName == previousLabelName { + return fmt.Errorf( + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, + ) + } + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { + return fmt.Errorf( + "collected metric %q { %s} must not have an explicit %q label", + name, dtoMetric, quantileLabel, + ) + } + if !utf8.ValidString(labelPair.GetValue()) { + return fmt.Errorf( + "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", + name, dtoMetric, labelName, labelPair.GetValue()) + } + previousLabelName = labelName + } + + // Is the metric unique (i.e. no other metric with the same name and the same labels)? + h := hashNew() + h = hashAdd(h, name) + h = hashAddByte(h, separatorByte) + // Make sure label pairs are sorted. We depend on it for the consistency + // check. + if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + // We cannot sort dtoMetric.Label in place as it is immutable by contract. + copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) + copy(copiedLabels, dtoMetric.Label) + sort.Sort(labelPairSorter(copiedLabels)) + dtoMetric.Label = copiedLabels + } + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetName()) + h = hashAddByte(h, separatorByte) + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %q { %s} was collected before with the same name and label values", + name, dtoMetric, + ) + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? 
+ lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) + copy(lpsFromDesc, desc.constLabelPairs) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(labelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 00000000..e4c87145 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,775 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. However, the default behavior will change in the +// upcoming v0.10 of the library. There will be no rank estimations at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +// DefObjectives are the default Summary quantile values. +// +// Deprecated: DefObjectives will not be used as the default objectives in +// v0.10 of the library. 
The default Summary will have no quantiles then. +var ( + DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + + errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, + ) +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v0.10 of the library. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Due to the way a Summary is represented in the Prometheus text format + // and how it is handled by the Prometheus server internally, “quantile” + // is an illegal label name. Construction of a Summary or SummaryVec + // will panic if this label name is used in ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is DefObjectives. It is used if Objectives is left at + // its zero value (i.e. nil). To create a Summary without Objectives, + // set it to an empty map (i.e. map[float64]float64{}). + // + // Deprecated: Note that the current value of DefObjectives is + // deprecated. It will be replaced by an empty map in v0.10 of the + // library. Please explicitly set Objectives to the desired value. + Objectives map[float64]float64 + + // MaxAge defines the duration for which an observation stays relevant + // for the summary. Must be positive. The default value is DefMaxAge. + MaxAge time.Duration + + // AgeBuckets is the number of buckets used to exclude observations that + // are older than MaxAge from the summary. 
A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size. The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+	BufCap uint32
+}
+
+// Problem with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+	return newSummary(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+	if len(desc.variableLabels) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+	}
+
+	for _, n := range desc.variableLabels {
+		if n == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+
+	if opts.Objectives == nil {
+		opts.Objectives = DefObjectives
+	}
+
+	if opts.MaxAge < 0 {
+		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+	}
+	if opts.MaxAge == 0 {
+		opts.MaxAge = DefMaxAge
+	}
+
+	if opts.AgeBuckets == 0 {
+		opts.AgeBuckets = DefAgeBuckets
+	}
+
+	if opts.BufCap == 0 {
+		opts.BufCap = DefBufCap
+	}
+
+	if len(opts.Objectives) == 0 {
+		// Use the lock-free implementation of a Summary without objectives.
+		s := &noObjectivesSummary{
+			desc:       desc,
+			labelPairs: makeLabelPairs(desc, labelValues),
+			counts:     [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
+		}
+		s.init(s) // Init self-collection.
+		return s
+	}
+
+	s := &summary{
+		desc: desc,
+
+		objectives:       opts.Objectives,
+		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+		labelPairs: makeLabelPairs(desc, labelValues),
+
+		hotBuf:         make([]float64, 0, opts.BufCap),
+		coldBuf:        make([]float64, 0, opts.BufCap),
+		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+	}
+	s.headStreamExpTime = time.Now().Add(s.streamDuration)
+	s.hotBufExpTime = s.headStreamExpTime
+
+	for i := uint32(0); i < opts.AgeBuckets; i++ {
+		s.streams = append(s.streams, s.newStream())
+	}
+	s.headStream = s.streams[0]
+
+	for qu := range s.objectives {
+		s.sortedObjectives = append(s.sortedObjectives, qu)
+	}
+	sort.Float64s(s.sortedObjectives)
+
+	s.init(s) // Init self-collection.
+	return s
+}
+
+type summary struct {
+	selfCollector
+
+	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+	mtx    sync.Mutex // Protects every other moving part.
+	// Lock bufMtx before mtx if both are needed.
+
+	desc *Desc
+
+	objectives       map[float64]float64
+	sortedObjectives []float64
+
+	labelPairs []*dto.LabelPair
+
+	sum float64
+	cnt uint64
+
+	hotBuf, coldBuf []float64
+
+	streams                          []*quantile.Stream
+	streamDuration                   time.Duration
+	headStream                       *quantile.Stream
+	headStreamIdx                    int
+	headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+	s.bufMtx.Lock()
+	defer s.bufMtx.Unlock()
+
+	now := time.Now()
+	if now.After(s.hotBufExpTime) {
+		s.asyncFlush(now)
+	}
+	s.hotBuf = append(s.hotBuf, v)
+	if len(s.hotBuf) == cap(s.hotBuf) {
+		s.asyncFlush(now)
+	}
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+	s.bufMtx.Lock()
+	s.mtx.Lock()
+	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+	s.swapBufs(time.Now())
+	s.bufMtx.Unlock()
+
+	s.flushColdBuf()
+	sum.SampleCount = proto.Uint64(s.cnt)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for _, rank := range s.sortedObjectives {
+		var q float64
+		if s.headStream.Count() == 0 {
+			q = math.NaN()
+		} else {
+			q = s.headStream.Query(rank)
+		}
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	s.mtx.Unlock()
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+	return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+	return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction. But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set.
+	for now.After(s.hotBufExpTime) {
+		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+	}
+}
+
+type summaryCounts struct {
+	// sumBits contains the bits of the float64 representing the sum of all
+	// observations. sumBits and count have to go first in the struct to
+	// guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	sumBits uint64
+	count   uint64
+}
+
+type noObjectivesSummary struct {
+	// countAndHotIdx is a complicated one. For lock-free yet atomic
+	// observations, we need to save the total count of observations again,
+	// combined with the index of the currently-hot counts struct, so that
+	// we can perform the operation on both values atomically. The least
+	// significant bit defines the hot counts struct. The remaining 63 bits
+	// represent the total count of observations. This happens under the
+	// assumption that the 63bit count will never overflow. Rationale: An
+	// observation takes about 30ns. Let's assume it could happen in
+	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
+	// which is about 3000 years.
+	//
+	// This has to be first in the struct for 64bit alignment. See
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the summaryCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*summaryCounts
+	hotIdx int // Index of currently-hot counts. Only used within Write.
+
+	labelPairs []*dto.LabelPair
+}
+
+func (s *noObjectivesSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+	// We increment s.countAndHotIdx by 2 so that the counter in the upper
+	// 63 bits gets incremented by 1. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&s.countAndHotIdx, 2)
+	hotCounts := s.counts[n%2]
+
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+	var (
+		sum                   = &dto.Summary{}
+		hotCounts, coldCounts *summaryCounts
+		count                 uint64
+	)
+
+	// For simplicity, we mutex the rest of this method. It is not in the
+	// hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it.
+ s.writeMtx.Lock() + defer s.writeMtx.Unlock() + + // This is a bit arcane, which is why the following spells out this if + // clause in English: + // + // If the currently-hot counts struct is #0, we atomically increment + // s.countAndHotIdx by 1 so that from now on Observe will use the counts + // struct #1. Furthermore, the atomic increment gives us the new value, + // which, in its most significant 63 bits, tells us the count of + // observations done so far up to and including currently ongoing + // observations still using the counts struct just changed from hot to + // cold. To have a normal uint64 for the count, we bitshift by 1 and + // save the result in count. We also set s.hotIdx to 1 for the next + // Write call, and we will refer to counts #1 as hotCounts and to counts + // #0 as coldCounts. + // + // If the currently-hot counts struct is #1, we do the corresponding + // things the other way round. We have to _decrement_ s.countAndHotIdx + // (which is a bit arcane in itself, as we have to express -1 with an + // unsigned int...). + if s.hotIdx == 0 { + count = atomic.AddUint64(&s.countAndHotIdx, 1) >> 1 + s.hotIdx = 1 + hotCounts = s.counts[1] + coldCounts = s.counts[0] + } else { + count = atomic.AddUint64(&s.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. + s.hotIdx = 0 + hotCounts = s.counts[0] + coldCounts = s.counts[1] + } + + // Now we have to wait for the now-declared-cold counts to actually cool + // down, i.e. wait for all observations still using it to finish. That's + // the case once the count in the cold counts struct is the same as the + // one atomically retrieved from the upper 63bits of s.countAndHotIdx. + for { + if count == atomic.LoadUint64(&coldCounts.count) { + break + } + runtime.Gosched() // Let observations get work done. + } + + sum.SampleCount = proto.Uint64(count) + sum.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) + + out.Summary = sum + out.Label = s.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { + *metricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. +// +// Due to the way a Summary is represented in the Prometheus text format and how +// it is handled by the Prometheus server internally, “quantile” is an illegal +// label name. NewSummaryVec will panic if this label name is used. 
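+//
+// A usage sketch (name, help, and objectives are illustrative):
+//  requestDuration := NewSummaryVec(
+//      SummaryOpts{
+//          Name:       "request_duration_seconds",
+//          Help:       "Request latency distribution.",
+//          Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//      },
+//      []string{"method"},
+//  )
+//  requestDuration.WithLabelValues("GET").Observe(0.21)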
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+	for _, ln := range labelNames {
+		if ln == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &SummaryVec{
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newSummary(desc, opts, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//     myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error.
Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *SummaryVec) With(labels Labels) Observer { + s, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return s +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the SummaryVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &SummaryVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc or if Desc is invalid. 
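+//
+// A sketch of use from within a custom Collector's Collect method (desc, ch,
+// and the numbers are illustrative):
+//  s, err := NewConstSummary(
+//      desc, 4711, 403.34,
+//      map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//  )
+//  if err == nil {
+//      ch <- s
+//  }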
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 00000000..8d5f1052
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+	begin    time.Time
+	observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+//    func TimeMe() {
+//        timer := NewTimer(myHistogram)
+//        defer timer.ObserveDuration()
+//        // Do actual work.
+//    }
+func NewTimer(o Observer) *Timer {
+	return &Timer{
+		begin:    time.Now(),
+		observer: o,
+	}
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. The observed
+// duration is also returned. ObserveDuration is usually called with a defer
+// statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func (t *Timer) ObserveDuration() time.Duration {
+	d := time.Since(t.begin)
+	if t.observer != nil {
+		t.observer.Observe(d.Seconds())
+	}
+	return d
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 00000000..0f9ce63f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,42 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 00000000..eb248f10 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. 
If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc or if Desc is +// invalid. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + labelPairs = append(labelPairs, desc.constLabelPairs...) 
+ sort.Sort(labelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 00000000..14ed9e85 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,472 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. +// It also handles label currying. It uses a shared metricMap internally. +type metricVec struct { + *metricMap + + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. + hashAdd func(h uint64, s string) uint64 + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized metricVec. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { + return &metricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case.
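+//
+// For example (an illustrative sketch, not part of the upstream file; vec
+// stands for any vector type built on metricVec, e.g. a CounterVec whose
+// variable labels are "method" and "path"):
+//
+//	vec.DeleteLabelValues("GET", "/api")                // order must match VariableLabels
+//	vec.Delete(Labels{"method": "GET", "path": "/api"}) // order-independent, but builds a map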
+// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. + } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. 
+func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. +func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + + i := findMetricWithLabelValues(metrics, lvs, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use labels to select a metric and remove +// only that metric. +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + i := findMetricWithLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) + if !ok { + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabels retrieves the metric by hash and labels or +// creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) + if !ok { + lvs := extractLabelValues(m.desc, labels, curry) + metric = m.newMetric(lvs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithHashAndLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 100644 index 00000000..49159bf3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -0,0 +1,179 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// WrapRegistererWith returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. +// +// WrapRegistererWith provides a way to add fixed labels to a subset of +// Collectors. It should not be used to add fixed labels to all metrics exposed. +// +// The Collector example demonstrates a use of WrapRegistererWith. +func WrapRegistererWith(labels Labels, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + labels: labels, + } +} + +// WrapRegistererWithPrefix returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided prefix to the name of all Metrics it collects. +// +// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of +// a sub-system. To make this work, register metrics of the sub-system with the +// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful +// to use the same prefix for all metrics exposed. In particular, do not prefix +// metric names that are standardized across applications, as that would break +// horizontal monitoring, for example the metrics provided by the Go collector +// (see NewGoCollector) and the process collector (see NewProcessCollector). (In +// fact, those metrics are already prefixed with “go_” or “process_”, +// respectively.) 
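+//
+// A minimal usage sketch (illustrative only, relying solely on this package's
+// own API — NewRegistry, NewCounter, and CounterOpts):
+//
+//	reg := NewRegistry()
+//	wrapped := WrapRegistererWithPrefix("cache_", reg)
+//	hits := NewCounter(CounterOpts{Name: "hits_total", Help: "Cache hits."})
+//	wrapped.MustRegister(hits) // exposed as "cache_hits_total"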
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + prefix: prefix, + } +} + +type wrappingRegisterer struct { + wrappedRegisterer Registerer + prefix string + labels Labels +} + +func (r *wrappingRegisterer) Register(c Collector) error { + return r.wrappedRegisterer.Register(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +func (r *wrappingRegisterer) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +func (r *wrappingRegisterer) Unregister(c Collector) bool { + return r.wrappedRegisterer.Unregister(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +type wrappingCollector struct { + wrappedCollector Collector + prefix string + labels Labels +} + +func (c *wrappingCollector) Collect(ch chan<- Metric) { + wrappedCh := make(chan Metric) + go func() { + c.wrappedCollector.Collect(wrappedCh) + close(wrappedCh) + }() + for m := range wrappedCh { + ch <- &wrappingMetric{ + wrappedMetric: m, + prefix: c.prefix, + labels: c.labels, + } + } +} + +func (c *wrappingCollector) Describe(ch chan<- *Desc) { + wrappedCh := make(chan *Desc) + go func() { + c.wrappedCollector.Describe(wrappedCh) + close(wrappedCh) + }() + for desc := range wrappedCh { + ch <- wrapDesc(desc, c.prefix, c.labels) + } +} + +type wrappingMetric struct { + wrappedMetric Metric + prefix string + labels Labels +} + +func (m *wrappingMetric) Desc() *Desc { + return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) +} + +func (m *wrappingMetric) Write(out *dto.Metric) error { + if err := m.wrappedMetric.Write(out); err != nil { + return err + } + if len(m.labels) == 0 { + // No wrapping labels. + return nil + } + for ln, lv := range m.labels { + out.Label = append(out.Label, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(lv), + }) + } + sort.Sort(labelPairSorter(out.Label)) + return nil +} + +func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { + constLabels := Labels{} + for _, lp := range desc.constLabelPairs { + constLabels[*lp.Name] = *lp.Value + } + for ln, lv := range labels { + if _, alreadyUsed := constLabels[ln]; alreadyUsed { + return &Desc{ + fqName: desc.fqName, + help: desc.help, + variableLabels: desc.variableLabels, + constLabelPairs: desc.constLabelPairs, + err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), + } + } + constLabels[ln] = lv + } + // NewDesc will do remaining validations. + newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + // Propagate errors if there were any. This will override any error + // created by NewDesc above, i.e. earlier errors get precedence. + if desc.err != nil { + newDesc.err = desc.err + } + return newDesc +} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
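As an aside on the vendored value.go earlier in this diff: NewConstMetric and MustNewConstMetric exist for exactly the custom-Collector pattern their doc comments describe. A minimal sketch of that pattern (illustrative only; the collector, its Desc, and the reported value are hypothetical, and this code is not part of the diff):

	// queueDepthCollector reports an externally maintained value on each scrape.
	type queueDepthCollector struct {
		desc *prometheus.Desc
	}

	func newQueueDepthCollector() *queueDepthCollector {
		return &queueDepthCollector{
			desc: prometheus.NewDesc("queue_depth", "Current depth of the work queue.", nil, nil),
		}
	}

	func (c *queueDepthCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

	func (c *queueDepthCollector) Collect(ch chan<- prometheus.Metric) {
		// A throw-away metric generated on the fly, per the value.go doc comments.
		ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
	}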
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 00000000..20110e41 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 00000000..9805432c --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,629 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: metrics.proto + +package io_prometheus_client // import "github.com/prometheus/client_model/go" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (dst *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(dst, src) +} +func (m *LabelPair) XXX_Size() int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m 
*LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (dst *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(dst, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (dst *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(dst, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} +} +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (dst *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(dst, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) 
XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (dst *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(dst, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} +} +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (dst *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(dst, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (dst *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(dst, src) +} +func (m *Histogram) XXX_Size() int { + return xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (dst *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(dst, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } 
+func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (dst *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(dst, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} +} +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (dst *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(dst, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + 
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } + +var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ + // 591 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, + 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, + 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, + 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, + 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, + 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, + 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, + 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, + 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, + 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, + 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, + 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, + 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, + 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, + 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, + 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, + 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, + 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, + 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, + 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, + 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, + 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, + 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, + 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, + 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, + 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, + 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, + 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 
0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, + 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, + 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, + 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, + 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, + 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, + 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, + 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, + 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, + 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 00000000..636a2c1a --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 00000000..c092723e --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +// DecodeOptions contains options used by the Decoder and in sample extraction. +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. + Timestamp model.Time +} + +// ResponseFormat extracts the correct format from an HTTP response header. +// If no matching format can be found, FmtUnknown is returned. +func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const textType = "text/plain" + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. +func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protocol. +type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics.
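+ // TextToMetricFamilies consumes the reader completely, so all parsed + // families are cached here and handed out one per Decode call; once the + // cache is drained, the next Decode returns io.EOF.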
+ fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. +func (sd *SampleDecoder) Decode(s *model.Vector) error { + err := sd.Dec.Decode(&sd.f) + if err != nil { + return err + } + *s, err = extractSamples(&sd.f, sd.Opts) + return err +} + +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occurred. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) + for _, f := range fams { + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...) + } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range
f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". + lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: 
timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 00000000..11839ed6 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. 
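+// +// A minimal usage sketch (illustrative only; w, req and metricFamilies are +// assumed to be defined by the caller): +// +// enc := NewEncoder(w, Negotiate(req.Header)) +// for _, mf := range metricFamilies { +// if err := enc.Encode(mf); err != nil { +// // handle the encoding error +// } +// }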
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 00000000..c71bcb98 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 00000000..dc2eedee --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Build only when actually fuzzing +// +build gofuzz + +package expfmt + +import "bytes" + +// Fuzz text metric parser with github.com/dvyukov/go-fuzz: +// +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// +// Further input samples should go in the folder fuzz/corpus. +func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go new file mode 100644 index 00000000..8e473d0f --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -0,0 +1,468 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + "sync" + + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer +// implements it. +type enhancedWriter interface { + io.Writer + WriteRune(r rune) (n int, err error) + WriteString(s string) (n int, err error) + WriteByte(c byte) error +} + +const ( + initialBufSize = 512 + initialNumBufSize = 24 +) + +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, initialBufSize)) + }, + } + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. It returns the number of bytes written +// and any error encountered. The output will have the same order as the input, +// no further sorting is performed. Furthermore, this function assumes the input +// is already sanitized and does not perform any sanity checks. If the input +// contains duplicate metrics or invalid metric or label names, the conversion +// will result in invalid text format output. +// +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { + // Fail-fast checks. + if len(in.Metric) == 0 { + return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bytes.Buffer from the sync.Pool and write out its content to out in a + // single go in the end. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bytes.Buffer) + b.Reset() + w = b + defer func() { + bWritten, bErr := out.Write(b.Bytes()) + written = bWritten + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var n int + + // Comments, first HELP, then TYPE.
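+ // For a hypothetical counter family "http_requests_total", the output + // produced below looks like (illustrative): + // + // # HELP http_requests_total Total number of HTTP requests. + // # TYPE http_requests_total counter + // http_requests_total{code="200"} 1027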
+ if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + metricType := in.GetType() + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. + for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Summary.GetSampleCount()), + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + float64(metric.Histogram.GetSampleCount()), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Histogram.GetSampleCount()), + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + 
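+ // Account for the bytes written by the last writeSample call of the + // type switch above.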
written += n + if err != nil { + return + } + } + return +} + +// writeSample writes a single sample in text format to w, given the metric +// name, the metric proto message itself, optionally an additional label name +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. +func writeSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + value float64, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'w'. An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. +func writeLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. 
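+// The two replacers below implement that escaping; a strings.Replacer is safe +// for concurrent use, so they are shared package-wide.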
+var ( + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { + if includeDoubleQuote { + return quotedEscaper.WriteString(w, v) + } else { + return escaper.WriteString(w, v) + } +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. +func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. +func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 00000000..ec3d86ba --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,757 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. 
+ lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) 
+ if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. 
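+ // A metric name that was never announced by a '# TYPE' comment line is + // recorded as untyped.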
+ if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. 
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() > 0 { + p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) + return nil + } + return p.startOfLine +} + +// readingHelp represents the state where the last byte read (now in +// p.currentByte) is the first byte of the docstring after 'HELP'. +func (p *TextParser) readingHelp() stateFn { + if p.currentMF.Help != nil { + p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) + return nil + } + // Rest of line is the docstring. + if p.readTokenUntilNewline(true); p.err != nil { + return nil // Unexpected end of input. + } + p.currentMF.Help = proto.String(p.currentToken.String()) + return p.startOfLine +} + +// readingType represents the state where the last byte read (now in +// p.currentByte) is the first byte of the type hint after 'HELP'. 
+func (p *TextParser) readingType() stateFn { + if p.currentMF.Type != nil { + p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) + return nil + } + // Rest of line is the type. + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + if !ok { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + p.currentMF.Type = dto.MetricType(metricType).Enum() + return p.startOfLine +} + +// parseError sets p.err to a ParseError at the current line with the given +// message. +func (p *TextParser) parseError(msg string) { + p.err = ParseError{ + Line: p.lineCount, + Msg: msg, + } +} + +// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte +// that is neither ' ' nor '\t'. That byte is left in p.currentByte. +func (p *TextParser) skipBlankTab() { + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { + return + } + } +} + +// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do +// anything if p.currentByte is neither ' ' nor '\t'. +func (p *TextParser) skipBlankTabIfCurrentBlankTab() { + if isBlankOrTab(p.currentByte) { + p.skipBlankTab() + } +} + +// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The +// first byte considered is the byte already read (now in p.currentByte). The +// first whitespace byte encountered is still copied into p.currentByte, but not +// into p.currentToken. +func (p *TextParser) readTokenUntilWhitespace() { + p.currentToken.Reset() + for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first +// byte considered is the byte already read (now in p.currentByte). The first +// newline byte encountered is still copied into p.currentByte, but not into +// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are +// recognized: '\\' translates into '\', and '\n' into a line-feed character. +// All other escape sequences are invalid and cause an error. +func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. 
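+// +// Valid metric names match [a-zA-Z_:][a-zA-Z0-9_:]*; see the +// isValidMetricName* helpers below.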
+func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 00000000..7723656d --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
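The suffix helpers above are what let setOrCreateCurrentMF fold `foo_sum`, `foo_count`, and `foo_bucket` samples back into a single summary or histogram family named `foo`. A minimal, self-contained sketch of driving this vendored parser (assuming the conventional import path github.com/prometheus/common/expfmt; the metric name and values are illustrative):

	package main

	import (
		"fmt"
		"log"
		"strings"

		"github.com/prometheus/common/expfmt"
	)

	func main() {
		input := "# TYPE http_request_duration_seconds histogram\n" +
			"http_request_duration_seconds_bucket{le=\"+Inf\"} 5\n" +
			"http_request_duration_seconds_sum 0.42\n" +
			"http_request_duration_seconds_count 5\n"

		var parser expfmt.TextParser
		mfs, err := parser.TextToMetricFamilies(strings.NewReader(input))
		if err != nil {
			log.Fatal(err) // a ParseError carrying the offending line number
		}
		// All three suffixed samples are folded into one MetricFamily
		// keyed by the base name.
		fmt.Println(len(mfs), mfs["http_request_duration_seconds"].GetType()) // 1 HISTOGRAM
	}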
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 00000000..26e92288 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 00000000..35e739c7 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
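A small usage sketch of the negotiation entry points above (goautoneg is vendored under internal/ here, so it is importable only from within prometheus/common itself; the header and alternatives are illustrative):

	header := "text/html;q=0.9, application/json"
	alternatives := []string{"application/json", "text/html"}
	fmt.Println(goautoneg.Negotiate(header, alternatives))
	// Prints "application/json": its implicit q of 1.0 sorts ahead of
	// text/html's explicit q of 0.9 in ParseAccept's ordering.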
+
+package model
+
+import (
+	"fmt"
+	"time"
+)
+
+type AlertStatus string
+
+const (
+	AlertFiring   AlertStatus = "firing"
+	AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+	// Label value pairs for purpose of aggregation, matching, and disposition
+	// dispatching. This must minimally include an "alertname" label.
+	Labels LabelSet `json:"labels"`
+
+	// Extra key/value information which does not define alert identity.
+	Annotations LabelSet `json:"annotations"`
+
+	// The known time range for this alert. Both ends are optional.
+	StartsAt     time.Time `json:"startsAt,omitempty"`
+	EndsAt       time.Time `json:"endsAt,omitempty"`
+	GeneratorURL string    `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+	return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+	return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+	if a.Resolved() {
+		return s + "[resolved]"
+	}
+	return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+	return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+	if a.EndsAt.IsZero() {
+		return false
+	}
+	return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+	if a.Resolved() {
+		return AlertResolved
+	}
+	return AlertFiring
+}
+
+// Validate checks whether the alert data is consistent.
+func (a *Alert) Validate() error {
+	if a.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+		return fmt.Errorf("start time must be before end time")
+	}
+	if err := a.Labels.Validate(); err != nil {
+		return fmt.Errorf("invalid label set: %s", err)
+	}
+	if len(a.Labels) == 0 {
+		return fmt.Errorf("at least one label pair required")
+	}
+	if err := a.Annotations.Validate(); err != nil {
+		return fmt.Errorf("invalid annotations: %s", err)
+	}
+	return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int      { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+	if as[i].StartsAt.Before(as[j].StartsAt) {
+		return true
+	}
+	if as[i].EndsAt.Before(as[j].EndsAt) {
+		return true
+	}
+	return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+	for _, a := range as {
+		if !a.Resolved() {
+			return true
+		}
+	}
+	return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 00000000..fc4de410 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 00000000..038fc1c9 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 00000000..41051a01
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+	AlertNameLabel = "alertname"
+
+	// ExportedLabelPrefix is the prefix to prepend to the label names present in
+	// exported metrics if a label of the same name is added by the server.
+	ExportedLabelPrefix = "exported_"
+
+	// MetricNameLabel is the label name indicating the metric name of a
+	// timeseries.
+	MetricNameLabel = "__name__"
+
+	// SchemeLabel is the name of the label that holds the scheme on which to
+	// scrape a target.
+	SchemeLabel = "__scheme__"
+
+	// AddressLabel is the name of the label that holds the address of
+	// a scrape target.
+	AddressLabel = "__address__"
+
+	// MetricsPathLabel is the name of the label that holds the path on which to
+	// scrape a target.
+	MetricsPathLabel = "__metrics_path__"
+
+	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
+	// label names.
+	ReservedLabelPrefix = "__"
+
+	// MetaLabelPrefix is a prefix for labels that provide meta information.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series.
+	MetaLabelPrefix = "__meta_"
+
+	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series. This is reserved for use in
+	// Prometheus configuration files by users.
+	TmpLabelPrefix = "__tmp_"
+
+	// ParamLabelPrefix is a prefix for labels that provide URL parameters
+	// used to scrape a target.
+	ParamLabelPrefix = "__param_"
+
+	// JobLabel is the label name indicating the job from which a timeseries
+	// was scraped.
+	JobLabel = "job"
+
+	// InstanceLabel is the label name used for the instance label.
+	InstanceLabel = "instance"
+
+	// BucketLabel is used for the label that defines the upper bound of a
+	// bucket of a histogram ("le" -> "less or equal").
+	BucketLabel = "le"
+
+	// QuantileLabel is used for the label that defines the quantile in a
+	// summary.
+	QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+	if len(ln) == 0 {
+		return false
+	}
+	for i, b := range ln {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+			return false
+		}
+	}
+	return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+	return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+	return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+	labelStrings := make([]string, 0, len(l))
+	for _, label := range l {
+		labelStrings = append(labelStrings, string(label))
+	}
+	return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+	return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+	return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+	return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+	Name  LabelName
+	Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 00000000..6eda08a7 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. +func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. 
+func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 00000000..00804b7f --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,102 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 00000000..a7b96917 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 00000000..8762b13c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
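A brief sketch of the model types defined above in use (assuming the import github.com/prometheus/common/model; the metric and label values are illustrative):

	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "GET",
		"code":                "200",
	}
	fmt.Println(m)               // http_requests_total{code="200", method="GET"}
	fmt.Println(m.Fingerprint()) // a stable 16-hex-digit FNV-1a fingerprint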
+
+package model
+
+import (
+	"sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+	// cache the signature of an empty label set.
+	emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make([]string, 0, len(labels))
+	for labelName := range labels {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Strings(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, labelName)
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, labels[labelName])
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	labelNames := make(LabelNames, 0, len(ls))
+	for labelName := range ls {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(ls[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more susceptible to
+// creating hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	var result uint64
+	for labelName, labelValue := range ls {
+		sum := hashNew()
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(labelValue))
+		result ^= sum
+	}
+	return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	sort.Sort(LabelNames(labels))
+
+	sum := hashNew()
+	for _, label := range labels {
+		sum = hashAdd(sum, string(label))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[label]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+	if len(m) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		if _, exclude := labels[labelName]; !exclude {
+			labelNames = append(labelNames, labelName)
+		}
+	}
+	if len(labelNames) == 0 {
+		return emptyLabelSignature
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 00000000..bb99889d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"time"
+)
+
+// A Matcher describes how to match the value of a given label.
+type Matcher struct {
+	Name    LabelName `json:"name"`
+	Value   string    `json:"value"`
+	IsRegex bool      `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+	type plain Matcher
+	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+		return err
+	}
+
+	if len(m.Name) == 0 {
+		return fmt.Errorf("label name in matcher must not be empty")
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+	if !m.Name.IsValid() {
+		return fmt.Errorf("invalid name %q", m.Name)
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return fmt.Errorf("invalid regular expression %q", m.Value)
+		}
+	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+		return fmt.Errorf("invalid value %q", m.Value)
+	}
+	return nil
+}
+
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
+type Silence struct {
+	ID uint64 `json:"id,omitempty"`
+
+	Matchers []*Matcher `json:"matchers"`
+
+	StartsAt time.Time `json:"startsAt"`
+	EndsAt   time.Time `json:"endsAt"`
+
+	CreatedAt time.Time `json:"createdAt,omitempty"`
+	CreatedBy string    `json:"createdBy"`
+	Comment   string    `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+	if len(s.Matchers) == 0 {
+		return fmt.Errorf("at least one matcher required")
+	}
+	for _, m := range s.Matchers {
+		if err := m.Validate(); err != nil {
+			return fmt.Errorf("invalid matcher: %s", err)
+		}
+	}
+	if s.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if s.EndsAt.IsZero() {
+		return fmt.Errorf("end time missing")
+	}
+	if s.EndsAt.Before(s.StartsAt) {
+		return fmt.Errorf("start time must be before end time")
+	}
+	if s.CreatedBy == "" {
+		return fmt.Errorf("creator information missing")
+	}
+	if s.Comment == "" {
+		return fmt.Errorf("comment missing")
+	}
+	if s.CreatedAt.IsZero() {
+		return fmt.Errorf("creation timestamp missing")
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 00000000..46259b1f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,264 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// MinimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work.
+	minimumTick = time.Millisecond
+	// second is the Time duration equivalent to one second.
+	second = int64(time.Second / minimumTick)
+	// The number of nanoseconds per minimum tick.
+	nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+	// Earliest is the earliest Time representable. Handy for
+	// initializing a high watermark.
+	Earliest = Time(math.MinInt64)
+	// Latest is the latest Time representable. Handy for initializing
+	// a low watermark.
+	Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+	Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+	return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+	return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+	return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+	return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+	return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+	return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+	return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + *t = Time(v + va) + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. 
+func ParseDuration(durationStr string) (Duration, error) {
+	matches := durationRE.FindStringSubmatch(durationStr)
+	if len(matches) != 3 {
+		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+	}
+	var (
+		n, _ = strconv.Atoi(matches[1])
+		dur  = time.Duration(n) * time.Millisecond
+	)
+	switch unit := matches[2]; unit {
+	case "y":
+		dur *= 1000 * 60 * 60 * 24 * 365
+	case "w":
+		dur *= 1000 * 60 * 60 * 24 * 7
+	case "d":
+		dur *= 1000 * 60 * 60 * 24
+	case "h":
+		dur *= 1000 * 60 * 60
+	case "m":
+		dur *= 1000 * 60
+	case "s":
+		dur *= 1000
+	case "ms":
+		// Value already correct
+	default:
+		return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+	}
+	return Duration(dur), nil
+}
+
+func (d Duration) String() string {
+	var (
+		ms   = int64(time.Duration(d) / time.Millisecond)
+		unit = "ms"
+	)
+	if ms == 0 {
+		return "0s"
+	}
+	factors := map[string]int64{
+		"y":  1000 * 60 * 60 * 24 * 365,
+		"w":  1000 * 60 * 60 * 24 * 7,
+		"d":  1000 * 60 * 60 * 24,
+		"h":  1000 * 60 * 60,
+		"m":  1000 * 60,
+		"s":  1000,
+		"ms": 1,
+	}
+
+	switch int64(0) {
+	case ms % factors["y"]:
+		unit = "y"
+	case ms % factors["w"]:
+		unit = "w"
+	case ms % factors["d"]:
+		unit = "d"
+	case ms % factors["h"]:
+		unit = "h"
+	case ms % factors["m"]:
+		unit = "m"
+	case ms % factors["s"]:
+		unit = "s"
+	}
+	return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+	return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	dur, err := ParseDuration(s)
+	if err != nil {
+		return err
+	}
+	*d = dur
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 00000000..c9d8fb1a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,416 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+var (
+	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+	// of 0, which can appear in a real SamplePair and is thus not
+	// suitable to signal a non-existing SamplePair.
+	ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+	// ZeroSample is the pseudo zero-value of Sample used to signal a
+	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+	// and metric nil. Note that the natural zero value of Sample has a timestamp
+	// of 0, which can appear in a real Sample and is thus not suitable
+	// to signal a non-existing Sample.
+ ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// semantics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (s *Sample) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	s.Metric = v.Metric
+	s.Timestamp = v.Value.Timestamp
+	s.Value = v.Value.Value
+
+	return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+	return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+	switch {
+	case s[i].Metric.Before(s[j].Metric):
+		return true
+	case s[j].Metric.Before(s[i].Metric):
+		return false
+	case s[i].Timestamp.Before(s[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+func (s Samples) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, sample := range s {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+	Metric Metric       `json:"metric"`
+	Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+	vals := make([]string, len(ss.Values))
+	for i, v := range ss.Values {
+		vals[i] = v.String()
+	}
+	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+	Type() ValueType
+	String() string
+}
+
+func (Matrix) Type() ValueType  { return ValMatrix }
+func (Vector) Type() ValueType  { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+	ValNone ValueType = iota
+	ValScalar
+	ValVector
+	ValMatrix
+	ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+	return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	switch s {
+	case "":
+		*et = ValNone
+	case "scalar":
+		*et = ValScalar
+	case "vector":
+		*et = ValVector
+	case "matrix":
+		*et = ValMatrix
+	case "string":
+		*et = ValString
+	default:
+		return fmt.Errorf("unknown value type %q", s)
+	}
+	return nil
+}
+
+func (e ValueType) String() string {
+	switch e {
+	case ValNone:
+		return ""
+	case ValScalar:
+		return "scalar"
+	case ValVector:
+		return "vector"
+	case ValMatrix:
+		return "matrix"
+	case ValString:
+		return "string"
+	}
+	panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+	return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+	return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. +type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore new file mode 100644 index 00000000..25e3659a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -0,0 +1 @@ +/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md new file mode 100644 index 00000000..40503edb --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. 
+ +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md new file mode 100644 index 00000000..f1d3b993 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -0,0 +1,2 @@ +* Tobias Schmidt @grobie +* Johannes 'fish' Ziemke @discordianfish diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile new file mode 100644 index 00000000..947d7d8f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -0,0 +1,30 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +include Makefile.common + +%/.unpacked: %.ttar + ./ttar -C $(dir $*) -x -f $*.ttar + touch $@ + +update_fixtures: fixtures.ttar sysfs/fixtures.ttar + +%fixtures.ttar: %/fixtures + rm -v $(dir $*)fixtures/.unpacked + ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ + +.PHONY: build +build: + +.PHONY: test +test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common new file mode 100644 index 00000000..741579e6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -0,0 +1,223 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. +# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +unexport GOVENDOR +ifeq (, $(PRE_GO_111)) + ifneq (,$(wildcard go.mod)) + # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). + GO111MODULE := on + + ifneq (,$(wildcard vendor)) + # Always use the local vendor/ directory to satisfy the dependencies. + GOOPTS := $(GOOPTS) -mod=vendor + endif + endif +else + ifneq (,$(wildcard go.mod)) + ifneq (,$(wildcard vendor)) +$(warning This repository requires Go >= 1.11 because of Go modules) +$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') + endif + else + # This repository isn't using Go modules (yet). + GOVENDOR := $(FIRST_GOPATH)/bin/govendor + endif + + unexport GO111MODULE +endif +PROMU := $(FIRST_GOPATH)/bin/promu +STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck +pkgs = ./... + +GO_VERSION ?= $(shell $(GO) version) +GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION))) + +PROMU_VERSION ?= 0.2.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKER_REPO ?= prom + +.PHONY: all +all: precheck style staticcheck unused build test + +# This rule is used to forward a target like "build" to "common-build". This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. +%: common-% ; + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-test-short +common-test-short: + @echo ">> running short tests" + GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: + @echo ">> running all tests" + GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs) + +.PHONY: common-format +common-format: + @echo ">> formatting code" + GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-staticcheck +common-staticcheck: $(STATICCHECK) + @echo ">> running staticcheck" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs) +else + $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) +endif + +.PHONY: common-unused +common-unused: $(GOVENDOR) +ifdef GOVENDOR + @echo ">> running check for unused packages" + @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' +else +ifdef GO111MODULE + @echo ">> running check for unused/missing packages in go.mod" + GO111MODULE=$(GO111MODULE) $(GO) mod tidy + @git diff --exit-code -- go.sum go.mod +ifneq (,$(wildcard vendor)) + @echo ">> running check for unused packages in vendor/" + GO111MODULE=$(GO111MODULE) $(GO) mod vendor + @git diff --exit-code -- go.sum go.mod vendor/ +endif +endif +endif + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker +common-docker: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . + +.PHONY: common-docker-publish +common-docker-publish: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + +.PHONY: common-docker-tag-latest +common-docker-tag-latest: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + curl -s -L $(PROMU_URL) | tar -xvz -C /tmp + mkdir -v -p $(FIRST_GOPATH)/bin + cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +.PHONY: $(STATICCHECK) +$(STATICCHECK): +ifdef GO111MODULE +# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}. +# See https://github.com/golang/go/issues/27643. +# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules. 
+ tmpModule=$$(mktemp -d 2>&1) && \ + mkdir -p $${tmpModule}/staticcheck && \ + cd "$${tmpModule}"/staticcheck && \ + GO111MODULE=on $(GO) mod init example.com/staticcheck && \ + GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \ + rm -rf $${tmpModule}; +else + GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck +endif + +ifdef GOVENDOR +.PHONY: $(GOVENDOR) +$(GOVENDOR): + GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 00000000..53c5e9aa --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md new file mode 100644 index 00000000..20954947 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/README.md @@ -0,0 +1,11 @@ +# procfs + +This procfs package provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. + +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 00000000..d3a82680 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,95 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// NewBuddyInfo reads the buddyinfo statistics. 
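+// Editor's aside (illustrative sketch, not upstream code): since Sizes[n]
+// counts free fragments of 2^n pages, total free bytes in a zone can be
+// derived from a BuddyInfo value; the 4 KiB page size here is an assumption,
+// not detected:
+//
+//	const pageSize = 4096 // assumed; use os.Getpagesize() in real code
+//	var freeBytes float64
+//	for n, frags := range info.Sizes { // info is a hypothetical parsed BuddyInfo
+//		freeBytes += frags * float64(int64(1)<<uint(n)) * pageSize
+//	}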
+func NewBuddyInfo() ([]BuddyInfo, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewBuddyInfo() +} + +// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. +func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 00000000..e2acd6d4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. 
+// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar new file mode 100644 index 00000000..13c831ef --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -0,0 +1,462 @@ +# Archive created by ttar -c -f fixtures.ttar fixtures/ +Directory: fixtures +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/cmdline +Lines: 1 +vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/comm +Lines: 1 +vim +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/cwd +SymlinkTo: /usr/bin +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/exe +SymlinkTo: /usr/bin/vim +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/10 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/io +Lines: 7 +rchar: 750339 +wchar: 818609 +syscr: 7405 +syscw: 5245 +read_bytes: 1024 +write_bytes: 2048 +cancelled_write_bytes: -1024 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 62898 62898 processes +Max open files 2048 4096 files +Max locked memory 65536 65536 bytes +Max address space 8589934592 unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 62898 62898 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - +Path: fixtures/26231/mountstats +Lines: 19 +device rootfs mounted on / with fstype rootfs +device sysfs mounted on /sys with fstype sysfs +device proc mounted on /proc with fstype proc +device /dev/sda1 mounted on / with fstype ext4 +device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 + opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none + age: 13968 + caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured + sec: flavor=1,pseudoflavor=1 + events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 + bytes: 1207640230 0 0 0 1210214218 0 295483 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 1298 1298 0 207680 1210292152 6 79386 79407 + WRITE: 0 0 0 0 0 0 0 0 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/net/dev +Lines: 4 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/ns +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/ns/mnt +SymlinkTo: mnt:[4026531840] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/ns/net +SymlinkTo: net:[4026531993] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/root +SymlinkTo: / +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/stat +Lines: 1 +26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26232 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/cmdline +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/comm +Lines: 1 +ata_sff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/cwd +SymlinkTo: /does/not/exist +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26232/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/2 
+SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/4 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/root +SymlinkTo: /does/not/exist +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/stat +Lines: 1 +33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26233 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26233/cmdline +Lines: 1 +com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/584 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/584/stat +Lines: 2 +1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 +#!/bin/cat /proc/self/stat +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/short +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/short/buddyinfo +Lines: 3 +Node 0, zone +Node 0, zone +Node 0, zone +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/buddyinfo/sizemismatch +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/sizemismatch/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/valid +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/valid/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/fs/xfs/stat +Lines: 23 +extent_alloc 92447 97589 92448 93751 +abt 0 0 0 0 +blk_map 1767055 188820 184891 92447 92448 2140766 0 +bmbt 0 0 0 0 +dir 185039 92447 92444 136422 +trans 706 944304 0 +ig 185045 58807 0 126238 0 33637 22 +log 2883 113448 9 17360 739 +push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 +xstrat 92447 0 +rw 107739 94045 +attr 4 0 0 0 +icluster 8677 7849 135802 +vnodes 92601 0 0 0 92444 92444 92444 0 +buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 +abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 +abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 +bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 +fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +qm 0 0 0 0 0 0 0 0 +xpc 399724544 92823103 86219234 +debug 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/mdstat +Lines: 26 +Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] +md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] + 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] + +md127 : active raid1 sdi2[0] sdj2[1] + 312319552 blocks [2/2] [UU] + +md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] + 248896 blocks [2/2] [UU] + +md4 : inactive raid1 sda3[0] sdb3[1] + 4883648 blocks [2/2] [UU] + +md6 : active raid1 sdb2[2] sda2[0] + 195310144 blocks [2/1] [U_] + [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md8 : active raid1 sdb1[1] sda1[0] + 195310144 blocks [2/2] [UU] + [=>...................] 
resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] + 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] + bitmap: 0/30 pages [0KB], 65536KB chunk + +unused devices: +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/dev +Lines: 6 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed +vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 +docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/ip_vs +Lines: 21 +IP Virtual Server version 1.2.1 (size=4096) +Prot LocalAddress:Port Scheduler Flags + -> RemoteAddress:Port Forward Weight ActiveConn InActConn +TCP C0A80016:0CEA wlc + -> C0A85216:0CEA Tunnel 100 248 2 + -> C0A85318:0CEA Tunnel 100 248 2 + -> C0A85315:0CEA Tunnel 100 248 1 +TCP C0A80039:0CEA wlc + -> C0A85416:0CEA Tunnel 0 0 0 + -> C0A85215:0CEA Tunnel 100 1499 0 + -> C0A83215:0CEA Tunnel 100 1498 0 +TCP C0A80037:0CEA wlc + -> C0A8321A:0CEA Tunnel 0 0 0 + -> C0A83120:0CEA Tunnel 100 0 0 +TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh + -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 +FWM 10001000 wlc + -> C0A8321A:0CEA Route 0 0 1 + -> C0A83215:0CEA Route 0 0 2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/ip_vs_stats +Lines: 6 + Total Incoming Outgoing Incoming Outgoing + Conns Packets Packets Bytes Bytes + 16AA370 E33656E5 0 51D8C8883AB3 0 + + Conns/s Pkts/s Pkts/s Bytes/s Bytes/s + 4 1FB3C 0 1282A8F 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/net/rpc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/rpc/nfs +Lines: 5 +net 18628 0 18628 6 +rpc 4329785 0 4338291 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 +proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/rpc/nfsd +Lines: 11 +rc 0 6 18622 +fh 0 0 0 0 0 +io 157286400 0 +th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 +ra 32 0 0 0 0 0 0 0 0 0 0 0 +net 18628 0 18628 6 +rpc 18628 0 0 0 0 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 +proc4 2 2 10853 +proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/xfrm_stat +Lines: 28 +XfrmInError 1 +XfrmInBufferError 2 +XfrmInHdrError 4 +XfrmInNoStates 3 
+XfrmInStateProtoError 40 +XfrmInStateModeError 100 +XfrmInStateSeqError 6000 +XfrmInStateExpired 4 +XfrmInStateMismatch 23451 +XfrmInStateInvalid 55555 +XfrmInTmplMismatch 51 +XfrmInNoPols 65432 +XfrmInPolBlock 100 +XfrmInPolError 10000 +XfrmOutError 1000000 +XfrmOutBundleGenError 43321 +XfrmOutBundleCheckError 555 +XfrmOutNoStates 869 +XfrmOutStateProtoError 4542 +XfrmOutStateModeError 4 +XfrmOutStateSeqError 543 +XfrmOutStateExpired 565 +XfrmOutPolBlock 43456 +XfrmOutPolDead 7656 +XfrmOutPolError 1454 +XfrmFwdHdrError 6654 +XfrmOutStateInvalid 28765 +XfrmAcquireError 24532 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/self +SymlinkTo: 26231 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/stat +Lines: 16 +cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 +cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 +cpu1 47869 23 16474 1110787 591 0 46 0 0 0 +cpu2 46504 36 15916 1112321 441 0 326 0 0 0 +cpu3 47054 102 15683 1113230 533 0 60 0 0 0 +cpu4 28413 25 10776 1140321 217 0 8 0 0 0 +cpu5 29271 101 11586 1136270 672 0 30 0 0 0 +cpu6 29152 36 10276 1139721 319 0 29 0 0 0 +cpu7 29098 268 10164 1139282 555 0 31 0 0 0 +intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 38014093 +btime 1418183276 +processes 26442 +procs_running 2 +procs_blocked 1 +softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/symlinktargets +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/README +Lines: 2 +This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
+They are otherwise ignored by the tests +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/abc +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/def +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/ghi +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/uvw +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/xyz +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/.unpacked +Lines: 0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 00000000..b6c6b2ce --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,82 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "path" + + "github.com/prometheus/procfs/nfs" + "github.com/prometheus/procfs/xfs" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path returns the path of the given subsystem relative to the procfs root. +func (fs FS) Path(p ...string) string { + return path.Join(append([]string{string(fs)}, p...)...) +} + +// XFSStats retrieves XFS filesystem runtime statistics. +func (fs FS) XFSStats() (*xfs.Stats, error) { + f, err := os.Open(fs.Path("fs/xfs/stat")) + if err != nil { + return nil, err + } + defer f.Close() + + return xfs.ParseStats(f) +} + +// NFSClientRPCStats retrieves NFS client RPC statistics. +func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfs")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseClientRPCStats(f) +} + +// NFSdServerRPCStats retrieves NFS daemon RPC statistics. 
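+// Editor's aside (illustrative sketch, not upstream code): the FS type is the
+// package's entry point; the mount point is validated once and then reused
+// for all reads:
+//
+//	fs, err := NewFS(DefaultMountPoint) // stats /proc and checks it is a directory
+//	if err != nil {
+//		// /proc unavailable, e.g. non-Linux host or restricted container
+//	}
+//	nfsStats, err := fs.NFSClientRPCStats() // opens and parses /proc/net/rpc/nfs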
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfsd")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseServerRPCStats(f) +} diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod new file mode 100644 index 00000000..e89ee6c9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -0,0 +1 @@ +module github.com/prometheus/procfs diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 00000000..2ff228e9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io/ioutil" + "strconv" + "strings" +) + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. +func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. +func ReadUintFromFile(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go new file mode 100644 index 00000000..df0d567b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. 
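+// Editor's aside (illustrative sketch, not upstream code): because this
+// helper issues a single 128-byte read, it suits small sysfs/hwmon values but
+// silently truncates anything longer. A hypothetical call:
+//
+//	raw, err := SysReadFile("/sys/class/hwmon/hwmon0/temp1_input") // path is an example
+//	// raw is the trimmed file content, e.g. "49000" (millidegrees Celsius)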
+// https://github.com/prometheus/node_exporter/pull/728/files
+func SysReadFile(file string) (string, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	// On some machines, hwmon drivers are broken and return EAGAIN. This causes
+	// Go's ioutil.ReadFile implementation to poll forever.
+	//
+	// Since we either want to read data or bail immediately, do the simplest
+	// possible read using syscall directly.
+	b := make([]byte, 128)
+	n, err := syscall.Read(int(f.Fd()), b)
+	if err != nil {
+		return "", err
+	}
+
+	return string(bytes.TrimSpace(b[:n])), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 00000000..e36d4a3b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,259 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+	// Total count of connections.
+	Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+	// Total incoming traffic.
+	IncomingBytes uint64
+	// Total outgoing traffic.
+	OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+	// The local (virtual) IP address.
+	LocalAddress net.IP
+	// The remote (real) IP address.
+	RemoteAddress net.IP
+	// The local (virtual) port.
+	LocalPort uint16
+	// The remote (real) port.
+	RemotePort uint16
+	// The local firewall mark.
+	LocalMark string
+	// The transport protocol (TCP, UDP).
+	Proto string
+	// The current number of active connections for this virtual/real address pair.
+	ActiveConn uint64
+	// The current number of inactive connections for this virtual/real address pair.
+	InactConn uint64
+	// The current weight of this virtual/real address pair.
+	Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+	file, err := os.Open(fs.Path("net/ip_vs_stats"))
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	defer file.Close()
+
+	return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
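+// Editor's aside (illustrative sketch, not upstream code): the kernel prints
+// every counter in /proc/net/ip_vs_stats in hexadecimal, hence the base-16
+// ParseUint calls below. From the fixture line
+// "16AA370 E33656E5 0 51D8C8883AB3 0":
+//
+//	conns, _ := strconv.ParseUint("16AA370", 16, 64) // == 23765872 connections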
+func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = 
hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 00000000..9dc19583 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,151 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device consists of. + DisksTotal int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. +func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := fs.Path("mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) + if err != nil { + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + mdStates := []MDStat{} + lines := strings.Split(string(content), "\n") + for i, l := range lines { + if l == "" { + continue + } + if l[0] == ' ' { + continue + } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + continue + } + + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) + } + mdName := mainLine[0] + activityState := mainLine[2] + + if len(lines) <= i+3 { + return mdStates, fmt.Errorf( + "error parsing %s: too few lines for md device %s", + mdStatusFilePath, + mdName, + ) + } + + active, total, size, err := evalStatusline(lines[i+1]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + // j is the line number of the syncing-line. + j := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + j = i + 3 + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. 
+ syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 00000000..7a8a1e09 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,606 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10TCPLen = 10 + fieldTransport10UDPLen = 7 + + fieldTransport11TCPLen = 13 + fieldTransport11UDPLen = 10 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. 
+ Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The age of the NFS mount. + Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. + Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. + VFSSetattr uint64 + // Number of pending writes that have been forcefully flushed to the server. + VFSFlush uint64 + // Number of times fsync() has been called on directories and files. + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 + // Number of times files have been closed and released. + VFSFileRelease uint64 + // Unknown. Possibly unused. + CongestionWait uint64 + // Number of times files have been truncated. + Truncation uint64 + // Number of times a file has been grown due to writes beyond its existing end. + WriteExtension uint64 + // Number of times a file was removed while still open by another process. 
+	SillyRename uint64
+	// Number of times the NFS server gave less data than expected while reading.
+	ShortRead uint64
+	// Number of times the NFS server wrote less data than expected while writing.
+	ShortWrite uint64
+	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
+	// offline storage.
+	JukeboxDelay uint64
+	// Number of NFS v4.1+ pNFS reads.
+	PNFSRead uint64
+	// Number of NFS v4.1+ pNFS writes.
+	PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+	// The name of the operation.
+	Operation string
+	// Number of requests performed for this operation.
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueTime time.Duration
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseTime time.Duration
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTime time.Duration
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// between sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+	MaximumRPCSlotsUsed uint64
+	// A running counter, incremented on each request as the current size of the
+	// sending queue.
+	CumulativeSendingQueue uint64
+	// A running counter, incremented on each request as the current size of the
+	// pending queue.
+	CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
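Callers do not invoke parseMountStats directly; the exported entry point is Proc.MountStats, defined later in this vendored package. A hedged usage sketch, assuming the same imports as the IPVS example above and at least one NFS mount in the target namespace:

func printNFSMounts() error {
	p, err := procfs.Self()
	if err != nil {
		return err
	}
	mounts, err := p.MountStats() // parses /proc/self/mountstats
	if err != nil {
		return err
	}
	for _, m := range mounts {
		// Stats is only populated for NFS mounts; recover the concrete
		// type with a type assertion, as the Mount doc comment suggests.
		if nfsStats, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Printf("%s on %s: age=%v, %d bytes read\n",
				m.Device, m.Mount, nfsStats.Age, nfsStats.Bytes.ReadTotal)
		}
	}
	return nil
}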
+func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? + if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. 
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[1:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. +func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. 
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. +func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Number of expected fields in each per-operation statistics set + numFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) != numFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, numFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + ops = append(ops, NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, + CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, + CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + }) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. 
It is the only string value in the line + protocol := ss[0] + ss = ss[1:] + + switch statVersion { + case statVersion10: + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport10TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport10UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { + return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + } + case statVersion11: + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport11TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport11UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { + return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + } + default: + return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. Since the stat length is bigger for TCP stats, we use + // the TCP length here. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. + ns := make([]uint64, fieldTransport11TCPLen) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns[i] = n + } + + // The fields differ depending on the transport protocol (TCP or UDP) + // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt + // + // For the udp RPC transport there is no connection count, connect idle time, + // or idle time (fields #3, #4, and #5); all other fields are the same. So + // we set them to 0 here. + if protocol == "udp" { + ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } + + return &NFSTransportStats{ + Protocol: protocol, + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTime: time.Duration(ns[4]) * time.Second, + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go new file mode 100644 index 00000000..3f252337 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -0,0 +1,216 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. 
+ RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. + TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func NewNetDev() (NetDev, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetDev() +} + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NewNetDev() (NetDev, error) { + return newNetDev(fs.Path("net/dev")) +} + +// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NewNetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + nd := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. + if n < 2 { + continue + } + + line, err := nd.parseLine(s.Text()) + if err != nil { + return nd, err + } + + nd[line.Name] = *line + } + + return nd, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. 
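Before the line parser, it helps to recall the shape of the input: after two header lines, each row of /proc/net/dev is an interface name, a colon, and sixteen counters. For callers, the exported entry points are NewNetDev and NetDev.Total; a brief sketch, assuming the same imports as the earlier examples and an illustrative interface name:

func printNetDev() error {
	nd, err := procfs.NewNetDev() // reads /proc/net/dev, skipping the two header lines
	if err != nil {
		return err
	}
	if eth0, ok := nd["eth0"]; ok { // "eth0" is just an example name
		fmt.Printf("eth0: rx=%d bytes, tx=%d bytes\n", eth0.RxBytes, eth0.TxBytes)
	}
	total := nd.Total() // sums counters across all interfaces
	fmt.Printf("total (%s): rx=%d, tx=%d\n", total.Name, total.RxBytes, total.TxBytes)
	return nil
}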
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+	parts := strings.SplitN(rawLine, ":", 2)
+	if len(parts) != 2 {
+		return nil, errors.New("invalid net/dev line, missing colon")
+	}
+	fields := strings.Fields(strings.TrimSpace(parts[1]))
+	// Guard against truncated lines so the fixed-index parsing below cannot
+	// panic with an index out of range.
+	if len(fields) < 16 {
+		return nil, errors.New("invalid net/dev line, missing fields")
+	}
+
+	var err error
+	line := &NetDevLine{}
+
+	// Interface Name
+	line.Name = strings.TrimSpace(parts[0])
+	if line.Name == "" {
+		return nil, errors.New("invalid net/dev line, empty interface name")
+	}
+
+	// RX
+	line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// TX
+	line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted, comma-separated list of interface names.
+func (nd NetDev) Total() NetDevLine {
+	total := NetDevLine{}
+
+	names := make([]string, 0, len(nd))
+	for _, ifc := range nd {
+		names = append(names, ifc.Name)
+		total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+		total.RxErrors += ifc.RxErrors
+		total.RxDropped += ifc.RxDropped
+		total.RxFIFO += ifc.RxFIFO
+		total.RxFrame += ifc.RxFrame
+		total.RxCompressed += ifc.RxCompressed
+		total.RxMulticast += ifc.RxMulticast
+		total.TxBytes += ifc.TxBytes
+		total.TxPackets += ifc.TxPackets
+		total.TxErrors += ifc.TxErrors
+		total.TxDropped += ifc.TxDropped
+		total.TxFIFO += ifc.TxFIFO
+		total.TxCollisions += ifc.TxCollisions
+		total.TxCarrier += ifc.TxCarrier
+		total.TxCompressed += ifc.TxCompressed
+	}
+	sort.Strings(names)
+	total.Name = strings.Join(names, ", ")
+
+	return total
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go
new file mode 100644
index 00000000..651bf681
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package nfs implements parsing of /proc/net/rpc/nfsd. +// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ +package nfs + +// ReplyCache models the "rc" line. +type ReplyCache struct { + Hits uint64 + Misses uint64 + NoCache uint64 +} + +// FileHandles models the "fh" line. +type FileHandles struct { + Stale uint64 + TotalLookups uint64 + AnonLookups uint64 + DirNoCache uint64 + NoDirNoCache uint64 +} + +// InputOutput models the "io" line. +type InputOutput struct { + Read uint64 + Write uint64 +} + +// Threads models the "th" line. +type Threads struct { + Threads uint64 + FullCnt uint64 +} + +// ReadAheadCache models the "ra" line. +type ReadAheadCache struct { + CacheSize uint64 + CacheHistogram []uint64 + NotFound uint64 +} + +// Network models the "net" line. +type Network struct { + NetCount uint64 + UDPCount uint64 + TCPCount uint64 + TCPConnect uint64 +} + +// ClientRPC models the nfs "rpc" line. +type ClientRPC struct { + RPCCount uint64 + Retransmissions uint64 + AuthRefreshes uint64 +} + +// ServerRPC models the nfsd "rpc" line. +type ServerRPC struct { + RPCCount uint64 + BadCnt uint64 + BadFmt uint64 + BadAuth uint64 + BadcInt uint64 +} + +// V2Stats models the "proc2" line. +type V2Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Root uint64 + Lookup uint64 + ReadLink uint64 + Read uint64 + WrCache uint64 + Write uint64 + Create uint64 + Remove uint64 + Rename uint64 + Link uint64 + SymLink uint64 + MkDir uint64 + RmDir uint64 + ReadDir uint64 + FsStat uint64 +} + +// V3Stats models the "proc3" line. +type V3Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Lookup uint64 + Access uint64 + ReadLink uint64 + Read uint64 + Write uint64 + Create uint64 + MkDir uint64 + SymLink uint64 + MkNod uint64 + Remove uint64 + RmDir uint64 + Rename uint64 + Link uint64 + ReadDir uint64 + ReadDirPlus uint64 + FsStat uint64 + FsInfo uint64 + PathConf uint64 + Commit uint64 +} + +// ClientV4Stats models the nfs "proc4" line. 
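The client- and server-side structs in this file are populated by ParseClientRPCStats and ParseServerRPCStats, vendored further below. A hedged sketch of the client side (the server side is symmetric via /proc/net/rpc/nfsd); it assumes imports of os and fmt plus this package under its nfs name:

func printNFSClientRPC() error {
	f, err := os.Open("/proc/net/rpc/nfs")
	if err != nil {
		return err
	}
	defer f.Close()

	stats, err := nfs.ParseClientRPCStats(f)
	if err != nil {
		return err
	}
	fmt.Printf("rpc=%d retransmissions=%d v3 reads=%d\n",
		stats.ClientRPC.RPCCount,
		stats.ClientRPC.Retransmissions,
		stats.V3Stats.Read)
	return nil
}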
+type ClientV4Stats struct { + Null uint64 + Read uint64 + Write uint64 + Commit uint64 + Open uint64 + OpenConfirm uint64 + OpenNoattr uint64 + OpenDowngrade uint64 + Close uint64 + Setattr uint64 + FsInfo uint64 + Renew uint64 + SetClientID uint64 + SetClientIDConfirm uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Access uint64 + Getattr uint64 + Lookup uint64 + LookupRoot uint64 + Remove uint64 + Rename uint64 + Link uint64 + Symlink uint64 + Create uint64 + Pathconf uint64 + StatFs uint64 + ReadLink uint64 + ReadDir uint64 + ServerCaps uint64 + DelegReturn uint64 + GetACL uint64 + SetACL uint64 + FsLocations uint64 + ReleaseLockowner uint64 + Secinfo uint64 + FsidPresent uint64 + ExchangeID uint64 + CreateSession uint64 + DestroySession uint64 + Sequence uint64 + GetLeaseTime uint64 + ReclaimComplete uint64 + LayoutGet uint64 + GetDeviceInfo uint64 + LayoutCommit uint64 + LayoutReturn uint64 + SecinfoNoName uint64 + TestStateID uint64 + FreeStateID uint64 + GetDeviceList uint64 + BindConnToSession uint64 + DestroyClientID uint64 + Seek uint64 + Allocate uint64 + DeAllocate uint64 + LayoutStats uint64 + Clone uint64 +} + +// ServerV4Stats models the nfsd "proc4" line. +type ServerV4Stats struct { + Null uint64 + Compound uint64 +} + +// V4Ops models the "proc4ops" line: NFSv4 operations +// Variable list, see: +// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) +// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) +// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) +type V4Ops struct { + //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? + Op0Unused uint64 + Op1Unused uint64 + Op2Future uint64 + Access uint64 + Close uint64 + Commit uint64 + Create uint64 + DelegPurge uint64 + DelegReturn uint64 + GetAttr uint64 + GetFH uint64 + Link uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Lookup uint64 + LookupRoot uint64 + Nverify uint64 + Open uint64 + OpenAttr uint64 + OpenConfirm uint64 + OpenDgrd uint64 + PutFH uint64 + PutPubFH uint64 + PutRootFH uint64 + Read uint64 + ReadDir uint64 + ReadLink uint64 + Remove uint64 + Rename uint64 + Renew uint64 + RestoreFH uint64 + SaveFH uint64 + SecInfo uint64 + SetAttr uint64 + Verify uint64 + Write uint64 + RelLockOwner uint64 +} + +// ClientRPCStats models all stats from /proc/net/rpc/nfs. +type ClientRPCStats struct { + Network Network + ClientRPC ClientRPC + V2Stats V2Stats + V3Stats V3Stats + ClientV4Stats ClientV4Stats +} + +// ServerRPCStats models all stats from /proc/net/rpc/nfsd. +type ServerRPCStats struct { + ReplyCache ReplyCache + FileHandles FileHandles + InputOutput InputOutput + Threads Threads + ReadAheadCache ReadAheadCache + Network Network + ServerRPC ServerRPC + V2Stats V2Stats + V3Stats V3Stats + ServerV4Stats ServerV4Stats + V4Ops V4Ops +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go new file mode 100644 index 00000000..95a83cc5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "fmt" +) + +func parseReplyCache(v []uint64) (ReplyCache, error) { + if len(v) != 3 { + return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) + } + + return ReplyCache{ + Hits: v[0], + Misses: v[1], + NoCache: v[2], + }, nil +} + +func parseFileHandles(v []uint64) (FileHandles, error) { + if len(v) != 5 { + return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) + } + + return FileHandles{ + Stale: v[0], + TotalLookups: v[1], + AnonLookups: v[2], + DirNoCache: v[3], + NoDirNoCache: v[4], + }, nil +} + +func parseInputOutput(v []uint64) (InputOutput, error) { + if len(v) != 2 { + return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) + } + + return InputOutput{ + Read: v[0], + Write: v[1], + }, nil +} + +func parseThreads(v []uint64) (Threads, error) { + if len(v) != 2 { + return Threads{}, fmt.Errorf("invalid Threads line %q", v) + } + + return Threads{ + Threads: v[0], + FullCnt: v[1], + }, nil +} + +func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { + if len(v) != 12 { + return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) + } + + return ReadAheadCache{ + CacheSize: v[0], + CacheHistogram: v[1:11], + NotFound: v[11], + }, nil +} + +func parseNetwork(v []uint64) (Network, error) { + if len(v) != 4 { + return Network{}, fmt.Errorf("invalid Network line %q", v) + } + + return Network{ + NetCount: v[0], + UDPCount: v[1], + TCPCount: v[2], + TCPConnect: v[3], + }, nil +} + +func parseServerRPC(v []uint64) (ServerRPC, error) { + if len(v) != 5 { + return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ServerRPC{ + RPCCount: v[0], + BadCnt: v[1], + BadFmt: v[2], + BadAuth: v[3], + BadcInt: v[4], + }, nil +} + +func parseClientRPC(v []uint64) (ClientRPC, error) { + if len(v) != 3 { + return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ClientRPC{ + RPCCount: v[0], + Retransmissions: v[1], + AuthRefreshes: v[2], + }, nil +} + +func parseV2Stats(v []uint64) (V2Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 18 { + return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) + } + + return V2Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Root: v[4], + Lookup: v[5], + ReadLink: v[6], + Read: v[7], + WrCache: v[8], + Write: v[9], + Create: v[10], + Remove: v[11], + Rename: v[12], + Link: v[13], + SymLink: v[14], + MkDir: v[15], + RmDir: v[16], + ReadDir: v[17], + FsStat: v[18], + }, nil +} + +func parseV3Stats(v []uint64) (V3Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 22 { + return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) + } + + return V3Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Lookup: v[4], + Access: v[5], + ReadLink: v[6], + Read: v[7], + Write: v[8], + Create: v[9], + MkDir: v[10], + SymLink: v[11], + MkNod: v[12], + Remove: v[13], + RmDir: v[14], + Rename: v[15], + Link: v[16], + ReadDir: v[17], + ReadDirPlus: v[18], + FsStat: v[19], + FsInfo: v[20], + PathConf: v[21], + Commit: v[22], + }, nil +} + +func parseClientV4Stats(v []uint64) (ClientV4Stats, 
error) { + values := int(v[0]) + if len(v[1:]) != values { + return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) + } + + // This function currently supports mapping 59 NFS v4 client stats. Older + // kernels may emit fewer stats, so we must detect this and pad out the + // values to match the expected slice size. + if values < 59 { + newValues := make([]uint64, 60) + copy(newValues, v) + v = newValues + } + + return ClientV4Stats{ + Null: v[1], + Read: v[2], + Write: v[3], + Commit: v[4], + Open: v[5], + OpenConfirm: v[6], + OpenNoattr: v[7], + OpenDowngrade: v[8], + Close: v[9], + Setattr: v[10], + FsInfo: v[11], + Renew: v[12], + SetClientID: v[13], + SetClientIDConfirm: v[14], + Lock: v[15], + Lockt: v[16], + Locku: v[17], + Access: v[18], + Getattr: v[19], + Lookup: v[20], + LookupRoot: v[21], + Remove: v[22], + Rename: v[23], + Link: v[24], + Symlink: v[25], + Create: v[26], + Pathconf: v[27], + StatFs: v[28], + ReadLink: v[29], + ReadDir: v[30], + ServerCaps: v[31], + DelegReturn: v[32], + GetACL: v[33], + SetACL: v[34], + FsLocations: v[35], + ReleaseLockowner: v[36], + Secinfo: v[37], + FsidPresent: v[38], + ExchangeID: v[39], + CreateSession: v[40], + DestroySession: v[41], + Sequence: v[42], + GetLeaseTime: v[43], + ReclaimComplete: v[44], + LayoutGet: v[45], + GetDeviceInfo: v[46], + LayoutCommit: v[47], + LayoutReturn: v[48], + SecinfoNoName: v[49], + TestStateID: v[50], + FreeStateID: v[51], + GetDeviceList: v[52], + BindConnToSession: v[53], + DestroyClientID: v[54], + Seek: v[55], + Allocate: v[56], + DeAllocate: v[57], + LayoutStats: v[58], + Clone: v[59], + }, nil +} + +func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 2 { + return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ServerV4Stats{ + Null: v[1], + Compound: v[2], + }, nil +} + +func parseV4Ops(v []uint64) (V4Ops, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 39 { + return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) + } + + stats := V4Ops{ + Op0Unused: v[1], + Op1Unused: v[2], + Op2Future: v[3], + Access: v[4], + Close: v[5], + Commit: v[6], + Create: v[7], + DelegPurge: v[8], + DelegReturn: v[9], + GetAttr: v[10], + GetFH: v[11], + Link: v[12], + Lock: v[13], + Lockt: v[14], + Locku: v[15], + Lookup: v[16], + LookupRoot: v[17], + Nverify: v[18], + Open: v[19], + OpenAttr: v[20], + OpenConfirm: v[21], + OpenDgrd: v[22], + PutFH: v[23], + PutPubFH: v[24], + PutRootFH: v[25], + Read: v[26], + ReadDir: v[27], + ReadLink: v[28], + Remove: v[29], + Rename: v[30], + Renew: v[31], + RestoreFH: v[32], + SaveFH: v[33], + SecInfo: v[34], + SetAttr: v[35], + Verify: v[36], + Write: v[37], + RelLockOwner: v[38], + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go new file mode 100644 index 00000000..c0d3a5ad --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
+func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
+	stats := &ClientRPCStats{}
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(scanner.Text())
+		// require at least a label and one value
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("invalid NFS metric line %q", line)
+		}
+
+		values, err := util.ParseUint64s(parts[1:])
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+		}
+
+		switch metricLine := parts[0]; metricLine {
+		case "net":
+			stats.Network, err = parseNetwork(values)
+		case "rpc":
+			stats.ClientRPC, err = parseClientRPC(values)
+		case "proc2":
+			stats.V2Stats, err = parseV2Stats(values)
+		case "proc3":
+			stats.V3Stats, err = parseV3Stats(values)
+		case "proc4":
+			stats.ClientV4Stats, err = parseClientV4Stats(values)
+		default:
+			return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning NFS file: %s", err)
+	}
+
+	return stats, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
new file mode 100644
index 00000000..57bb4a35
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
+func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
+	stats := &ServerRPCStats{}
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(scanner.Text())
+		// require at least a label and one value
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("invalid NFSd metric line %q", line)
+		}
+		label := parts[0]
+
+		var values []uint64
+		var err error
+		if label == "th" {
+			if len(parts) < 3 {
+				return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
+			}
+			values, err = util.ParseUint64s(parts[1:3])
+		} else {
+			values, err = util.ParseUint64s(parts[1:])
+		}
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
+		}
+
+		switch metricLine := parts[0]; metricLine {
+		case "rc":
+			stats.ReplyCache, err = parseReplyCache(values)
+		case "fh":
+			stats.FileHandles, err = parseFileHandles(values)
+		case "io":
+			stats.InputOutput, err = parseInputOutput(values)
+		case "th":
+			stats.Threads, err = parseThreads(values)
+		case "ra":
+			stats.ReadAheadCache, err = parseReadAheadCache(values)
+		case "net":
+			stats.Network, err = parseNetwork(values)
+		case "rpc":
+			stats.ServerRPC, err = parseServerRPC(values)
+		case "proc2":
+			stats.V2Stats, err = parseV2Stats(values)
+		case "proc3":
+			stats.V3Stats, err = parseV3Stats(values)
+		case "proc4":
+			stats.ServerV4Stats, err = parseServerV4Stats(values)
+		case "proc4ops":
+			stats.V4Ops, err = parseV4Ops(values)
+		default:
+			return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning NFSd file: %s", err)
+	}
+
+	return stats, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 00000000..06bed0ef
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,258 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+	// The process ID.
+	PID int
+
+	fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int           { return len(p) }
+func (p Procs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Proc{}, err
+	}
+	return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
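A short sketch of the Proc API as it is typically used; it assumes the same imports as the earlier examples, and the pid is illustrative and must exist under /proc:

func describeProcess(pid int) error {
	p, err := procfs.NewProc(pid)
	if err != nil {
		return err // e.g. the pid does not exist
	}
	comm, err := p.Comm()
	if err != nil {
		return err
	}
	args, err := p.CmdLine() // argv, NUL-separated in the cmdline file
	if err != nil {
		return err
	}
	fmt.Printf("%d %s %v\n", p.PID, comm, args)
	return nil
}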
+func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. +func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// Cwd returns the absolute path to the current working directory of the process. +func (p Proc) Cwd() (string, error) { + wd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return wd, err +} + +// RootDir returns the absolute path to the process's root directory (as set by chroot) +func (p Proc) RootDir() (string, error) { + rdir, err := os.Readlink(p.path("root")) + if os.IsNotExist(err) { + return "", nil + } + + return rdir, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. 
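A sketch of the file-descriptor helpers (FileDescriptorsLen is defined just below). Note that FileDescriptors and FileDescriptorTargets each re-read the fd directory, so their listings are not guaranteed to line up if descriptors churn between the calls:

func inspectFDs(p procfs.Proc) error {
	n, err := p.FileDescriptorsLen()
	if err != nil {
		return err
	}
	targets, err := p.FileDescriptorTargets()
	if err != nil {
		return err
	}
	fmt.Printf("%d open file descriptors\n", n)
	for _, t := range targets {
		if t != "" { // unresolvable links are left empty
			fmt.Println(t)
		}
	}
	return nil
}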
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	targets := make([]string, len(names))
+
+	for i, name := range names {
+		target, err := os.Readlink(p.path("fd", name))
+		if err == nil {
+			targets[i] = target
+		}
+	}
+
+	return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+	fds, err := p.fileDescriptors()
+	if err != nil {
+		return 0, err
+	}
+
+	return len(fds), nil
+}
+
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+	f, err := os.Open(p.path("mountstats"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseMountStats(f)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+	d, err := os.Open(p.path("fd"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+	}
+
+	return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+	return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 00000000..0251c83b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+	// Chars read.
+	RChar uint64
+	// Chars written.
+	WChar uint64
+	// Read syscalls.
+	SyscR uint64
+	// Write syscalls.
+	SyscW uint64
+	// Bytes read.
+	ReadBytes uint64
+	// Bytes written.
+	WriteBytes uint64
+	// Bytes written, but taking into account truncation. See
+	// Documentation/filesystems/proc.txt in the kernel sources for
+	// detailed explanation.
+	CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
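A sketch of reading a process's I/O counters (imports as above); note that reading another process's io file typically requires elevated permissions:

func printIO(p procfs.Proc) error {
	pio, err := p.NewIO() // parses the io file with fmt.Sscanf
	if err != nil {
		return err
	}
	fmt.Printf("read=%d written=%d cancelled=%d\n",
		pio.ReadBytes, pio.WriteBytes, pio.CancelledWriteBytes)
	return nil
}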
+func (p Proc) NewIO() (ProcIO, error) { + pio := ProcIO{} + + f, err := os.Open(p.path("io")) + if err != nil { + return pio, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + + return pio, err +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 00000000..f04ba6fd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,150 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime int64 + // Maximum size of files that the process may create. + FileSize int64 + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize int64 + // Maximum size of the process stack in bytes. + StackSize int64 + // Maximum size of a core file. + CoreFileSize int64 + // Limit of the process's resident set in pages. + ResidentSet int64 + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes int64 + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles int64 + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory int64 + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace int64 + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks int64 + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals int64 + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize int64 + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority int64 + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority int64 + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int64 +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. 
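NewLimits (below) maps the literal "unlimited" to -1 via parseInt, so callers should treat -1 as a sentinel rather than a count. A sketch (openFilesLimit is an invented name):

	// Assumes: import ("fmt"; "github.com/prometheus/procfs")
	func openFilesLimit(p procfs.Proc) (string, error) {
		l, err := p.NewLimits()
		if err != nil {
			return "", err
		}
		if l.OpenFiles == -1 { // "unlimited" in /proc/<pid>/limits
			return "unlimited", nil
		}
		return fmt.Sprintf("%d", l.OpenFiles), nil
	}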
+func (p Proc) NewLimits() (ProcLimits, error) {
+	f, err := os.Open(p.path("limits"))
+	if err != nil {
+		return ProcLimits{}, err
+	}
+	defer f.Close()
+
+	var (
+		l = ProcLimits{}
+		s = bufio.NewScanner(f)
+	)
+	for s.Scan() {
+		fields := limitsDelimiter.Split(s.Text(), limitsFields)
+		if len(fields) != limitsFields {
+			return ProcLimits{}, fmt.Errorf(
+				"couldn't parse %s line %s", f.Name(), s.Text())
+		}
+
+		switch fields[0] {
+		case "Max cpu time":
+			l.CPUTime, err = parseInt(fields[1])
+		case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+		case "Max data size":
+			l.DataSize, err = parseInt(fields[1])
+		case "Max stack size":
+			l.StackSize, err = parseInt(fields[1])
+		case "Max core file size":
+			l.CoreFileSize, err = parseInt(fields[1])
+		case "Max resident set":
+			l.ResidentSet, err = parseInt(fields[1])
+		case "Max processes":
+			l.Processes, err = parseInt(fields[1])
+		case "Max open files":
+			l.OpenFiles, err = parseInt(fields[1])
+		case "Max locked memory":
+			l.LockedMemory, err = parseInt(fields[1])
+		case "Max address space":
+			l.AddressSpace, err = parseInt(fields[1])
+		case "Max file locks":
+			l.FileLocks, err = parseInt(fields[1])
+		case "Max pending signals":
+			l.PendingSignals, err = parseInt(fields[1])
+		case "Max msgqueue size":
+			l.MsqqueueSize, err = parseInt(fields[1])
+		case "Max nice priority":
+			l.NicePriority, err = parseInt(fields[1])
+		case "Max realtime priority":
+			l.RealtimePriority, err = parseInt(fields[1])
+		case "Max realtime timeout":
+			l.RealtimeTimeout, err = parseInt(fields[1])
+		}
+		if err != nil {
+			return ProcLimits{}, err
+		}
+	}
+
+	return l, s.Err()
+}
+
+func parseInt(s string) (int64, error) {
+	if s == limitsUnlimited {
+		return -1, nil
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+	}
+	return i, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 00000000..d06c26eb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+	Type  string // Namespace type.
+	Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// NewNamespaces reads from /proc/[pid]/ns/* to get the namespaces of which the
+// process is a member.
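Since NewNamespaces (below) keys the result map by directory entry ("net", "pid", "mnt", ...) and equal inodes mean shared membership, comparing namespaces is a map lookup. A sketch (sameNetNS is an invented name; it assumes both processes expose a "net" entry):

	// Assumes: import "github.com/prometheus/procfs"
	func sameNetNS(a, b procfs.Proc) (bool, error) {
		nsA, err := a.NewNamespaces()
		if err != nil {
			return false, err
		}
		nsB, err := b.NewNamespaces()
		if err != nil {
			return false, err
		}
		// Equal inode numbers mean the processes share the namespace.
		return nsA["net"].Inode == nsB["net"].Inode, nil
	}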
+func (p Proc) NewNamespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 00000000..3cf2a9f1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,188 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. 
+ MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. +func (p Proc) NewStat() (ProcStat, error) { + f, err := os.Open(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + stat, err := s.fs.NewStat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 00000000..61eb6b0e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,232 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// CPUStat shows how much time the CPU has spent in various stages.
+type CPUStat struct {
+	User      float64
+	Nice      float64
+	System    float64
+	Idle      float64
+	Iowait    float64
+	IRQ       float64
+	SoftIRQ   float64
+	Steal     float64
+	Guest     float64
+	GuestNice float64
+}
+
+// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading /proc/softirqs
+type SoftIRQStat struct {
+	Hi          uint64
+	Timer       uint64
+	NetTx       uint64
+	NetRx       uint64
+	Block       uint64
+	BlockIoPoll uint64
+	Tasklet     uint64
+	Sched       uint64
+	Hrtimer     uint64
+	Rcu         uint64
+}
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+	// Boot time in seconds since the Epoch.
+	BootTime uint64
+	// Summed up cpu statistics.
+	CPUTotal CPUStat
+	// Per-CPU statistics.
+	CPU []CPUStat
+	// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
+	IRQTotal uint64
+	// Number of times a numbered IRQ was triggered.
+	IRQ []uint64
+	// Number of times a context switch happened.
+	ContextSwitches uint64
+	// Number of times a process was created.
+	ProcessCreated uint64
+	// Number of processes currently running.
+	ProcessesRunning uint64
+	// Number of processes currently blocked (waiting for IO).
+	ProcessesBlocked uint64
+	// Number of times a softirq was scheduled.
+	SoftIRQTotal uint64
+	// Detailed softirq statistics.
+	SoftIRQ SoftIRQStat
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Stat{}, err
+	}
+
+	return fs.NewStat()
+}
+
+// parseCPUStat parses a cpu statistics line and returns the CPUStat struct
+// plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+	cpuStat := CPUStat{}
+	var cpu string
+
+	count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+		&cpu,
+		&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+		&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+		&cpuStat.Guest, &cpuStat.GuestNice)
+
+	if err != nil && err != io.EOF {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+	}
+	if count == 0 {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+	}
+
+	cpuStat.User /= userHZ
+	cpuStat.Nice /= userHZ
+	cpuStat.System /= userHZ
+	cpuStat.Idle /= userHZ
+	cpuStat.Iowait /= userHZ
+	cpuStat.IRQ /= userHZ
+	cpuStat.SoftIRQ /= userHZ
+	cpuStat.Steal /= userHZ
+	cpuStat.Guest /= userHZ
+	cpuStat.GuestNice /= userHZ
+
+	if cpu == "cpu" {
+		return cpuStat, -1, nil
+	}
+
+	cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+	if err != nil {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+	}
+
+	return cpuStat, cpuID, nil
+}
+
+// parseSoftIRQStat parses a softirq line.
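Because parseCPUStat divides every counter by userHZ, the exported Stat values are already in seconds. A quick read-out sketch (printKernelStats is an invented name):

	// Assumes: import ("fmt"; "github.com/prometheus/procfs")
	func printKernelStats() error {
		s, err := procfs.NewStat() // parses /proc/stat at the default mount point
		if err != nil {
			return err
		}
		fmt.Printf("boot=%d ctxt=%d user=%.2fs system=%.2fs\n",
			s.BootTime, s.ContextSwitches, s.CPUTotal.User, s.CPUTotal.System)
		return nil
	}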
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+	softIRQStat := SoftIRQStat{}
+	var total uint64
+	var prefix string
+
+	_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+		&prefix, &total,
+		&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+		&softIRQStat.Block, &softIRQStat.BlockIoPoll,
+		&softIRQStat.Tasklet, &softIRQStat.Sched,
+		&softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+	if err != nil {
+		return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+	}
+
+	return softIRQStat, total, nil
+}
+
+// NewStat returns information about current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+	// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+
+	f, err := os.Open(fs.Path("stat"))
+	if err != nil {
+		return Stat{}, err
+	}
+	defer f.Close()
+
+	stat := Stat{}
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(scanner.Text())
+		// require at least <key> <value>
+		if len(parts) < 2 {
+			continue
+		}
+		switch {
+		case parts[0] == "btime":
+			if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+			}
+		case parts[0] == "intr":
+			if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+			}
+			numberedIRQs := parts[2:]
+			stat.IRQ = make([]uint64, len(numberedIRQs))
+			for i, count := range numberedIRQs {
+				if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+				}
+			}
+		case parts[0] == "ctxt":
+			if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+			}
+		case parts[0] == "processes":
+			if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+			}
+		case parts[0] == "procs_running":
+			if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+			}
+		case parts[0] == "procs_blocked":
+			if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+			}
+		case parts[0] == "softirq":
+			softIRQStats, total, err := parseSoftIRQStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			stat.SoftIRQTotal = total
+			stat.SoftIRQ = softIRQStats
+		case strings.HasPrefix(parts[0], "cpu"):
+			cpuStat, cpuID, err := parseCPUStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			if cpuID == -1 {
+				stat.CPUTotal = cpuStat
+			} else {
+				for int64(len(stat.CPU)) <= cpuID {
+					stat.CPU = append(stat.CPU, CPUStat{})
+				}
+				stat.CPU[cpuID] = cpuStat
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+	}
+
+	return stat, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
new file mode 100644
index 00000000..b0171a12
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ttar
@@ -0,0 +1,389 @@
+#!/usr/bin/env bash
+
+# Purpose: plain text tar format
+# Limitations: - only suitable for text files, directories, and symlinks
+#              - stores only filename, content, and mode
+#              - not designed for untrusted input
+#
+# Note: must work with bash version 3.2 (macOS)
+
+# Copyright 2017 Roger Luethi
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit -o nounset
+
+# Sanitize environment (for instance, standard sorting of glob matches)
+export LC_ALL=C
+
+path=""
+CMD=""
+ARG_STRING="$*"
+
+#------------------------------------------------------------------------------
+# Not all sed implementations can work on null bytes. In order to make ttar
+# work out of the box on macOS, use Python as a stream editor.
+
+USE_PYTHON=0
+
+PYTHON_CREATE_FILTER=$(cat << 'PCF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'EOF', r'\EOF', line)
+    line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
+    line = re.sub('\x00', r'NULLBYTE', line)
+    sys.stdout.write(line)
+PCF
+)
+
+PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
+    line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
+    line = re.sub(r'(?<!\\)EOF', '', line)
+    line = re.sub(r'\\EOF', 'EOF', line)
+    sys.stdout.write(line)
+PEF
+)
+
+function test_environment {
+    if [ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]; then
+        echo "WARNING sed unable to handle null bytes, using Python (slow)."
+        if ! which python >/dev/null; then
+            echo "ERROR Python not found. Aborting."
+            exit 2
+        fi
+        USE_PYTHON=1
+    fi
+}
+
+#------------------------------------------------------------------------------
+
+function usage {
+    bname=$(basename "$0")
+    cat << USAGE
+Usage:   $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
+         $bname            -t -f <ARCHIVE>           (list archive contents)
+         $bname [-C <DIR>] -x -f <ARCHIVE>           (extract archive)
+
+Options:
+         -C <DIR>  (change directory)
+         -v        (verbose)
+
+Example: Change to sysfs directory, create ttar file from fixtures directory
+         $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
+USAGE
+exit "$1"
+}
+
+function vecho {
+    if [ "${VERBOSE:-}" == "yes" ]; then
+        echo >&7 "$@"
+    fi
+}
+
+function set_cmd {
+    if [ -n "$CMD" ]; then
+        echo "ERROR: more than one command given"
+        echo
+        usage 2
+    fi
+    CMD=$1
+}
+
+unset VERBOSE
+
+while getopts :cf:htxvC: opt; do
+    case $opt in
+        c)
+            set_cmd "create"
+            ;;
+        f)
+            ARCHIVE=$OPTARG
+            ;;
+        h)
+            usage 0
+            ;;
+        t)
+            set_cmd "list"
+            ;;
+        x)
+            set_cmd "extract"
+            ;;
+        v)
+            VERBOSE=yes
+            exec 7>&1
+            ;;
+        C)
+            CDIR=$OPTARG
+            ;;
+        *)
+            echo >&2 "ERROR: invalid option -$OPTARG"
+            echo
+            usage 1
+            ;;
+    esac
+done
+
+# Remove processed options from arguments
+shift $(( OPTIND - 1 ));
+
+if [ "${CMD:-}" == "" ]; then
+    echo >&2 "ERROR: no command given"
+    echo
+    usage 1
+elif [ "${ARCHIVE:-}" == "" ]; then
+    echo >&2 "ERROR: no archive name given"
+    echo
+    usage 1
+fi
+
+function list {
+    local path=""
+    local size=0
+    local line_no=0
+    local ttar_file=$1
+    if [ -n "${2:-}" ]; then
+        echo >&2 "ERROR: too many arguments."
+        echo
+        usage 1
+    fi
+    if [ !
-e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while read -r line; do + line_no=$(( line_no + 1 )) + if [ $size -gt 0 ]; then + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + echo "$path" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + echo "$path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + echo "$path -> ${BASH_REMATCH[1]}" + fi + done < "$ttar_file" +} + +function extract { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while IFS= read -r line; do + line_no=$(( line_no + 1 )) + local eof_without_newline + if [ "$size" -gt 0 ]; then + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceeded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceeded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceeded by backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes). + echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + if [ -e "$path" ] || [ -L "$path" ]; then + rm "$path" + fi + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + # Create file even if it is zero-length. 
+ touch "$path" + vecho " $path" + elif [[ $line =~ ^Mode:\ (.*)$ ]]; then + mode=${BASH_REMATCH[1]} + chmod "$mode" "$path" + vecho "$mode" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + mkdir -p "$path" + vecho " $path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + ln -s "${BASH_REMATCH[1]}" "$path" + vecho " $path -> ${BASH_REMATCH[1]}" + elif [[ $line =~ ^# ]]; then + # Ignore comments between files + continue + else + echo >&2 "ERROR: Unknown keyword on line $line_no: $line" + exit 1 + fi + done < "$ttar_file" +} + +function div { + echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ + "- - - - - -" +} + +function get_mode { + local mfile=$1 + if [ -z "${STAT_OPTION:-}" ]; then + if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat + STAT_OPTION='-c' + STAT_FORMAT='%a' + else + # BSD stat + STAT_OPTION='-f' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' + fi + fi + stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" +} + +function _create { + shopt -s nullglob + local mode + local eof_without_newline + while (( "$#" )); do + file=$1 + if [ -L "$file" ]; then + echo "Path: $file" + symlinkTo=$(readlink "$file") + echo "SymlinkTo: $symlinkTo" + vecho " $file -> $symlinkTo" + div + elif [ -d "$file" ]; then + # Strip trailing slash (if there is one) + file=${file%/} + echo "Directory: $file" + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file/" + div + # Find all files and dirs, including hidden/dot files + for x in "$file/"{*,.[^.]*}; do + _create "$x" + done + elif [ -f "$file" ]; then + echo "Path: $file" + lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi + echo "Lines: $lines" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file" + div + else + echo >&2 "ERROR: file not found ($file in $(pwd))" + exit 2 + fi + shift + done +} + +function create { + ttar_file=$1 + shift + if [ -z "${1:-}" ]; then + echo >&2 "ERROR: missing arguments." + echo + usage 1 + fi + if [ -e "$ttar_file" ]; then + rm "$ttar_file" + fi + exec > "$ttar_file" + echo "# Archive created by ttar $ARG_STRING" + _create "$@" +} + +test_environment + +if [ -n "${CDIR:-}" ]; then + if [[ "$ARCHIVE" != /* ]]; then + # Relative path: preserve the archive's location before changing + # directory + ARCHIVE="$(pwd)/$ARCHIVE" + fi + cd "$CDIR" +fi + +"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go new file mode 100644 index 00000000..8f1508f0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -0,0 +1,187 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+	// All errors which are not matched by others
+	XfrmInError int
+	// No buffer is left
+	XfrmInBufferError int
+	// Header Error
+	XfrmInHdrError int
+	// No state found
+	// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+	XfrmInNoStates int
+	// Transformation protocol specific error
+	// e.g. SA Key is wrong
+	XfrmInStateProtoError int
+	// Transformation mode specific error
+	XfrmInStateModeError int
+	// Sequence error
+	// e.g. sequence number is out of window
+	XfrmInStateSeqError int
+	// State is expired
+	XfrmInStateExpired int
+	// State has mismatch option
+	// e.g. UDP encapsulation type is mismatched
+	XfrmInStateMismatch int
+	// State is invalid
+	XfrmInStateInvalid int
+	// No matching template for states
+	// e.g. Inbound SAs are correct but SP rule is wrong
+	XfrmInTmplMismatch int
+	// No policy is found for states
+	// e.g. Inbound SAs are correct but no SP is found
+	XfrmInNoPols int
+	// Policy discards
+	XfrmInPolBlock int
+	// Policy error
+	XfrmInPolError int
+	// All errors which are not matched by others
+	XfrmOutError int
+	// Bundle generation error
+	XfrmOutBundleGenError int
+	// Bundle check error
+	XfrmOutBundleCheckError int
+	// No state was found
+	XfrmOutNoStates int
+	// Transformation protocol specific error
+	XfrmOutStateProtoError int
+	// Transformation mode specific error
+	XfrmOutStateModeError int
+	// Sequence error
+	// i.e. sequence number overflow
+	XfrmOutStateSeqError int
+	// State is expired
+	XfrmOutStateExpired int
+	// Policy discards
+	XfrmOutPolBlock int
+	// Policy is dead
+	XfrmOutPolDead int
+	// Policy Error
+	XfrmOutPolError int
+	XfrmFwdHdrError int
+	XfrmOutStateInvalid int
+	XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return XfrmStat{}, err
+	}
+
+	return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
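The package-level NewXfrmStat above simply binds the fs-level reader (below) to DefaultMountPoint, so a caller can watch IPsec error counters in a few lines. A sketch (xfrmDrops is an invented name; /proc/net/xfrm_stat only exists when the kernel exposes xfrm statistics):

	// Assumes: import ("fmt"; "github.com/prometheus/procfs")
	func xfrmDrops() error {
		x, err := procfs.NewXfrmStat()
		if err != nil {
			return err // e.g. /proc/net/xfrm_stat absent
		}
		fmt.Printf("in errors=%d in policy blocks=%d out policy blocks=%d\n",
			x.XfrmInError, x.XfrmInPolBlock, x.XfrmOutPolBlock)
		return nil
	}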
+func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf( + "couldn't parse %s line %s", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case "XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go new file mode 100644 index 00000000..b3d8634d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/parse.go @@ -0,0 +1,330 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseStats parses a Stats from an input io.Reader, using the format +// found in /proc/fs/xfs/stat. +func ParseStats(r io.Reader) (*Stats, error) { + const ( + // Fields parsed into stats structures. 
+ fieldExtentAlloc = "extent_alloc" + fieldAbt = "abt" + fieldBlkMap = "blk_map" + fieldBmbt = "bmbt" + fieldDir = "dir" + fieldTrans = "trans" + fieldIg = "ig" + fieldLog = "log" + fieldRw = "rw" + fieldAttr = "attr" + fieldIcluster = "icluster" + fieldVnodes = "vnodes" + fieldBuf = "buf" + fieldXpc = "xpc" + + // Unimplemented at this time due to lack of documentation. + // fieldPushAil = "push_ail" + // fieldXstrat = "xstrat" + // fieldAbtb2 = "abtb2" + // fieldAbtc2 = "abtc2" + // fieldBmbt2 = "bmbt2" + // fieldIbt2 = "ibt2" + // fieldFibt2 = "fibt2" + // fieldQm = "qm" + // fieldDebug = "debug" + ) + + var xfss Stats + + s := bufio.NewScanner(r) + for s.Scan() { + // Expect at least a string label and a single integer value, ex: + // - abt 0 + // - rw 1 2 + ss := strings.Fields(string(s.Bytes())) + if len(ss) < 2 { + continue + } + label := ss[0] + + // Extended precision counters are uint64 values. + if label == fieldXpc { + us, err := util.ParseUint64s(ss[1:]) + if err != nil { + return nil, err + } + + xfss.ExtendedPrecision, err = extendedPrecisionStats(us) + if err != nil { + return nil, err + } + + continue + } + + // All other counters are uint32 values. + us, err := util.ParseUint32s(ss[1:]) + if err != nil { + return nil, err + } + + switch label { + case fieldExtentAlloc: + xfss.ExtentAllocation, err = extentAllocationStats(us) + case fieldAbt: + xfss.AllocationBTree, err = btreeStats(us) + case fieldBlkMap: + xfss.BlockMapping, err = blockMappingStats(us) + case fieldBmbt: + xfss.BlockMapBTree, err = btreeStats(us) + case fieldDir: + xfss.DirectoryOperation, err = directoryOperationStats(us) + case fieldTrans: + xfss.Transaction, err = transactionStats(us) + case fieldIg: + xfss.InodeOperation, err = inodeOperationStats(us) + case fieldLog: + xfss.LogOperation, err = logOperationStats(us) + case fieldRw: + xfss.ReadWrite, err = readWriteStats(us) + case fieldAttr: + xfss.AttributeOperation, err = attributeOperationStats(us) + case fieldIcluster: + xfss.InodeClustering, err = inodeClusteringStats(us) + case fieldVnodes: + xfss.Vnode, err = vnodeStats(us) + case fieldBuf: + xfss.Buffer, err = bufferStats(us) + } + if err != nil { + return nil, err + } + } + + return &xfss, s.Err() +} + +// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. +func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { + if l := len(us); l != 4 { + return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) + } + + return ExtentAllocationStats{ + ExtentsAllocated: us[0], + BlocksAllocated: us[1], + ExtentsFreed: us[2], + BlocksFreed: us[3], + }, nil +} + +// btreeStats builds a BTreeStats from a slice of uint32s. +func btreeStats(us []uint32) (BTreeStats, error) { + if l := len(us); l != 4 { + return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) + } + + return BTreeStats{ + Lookups: us[0], + Compares: us[1], + RecordsInserted: us[2], + RecordsDeleted: us[3], + }, nil +} + +// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. 
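ParseStats above dispatches on the first whitespace-separated label, so it can be exercised against a literal string instead of /proc/fs/xfs/stat. A self-checking sketch (demoParseStats is an invented name):

	// Assumes: import ("fmt"; "strings"; "github.com/prometheus/procfs/xfs")
	func demoParseStats() error {
		// Two lines in /proc/fs/xfs/stat format: a label followed by counters.
		in := "extent_alloc 1 2 3 4\nabt 5 6 7 8\n"
		s, err := xfs.ParseStats(strings.NewReader(in))
		if err != nil {
			return err
		}
		fmt.Println(s.ExtentAllocation.BlocksAllocated) // 2 (second counter)
		fmt.Println(s.AllocationBTree.Lookups)          // 5 (first abt counter)
		return nil
	}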
+func blockMappingStats(us []uint32) (BlockMappingStats, error) { + if l := len(us); l != 7 { + return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) + } + + return BlockMappingStats{ + Reads: us[0], + Writes: us[1], + Unmaps: us[2], + ExtentListInsertions: us[3], + ExtentListDeletions: us[4], + ExtentListLookups: us[5], + ExtentListCompares: us[6], + }, nil +} + +// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. +func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { + if l := len(us); l != 4 { + return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) + } + + return DirectoryOperationStats{ + Lookups: us[0], + Creates: us[1], + Removes: us[2], + Getdents: us[3], + }, nil +} + +// TransactionStats builds a TransactionStats from a slice of uint32s. +func transactionStats(us []uint32) (TransactionStats, error) { + if l := len(us); l != 3 { + return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) + } + + return TransactionStats{ + Sync: us[0], + Async: us[1], + Empty: us[2], + }, nil +} + +// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. +func inodeOperationStats(us []uint32) (InodeOperationStats, error) { + if l := len(us); l != 7 { + return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) + } + + return InodeOperationStats{ + Attempts: us[0], + Found: us[1], + Recycle: us[2], + Missed: us[3], + Duplicate: us[4], + Reclaims: us[5], + AttributeChange: us[6], + }, nil +} + +// LogOperationStats builds a LogOperationStats from a slice of uint32s. +func logOperationStats(us []uint32) (LogOperationStats, error) { + if l := len(us); l != 5 { + return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) + } + + return LogOperationStats{ + Writes: us[0], + Blocks: us[1], + NoInternalBuffers: us[2], + Force: us[3], + ForceSleep: us[4], + }, nil +} + +// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. +func readWriteStats(us []uint32) (ReadWriteStats, error) { + if l := len(us); l != 2 { + return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) + } + + return ReadWriteStats{ + Read: us[0], + Write: us[1], + }, nil +} + +// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. +func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { + if l := len(us); l != 4 { + return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) + } + + return AttributeOperationStats{ + Get: us[0], + Set: us[1], + Remove: us[2], + List: us[3], + }, nil +} + +// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. +func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { + if l := len(us); l != 3 { + return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) + } + + return InodeClusteringStats{ + Iflush: us[0], + Flush: us[1], + FlushInode: us[2], + }, nil +} + +// VnodeStats builds a VnodeStats from a slice of uint32s. +func vnodeStats(us []uint32) (VnodeStats, error) { + // The attribute "Free" appears to not be available on older XFS + // stats versions. Therefore, 7 or 8 elements may appear in + // this slice. 
+ l := len(us) + if l != 7 && l != 8 { + return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) + } + + s := VnodeStats{ + Active: us[0], + Allocate: us[1], + Get: us[2], + Hold: us[3], + Release: us[4], + Reclaim: us[5], + Remove: us[6], + } + + // Skip adding free, unless it is present. The zero value will + // be used in place of an actual count. + if l == 7 { + return s, nil + } + + s.Free = us[7] + return s, nil +} + +// BufferStats builds a BufferStats from a slice of uint32s. +func bufferStats(us []uint32) (BufferStats, error) { + if l := len(us); l != 9 { + return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) + } + + return BufferStats{ + Get: us[0], + Create: us[1], + GetLocked: us[2], + GetLockedWaited: us[3], + BusyLocked: us[4], + MissLocked: us[5], + PageRetries: us[6], + PageFound: us[7], + GetRead: us[8], + }, nil +} + +// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. +func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { + if l := len(us); l != 3 { + return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) + } + + return ExtendedPrecisionStats{ + FlushBytes: us[0], + WriteBytes: us[1], + ReadBytes: us[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go new file mode 100644 index 00000000..d86794b7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xfs provides access to statistics exposed by the XFS filesystem. +package xfs + +// Stats contains XFS filesystem runtime statistics, parsed from +// /proc/fs/xfs/stat. +// +// The names and meanings of each statistic were taken from +// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux +// kernel source. Most counters are uint32s (same data types used in +// xfs_stats.h), but some of the "extended precision stats" are uint64s. +type Stats struct { + // The name of the filesystem used to source these statistics. + // If empty, this indicates aggregated statistics for all XFS + // filesystems on the host. + Name string + + ExtentAllocation ExtentAllocationStats + AllocationBTree BTreeStats + BlockMapping BlockMappingStats + BlockMapBTree BTreeStats + DirectoryOperation DirectoryOperationStats + Transaction TransactionStats + InodeOperation InodeOperationStats + LogOperation LogOperationStats + ReadWrite ReadWriteStats + AttributeOperation AttributeOperationStats + InodeClustering InodeClusteringStats + Vnode VnodeStats + Buffer BufferStats + ExtendedPrecision ExtendedPrecisionStats +} + +// ExtentAllocationStats contains statistics regarding XFS extent allocations. 
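The 7-versus-8 field tolerance in vnodeStats above can be seen directly: feeding ParseStats a 7-counter vnodes line leaves Free at its zero value. A sketch (demoVnodeFree is an invented name):

	// Assumes: import ("strings"; "github.com/prometheus/procfs/xfs")
	func demoVnodeFree() (uint32, error) {
		s, err := xfs.ParseStats(strings.NewReader("vnodes 0 1 2 3 4 5 6\n"))
		if err != nil {
			return 0, err
		}
		return s.Vnode.Free, nil // 0: the 7-field form carries no "free" counter
	}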
+type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. +type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. +type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. +type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. +type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. +type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. +type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/LICENSE b/vendor/github.com/shirou/gopsutil/LICENSE new file mode 100644 index 00000000..da71a5e7 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/LICENSE @@ -0,0 +1,61 @@ +gopsutil is distributed under BSD license reproduced below. + +Copyright (c) 2014, WAKAYAMA Shirou +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of the gopsutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +------- +internal/common/binary.go in the gopsutil is copied and modifid from golang/encoding/binary.go. + + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu.go b/vendor/github.com/shirou/gopsutil/cpu/cpu.go new file mode 100644 index 00000000..ec0a85cd --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu.go @@ -0,0 +1,184 @@ +package cpu + +import ( + "context" + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "sync" + "time" + + "github.com/shirou/gopsutil/internal/common" +) + +// TimesStat contains the amounts of time the CPU has spent performing different +// kinds of work. Time units are in USER_HZ or Jiffies (typically hundredths of +// a second). It is based on linux /proc/stat file. 
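Times is implemented per platform further down in this diff; the common entry point looks the same everywhere. A sketch, assuming a platform with a working implementation (printPerCPUTimes is an invented name):

	// Assumes: import ("fmt"; "github.com/shirou/gopsutil/cpu")
	func printPerCPUTimes() error {
		stats, err := cpu.Times(true) // true: one TimesStat per logical CPU
		if err != nil {
			return err
		}
		for _, t := range stats {
			// Total() sums all states, so Total()-Idle is the busy time.
			fmt.Printf("%s: %.1fs busy of %.1fs total\n",
				t.CPU, t.Total()-t.Idle, t.Total())
		}
		return nil
	}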
+type TimesStat struct {
+	CPU       string  `json:"cpu"`
+	User      float64 `json:"user"`
+	System    float64 `json:"system"`
+	Idle      float64 `json:"idle"`
+	Nice      float64 `json:"nice"`
+	Iowait    float64 `json:"iowait"`
+	Irq       float64 `json:"irq"`
+	Softirq   float64 `json:"softirq"`
+	Steal     float64 `json:"steal"`
+	Guest     float64 `json:"guest"`
+	GuestNice float64 `json:"guestNice"`
+}
+
+type InfoStat struct {
+	CPU        int32    `json:"cpu"`
+	VendorID   string   `json:"vendorId"`
+	Family     string   `json:"family"`
+	Model      string   `json:"model"`
+	Stepping   int32    `json:"stepping"`
+	PhysicalID string   `json:"physicalId"`
+	CoreID     string   `json:"coreId"`
+	Cores      int32    `json:"cores"`
+	ModelName  string   `json:"modelName"`
+	Mhz        float64  `json:"mhz"`
+	CacheSize  int32    `json:"cacheSize"`
+	Flags      []string `json:"flags"`
+	Microcode  string   `json:"microcode"`
+}
+
+type lastPercent struct {
+	sync.Mutex
+	lastCPUTimes    []TimesStat
+	lastPerCPUTimes []TimesStat
+}
+
+var lastCPUPercent lastPercent
+var invoke common.Invoker = common.Invoke{}
+
+func init() {
+	lastCPUPercent.Lock()
+	lastCPUPercent.lastCPUTimes, _ = Times(false)
+	lastCPUPercent.lastPerCPUTimes, _ = Times(true)
+	lastCPUPercent.Unlock()
+}
+
+// Counts returns the number of physical or logical cores in the system
+func Counts(logical bool) (int, error) {
+	return CountsWithContext(context.Background(), logical)
+}
+
+func (c TimesStat) String() string {
+	v := []string{
+		`"cpu":"` + c.CPU + `"`,
+		`"user":` + strconv.FormatFloat(c.User, 'f', 1, 64),
+		`"system":` + strconv.FormatFloat(c.System, 'f', 1, 64),
+		`"idle":` + strconv.FormatFloat(c.Idle, 'f', 1, 64),
+		`"nice":` + strconv.FormatFloat(c.Nice, 'f', 1, 64),
+		`"iowait":` + strconv.FormatFloat(c.Iowait, 'f', 1, 64),
+		`"irq":` + strconv.FormatFloat(c.Irq, 'f', 1, 64),
+		`"softirq":` + strconv.FormatFloat(c.Softirq, 'f', 1, 64),
+		`"steal":` + strconv.FormatFloat(c.Steal, 'f', 1, 64),
+		`"guest":` + strconv.FormatFloat(c.Guest, 'f', 1, 64),
+		`"guestNice":` + strconv.FormatFloat(c.GuestNice, 'f', 1, 64),
+	}
+
+	return `{` + strings.Join(v, ",") + `}`
+}
+
+// Total returns the total number of seconds in a TimesStat
+func (c TimesStat) Total() float64 {
+	total := c.User + c.System + c.Nice + c.Iowait + c.Irq + c.Softirq + c.Steal +
+		c.Guest + c.GuestNice + c.Idle
+	return total
+}
+
+func (c InfoStat) String() string {
+	s, _ := json.Marshal(c)
+	return string(s)
+}
+
+func getAllBusy(t TimesStat) (float64, float64) {
+	busy := t.User + t.System + t.Nice + t.Iowait + t.Irq +
+		t.Softirq + t.Steal + t.Guest + t.GuestNice
+	return busy + t.Idle, busy
+}
+
+func calculateBusy(t1, t2 TimesStat) float64 {
+	t1All, t1Busy := getAllBusy(t1)
+	t2All, t2Busy := getAllBusy(t2)
+
+	if t2Busy <= t1Busy {
+		return 0
+	}
+	if t2All <= t1All {
+		return 100
+	}
+	return math.Min(100, math.Max(0, (t2Busy-t1Busy)/(t2All-t1All)*100))
+}
+
+func calculateAllBusy(t1, t2 []TimesStat) ([]float64, error) {
+	// Make sure the CPU measurements have the same length.
+	if len(t1) != len(t2) {
+		return nil, fmt.Errorf(
+			"received two CPU counts: %d != %d",
+			len(t1), len(t2),
+		)
+	}
+
+	ret := make([]float64, len(t1))
+	for i, t := range t2 {
+		ret[i] = calculateBusy(t1[i], t)
+	}
+	return ret, nil
+}
+
+// Percent calculates the percentage of cpu used either per CPU or combined.
+// If an interval of 0 is given it will compare the current cpu times against the last call.
+// Returns one value per cpu, or a single value if percpu is set to false.
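Percent (below) therefore has two modes: a positive interval samples Times twice around a sleep, while an interval of zero or less diffs against the package's remembered sample from the last call (seeded in init above). A sketch (combinedPercent is an invented name):

	// Assumes: import ("fmt"; "time"; "github.com/shirou/gopsutil/cpu")
	func combinedPercent() error {
		pct, err := cpu.Percent(time.Second, false) // blocks for ~1s to sample twice
		if err != nil {
			return err
		}
		fmt.Printf("combined CPU usage: %.1f%%\n", pct[0]) // one value when percpu=false
		return nil
	}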
+func Percent(interval time.Duration, percpu bool) ([]float64, error) {
+	return PercentWithContext(context.Background(), interval, percpu)
+}
+
+func PercentWithContext(ctx context.Context, interval time.Duration, percpu bool) ([]float64, error) {
+	if interval <= 0 {
+		return percentUsedFromLastCall(percpu)
+	}
+
+	// Get CPU usage at the start of the interval.
+	cpuTimes1, err := Times(percpu)
+	if err != nil {
+		return nil, err
+	}
+
+	time.Sleep(interval)
+
+	// And at the end of the interval.
+	cpuTimes2, err := Times(percpu)
+	if err != nil {
+		return nil, err
+	}
+
+	return calculateAllBusy(cpuTimes1, cpuTimes2)
+}
+
+func percentUsedFromLastCall(percpu bool) ([]float64, error) {
+	cpuTimes, err := Times(percpu)
+	if err != nil {
+		return nil, err
+	}
+	lastCPUPercent.Lock()
+	defer lastCPUPercent.Unlock()
+	var lastTimes []TimesStat
+	if percpu {
+		lastTimes = lastCPUPercent.lastPerCPUTimes
+		lastCPUPercent.lastPerCPUTimes = cpuTimes
+	} else {
+		lastTimes = lastCPUPercent.lastCPUTimes
+		lastCPUPercent.lastCPUTimes = cpuTimes
+	}
+
+	if lastTimes == nil {
+		return nil, fmt.Errorf("error getting times for cpu percent. lastTimes was nil")
+	}
+	return calculateAllBusy(lastTimes, cpuTimes)
+}
diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin.go
new file mode 100644
index 00000000..cd0475d3
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin.go
@@ -0,0 +1,103 @@
+// +build darwin
+
+package cpu
+
+import (
+	"context"
+	"strconv"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+// sys/resource.h
+const (
+	CPUser    = 0
+	CPNice    = 1
+	CPSys     = 2
+	CPIntr    = 3
+	CPIdle    = 4
+	CPUStates = 5
+)
+
+// default value. from time.h
+var ClocksPerSec = float64(128)
+
+func Times(percpu bool) ([]TimesStat, error) {
+	return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+	if percpu {
+		return perCPUTimes()
+	}
+
+	return allCPUTimes()
+}
+
+// Info returns only one InfoStat on Darwin.
+func Info() ([]InfoStat, error) {
+	return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+	var ret []InfoStat
+
+	c := InfoStat{}
+	c.ModelName, _ = unix.Sysctl("machdep.cpu.brand_string")
+	family, _ := unix.SysctlUint32("machdep.cpu.family")
+	c.Family = strconv.FormatUint(uint64(family), 10)
+	model, _ := unix.SysctlUint32("machdep.cpu.model")
+	c.Model = strconv.FormatUint(uint64(model), 10)
+	stepping, _ := unix.SysctlUint32("machdep.cpu.stepping")
+	c.Stepping = int32(stepping)
+	features, err := unix.Sysctl("machdep.cpu.features")
+	if err == nil {
+		for _, v := range strings.Fields(features) {
+			c.Flags = append(c.Flags, strings.ToLower(v))
+		}
+	}
+	leaf7Features, err := unix.Sysctl("machdep.cpu.leaf7_features")
+	if err == nil {
+		for _, v := range strings.Fields(leaf7Features) {
+			c.Flags = append(c.Flags, strings.ToLower(v))
+		}
+	}
+	extfeatures, err := unix.Sysctl("machdep.cpu.extfeatures")
+	if err == nil {
+		for _, v := range strings.Fields(extfeatures) {
+			c.Flags = append(c.Flags, strings.ToLower(v))
+		}
+	}
+	cores, _ := unix.SysctlUint32("machdep.cpu.core_count")
+	c.Cores = int32(cores)
+	cacheSize, _ := unix.SysctlUint32("machdep.cpu.cache.size")
+	c.CacheSize = int32(cacheSize)
+	c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor")
+
+	// Use the rated frequency of the CPU. This is a static value and does not
+	// account for low power or Turbo Boost modes.
+ cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") + if err != nil { + return ret, err + } + c.Mhz = float64(cpuFrequency) / 1000000.0 + + return append(ret, c), nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + var cpuArgument string + if logical { + cpuArgument = "hw.logicalcpu" + } else { + cpuArgument = "hw.physicalcpu" + } + + count, err := unix.SysctlUint32(cpuArgument) + if err != nil { + return 0, err + } + + return int(count), nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_cgo.go new file mode 100644 index 00000000..180e0afa --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_cgo.go @@ -0,0 +1,111 @@ +// +build darwin +// +build cgo + +package cpu + +/* +#include +#include +#include +#include +#include +#include +#if TARGET_OS_MAC +#include +#endif +#include +#include +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "fmt" + "unsafe" +) + +// these CPU times for darwin is borrowed from influxdb/telegraf. + +func perCPUTimes() ([]TimesStat, error) { + var ( + count C.mach_msg_type_number_t + cpuload *C.processor_cpu_load_info_data_t + ncpu C.natural_t + ) + + status := C.host_processor_info(C.host_t(C.mach_host_self()), + C.PROCESSOR_CPU_LOAD_INFO, + &ncpu, + (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_processor_info error=%d", status) + } + + // jump through some cgo casting hoops and ensure we properly free + // the memory that cpuload points to + target := C.vm_map_t(C.mach_task_self_) + address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) + defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) + + // the body of struct processor_cpu_load_info + // aka processor_cpu_load_info_data_t + var cpu_ticks [C.CPU_STATE_MAX]uint32 + + // copy the cpuload array to a []byte buffer + // where we can binary.Read the data + size := int(ncpu) * binary.Size(cpu_ticks) + buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size] + + bbuf := bytes.NewBuffer(buf) + + var ret []TimesStat + + for i := 0; i < int(ncpu); i++ { + err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) + if err != nil { + return nil, err + } + + c := TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, + System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, + Nice: float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, + Idle: float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, + } + + ret = append(ret, c) + } + + return ret, nil +} + +func allCPUTimes() ([]TimesStat, error) { + var count C.mach_msg_type_number_t + var cpuload C.host_cpu_load_info_data_t + + count = C.HOST_CPU_LOAD_INFO_COUNT + + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + c := TimesStat{ + CPU: "cpu-total", + User: float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, + System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, + Nice: float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, + Idle: float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, + } + + return []TimesStat{c}, nil + +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_nocgo.go new 
file mode 100644 index 00000000..242b4a8e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_nocgo.go @@ -0,0 +1,14 @@ +// +build darwin +// +build !cgo + +package cpu + +import "github.com/shirou/gopsutil/internal/common" + +func perCPUTimes() ([]TimesStat, error) { + return []TimesStat{}, common.ErrNotImplementedError +} + +func allCPUTimes() ([]TimesStat, error) { + return []TimesStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_fallback.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_fallback.go new file mode 100644 index 00000000..fbb06083 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_fallback.go @@ -0,0 +1,30 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows + +package cpu + +import ( + "context" + "runtime" + + "github.com/shirou/gopsutil/internal/common" +) + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + return []TimesStat{}, common.ErrNotImplementedError +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + return []InfoStat{}, common.ErrNotImplementedError +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd.go new file mode 100644 index 00000000..57beffae --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd.go @@ -0,0 +1,173 @@ +package cpu + +import ( + "context" + "fmt" + "os/exec" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +var ClocksPerSec = float64(128) +var cpuMatch = regexp.MustCompile(`^CPU:`) +var originMatch = regexp.MustCompile(`Origin\s*=\s*"(.+)"\s+Id\s*=\s*(.+)\s+Family\s*=\s*(.+)\s+Model\s*=\s*(.+)\s+Stepping\s*=\s*(.+)`) +var featuresMatch = regexp.MustCompile(`Features=.+<(.+)>`) +var featuresMatch2 = regexp.MustCompile(`Features2=[a-f\dx]+<(.+)>`) +var cpuEnd = regexp.MustCompile(`^Trying to mount root`) +var cpuCores = regexp.MustCompile(`FreeBSD/SMP: (\d*) package\(s\) x (\d*) core\(s\)`) +var cpuTimesSize int +var emptyTimes cpuTimes + +func init() { + getconf, err := exec.LookPath("getconf") + if err != nil { + return + } + out, err := invoke.Command(getconf, "CLK_TCK") + // ignore errors + if err == nil { + i, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64) + if err == nil { + ClocksPerSec = float64(i) + } + } +} + +func timeStat(name string, t *cpuTimes) *TimesStat { + return &TimesStat{ + User: float64(t.User) / ClocksPerSec, + Nice: float64(t.Nice) / ClocksPerSec, + System: float64(t.Sys) / ClocksPerSec, + Idle: float64(t.Idle) / ClocksPerSec, + Irq: float64(t.Intr) / ClocksPerSec, + CPU: name, + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + buf, err := unix.SysctlRaw("kern.cp_times") + if err != nil { + return nil, err + } + + // We can't do this in init due to the conflict with cpu.init() + if cpuTimesSize == 0 { + cpuTimesSize = int(reflect.TypeOf(cpuTimes{}).Size()) + } + + ncpus := len(buf) / cpuTimesSize + ret := make([]TimesStat, 0, 
ncpus) + for i := 0; i < ncpus; i++ { + times := (*cpuTimes)(unsafe.Pointer(&buf[i*cpuTimesSize])) + if *times == emptyTimes { + // CPU not present + continue + } + ret = append(ret, *timeStat(fmt.Sprintf("cpu%d", len(ret)), times)) + } + return ret, nil + } + + buf, err := unix.SysctlRaw("kern.cp_time") + if err != nil { + return nil, err + } + + times := (*cpuTimes)(unsafe.Pointer(&buf[0])) + return []TimesStat{*timeStat("cpu-total", times)}, nil +} + +// Returns only one InfoStat on FreeBSD. The information regarding core +// count, however is accurate and it is assumed that all InfoStat attributes +// are the same across CPUs. +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + const dmesgBoot = "/var/run/dmesg.boot" + + c, num, err := parseDmesgBoot(dmesgBoot) + if err != nil { + return nil, err + } + + var u32 uint32 + if u32, err = unix.SysctlUint32("hw.clockrate"); err != nil { + return nil, err + } + c.Mhz = float64(u32) + + if u32, err = unix.SysctlUint32("hw.ncpu"); err != nil { + return nil, err + } + c.Cores = int32(u32) + + if c.ModelName, err = unix.Sysctl("hw.model"); err != nil { + return nil, err + } + + ret := make([]InfoStat, num) + for i := 0; i < num; i++ { + ret[i] = c + } + + return ret, nil +} + +func parseDmesgBoot(fileName string) (InfoStat, int, error) { + c := InfoStat{} + lines, _ := common.ReadLines(fileName) + cpuNum := 1 // default cpu num is 1 + for _, line := range lines { + if matches := cpuEnd.FindStringSubmatch(line); matches != nil { + break + } else if matches := originMatch.FindStringSubmatch(line); matches != nil { + c.VendorID = matches[1] + c.Family = matches[3] + c.Model = matches[4] + t, err := strconv.ParseInt(matches[5], 10, 32) + if err != nil { + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %v", line, err) + } + c.Stepping = int32(t) + } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { + for _, v := range strings.Split(matches[1], ",") { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } else if matches := featuresMatch2.FindStringSubmatch(line); matches != nil { + for _, v := range strings.Split(matches[1], ",") { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } else if matches := cpuCores.FindStringSubmatch(line); matches != nil { + t, err := strconv.ParseInt(matches[1], 10, 32) + if err != nil { + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %v", line, err) + } + cpuNum = int(t) + t2, err := strconv.ParseInt(matches[2], 10, 32) + if err != nil { + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %v", line, err) + } + c.Cores = int32(t2) + } + } + + return c, cpuNum, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_386.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_386.go new file mode 100644 index 00000000..8b7f4c32 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_386.go @@ -0,0 +1,9 @@ +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_amd64.go new file mode 100644 index 00000000..57e14528 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_amd64.go @@ -0,0 +1,9 @@ 
+package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_arm.go new file mode 100644 index 00000000..8b7f4c32 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_arm.go @@ -0,0 +1,9 @@ +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_linux.go new file mode 100644 index 00000000..be98dd79 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_linux.go @@ -0,0 +1,352 @@ +// +build linux + +package cpu + +import ( + "context" + "errors" + "fmt" + "os/exec" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" +) + +var CPUTick = float64(100) + +func init() { + getconf, err := exec.LookPath("getconf") + if err != nil { + return + } + out, err := invoke.CommandWithContext(context.Background(), getconf, "CLK_TCK") + // ignore errors + if err == nil { + i, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64) + if err == nil { + CPUTick = i + } + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + filename := common.HostProc("stat") + var lines = []string{} + if percpu { + statlines, err := common.ReadLines(filename) + if err != nil || len(statlines) < 2 { + return []TimesStat{}, nil + } + for _, line := range statlines[1:] { + if !strings.HasPrefix(line, "cpu") { + break + } + lines = append(lines, line) + } + } else { + lines, _ = common.ReadLinesOffsetN(filename, 0, 1) + } + + ret := make([]TimesStat, 0, len(lines)) + + for _, line := range lines { + ct, err := parseStatLine(line) + if err != nil { + continue + } + ret = append(ret, *ct) + + } + return ret, nil +} + +func sysCPUPath(cpu int32, relPath string) string { + return common.HostSys(fmt.Sprintf("devices/system/cpu/cpu%d", cpu), relPath) +} + +func finishCPUInfo(c *InfoStat) error { + var lines []string + var err error + var value float64 + + if len(c.CoreID) == 0 { + lines, err = common.ReadLines(sysCPUPath(c.CPU, "topology/core_id")) + if err == nil { + c.CoreID = lines[0] + } + } + + // override the value of c.Mhz with cpufreq/cpuinfo_max_freq regardless + // of the value from /proc/cpuinfo because we want to report the maximum + // clock-speed of the CPU for c.Mhz, matching the behaviour of Windows + lines, err = common.ReadLines(sysCPUPath(c.CPU, "cpufreq/cpuinfo_max_freq")) + // if we encounter errors below such as there are no cpuinfo_max_freq file, + // we just ignore. so let Mhz is 0. + if err != nil { + return nil + } + value, err = strconv.ParseFloat(lines[0], 64) + if err != nil { + return nil + } + c.Mhz = value / 1000.0 // value is in kHz + if c.Mhz > 9999 { + c.Mhz = c.Mhz / 1000.0 // value in Hz + } + return nil +} + +// CPUInfo on linux will return 1 item per physical thread. +// +// CPUs have three levels of counting: sockets, cores, threads. +// Cores with HyperThreading count as having 2 threads per core. +// Sockets often come with many physical CPU cores. +// For example a single socket board with two cores each with HT will +// return 4 CPUInfoStat structs on Linux and the "Cores" field set to 1. 
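+//
+// A sketch of how the counting levels relate on that example board
+// (assuming this package is imported as cpu):
+//
+//	logical, _ := cpu.Counts(true)   // 4: sockets * cores * threads
+//	physical, _ := cpu.Counts(false) // 2: sockets * cores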
+func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + filename := common.HostProc("cpuinfo") + lines, _ := common.ReadLines(filename) + + var ret []InfoStat + var processorName string + + c := InfoStat{CPU: -1, Cores: 1} + for _, line := range lines { + fields := strings.Split(line, ":") + if len(fields) < 2 { + continue + } + key := strings.TrimSpace(fields[0]) + value := strings.TrimSpace(fields[1]) + + switch key { + case "Processor": + processorName = value + case "processor": + if c.CPU >= 0 { + err := finishCPUInfo(&c) + if err != nil { + return ret, err + } + ret = append(ret, c) + } + c = InfoStat{Cores: 1, ModelName: processorName} + t, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return ret, err + } + c.CPU = int32(t) + case "vendorId", "vendor_id": + c.VendorID = value + case "cpu family": + c.Family = value + case "model": + c.Model = value + case "model name", "cpu": + c.ModelName = value + if strings.Contains(value, "POWER8") || + strings.Contains(value, "POWER7") { + c.Model = strings.Split(value, " ")[0] + c.Family = "POWER" + c.VendorID = "IBM" + } + case "stepping", "revision": + val := value + + if key == "revision" { + val = strings.Split(value, ".")[0] + } + + t, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return ret, err + } + c.Stepping = int32(t) + case "cpu MHz", "clock": + // treat this as the fallback value, thus we ignore error + if t, err := strconv.ParseFloat(strings.Replace(value, "MHz", "", 1), 64); err == nil { + c.Mhz = t + } + case "cache size": + t, err := strconv.ParseInt(strings.Replace(value, " KB", "", 1), 10, 64) + if err != nil { + return ret, err + } + c.CacheSize = int32(t) + case "physical id": + c.PhysicalID = value + case "core id": + c.CoreID = value + case "flags", "Features": + c.Flags = strings.FieldsFunc(value, func(r rune) bool { + return r == ',' || r == ' ' + }) + case "microcode": + c.Microcode = value + } + } + if c.CPU >= 0 { + err := finishCPUInfo(&c) + if err != nil { + return ret, err + } + ret = append(ret, c) + } + return ret, nil +} + +func parseStatLine(line string) (*TimesStat, error) { + fields := strings.Fields(line) + + if len(fields) == 0 { + return nil, errors.New("stat does not contain cpu info") + } + + if strings.HasPrefix(fields[0], "cpu") == false { + return nil, errors.New("not contain cpu") + } + + cpu := fields[0] + if cpu == "cpu" { + cpu = "cpu-total" + } + user, err := strconv.ParseFloat(fields[1], 64) + if err != nil { + return nil, err + } + nice, err := strconv.ParseFloat(fields[2], 64) + if err != nil { + return nil, err + } + system, err := strconv.ParseFloat(fields[3], 64) + if err != nil { + return nil, err + } + idle, err := strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, err + } + iowait, err := strconv.ParseFloat(fields[5], 64) + if err != nil { + return nil, err + } + irq, err := strconv.ParseFloat(fields[6], 64) + if err != nil { + return nil, err + } + softirq, err := strconv.ParseFloat(fields[7], 64) + if err != nil { + return nil, err + } + + ct := &TimesStat{ + CPU: cpu, + User: user / CPUTick, + Nice: nice / CPUTick, + System: system / CPUTick, + Idle: idle / CPUTick, + Iowait: iowait / CPUTick, + Irq: irq / CPUTick, + Softirq: softirq / CPUTick, + } + if len(fields) > 8 { // Linux >= 2.6.11 + steal, err := strconv.ParseFloat(fields[8], 64) + if err != nil { + return nil, err + } + ct.Steal = steal / CPUTick + } + if len(fields) > 9 { // Linux >= 
2.6.24 + guest, err := strconv.ParseFloat(fields[9], 64) + if err != nil { + return nil, err + } + ct.Guest = guest / CPUTick + } + if len(fields) > 10 { // Linux >= 3.2.0 + guestNice, err := strconv.ParseFloat(fields[10], 64) + if err != nil { + return nil, err + } + ct.GuestNice = guestNice / CPUTick + } + + return ct, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + if logical { + ret := 0 + // https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_pslinux.py#L599 + procCpuinfo := common.HostProc("cpuinfo") + lines, err := common.ReadLines(procCpuinfo) + if err == nil { + for _, line := range lines { + line = strings.ToLower(line) + if strings.HasPrefix(line, "processor") { + ret++ + } + } + } + if ret == 0 { + procStat := common.HostProc("stat") + lines, err = common.ReadLines(procStat) + if err != nil { + return 0, err + } + for _, line := range lines { + if len(line) >= 4 && strings.HasPrefix(line, "cpu") && '0' <= line[3] && line[3] <= '9' { // `^cpu\d` regexp matching + ret++ + } + } + } + return ret, nil + } + // physical cores https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_pslinux.py#L628 + filename := common.HostProc("cpuinfo") + lines, err := common.ReadLines(filename) + if err != nil { + return 0, err + } + mapping := make(map[int]int) + currentInfo := make(map[string]int) + for _, line := range lines { + line = strings.ToLower(strings.TrimSpace(line)) + if line == "" { + // new section + id, okID := currentInfo["physical id"] + cores, okCores := currentInfo["cpu cores"] + if okID && okCores { + mapping[id] = cores + } + currentInfo = make(map[string]int) + continue + } + fields := strings.Split(line, ":") + if len(fields) < 2 { + continue + } + fields[0] = strings.TrimSpace(fields[0]) + if fields[0] == "physical id" || fields[0] == "cpu cores" { + val, err := strconv.Atoi(strings.TrimSpace(fields[1])) + if err != nil { + continue + } + currentInfo[fields[0]] = val + } + } + ret := 0 + for _, v := range mapping { + ret += v + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_openbsd.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_openbsd.go new file mode 100644 index 00000000..92a8bd75 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_openbsd.go @@ -0,0 +1,195 @@ +// +build openbsd + +package cpu + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "os/exec" + "runtime" + "strconv" + "strings" + "syscall" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +// sys/sched.h +var ( + CPUser = 0 + CPNice = 1 + CPSys = 2 + CPIntr = 3 + CPIdle = 4 + CPUStates = 5 +) + +// sys/sysctl.h +const ( + CTLKern = 1 // "high kernel": proc, limits + CTLHw = 6 // CTL_HW + SMT = 24 // HW_SMT + NCpuOnline = 25 // HW_NCPUONLINE + KernCptime = 40 // KERN_CPTIME + KernCptime2 = 71 // KERN_CPTIME2 +) + +var ClocksPerSec = float64(128) + +func init() { + func() { + getconf, err := exec.LookPath("getconf") + if err != nil { + return + } + out, err := invoke.Command(getconf, "CLK_TCK") + // ignore errors + if err == nil { + i, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64) + if err == nil { + ClocksPerSec = float64(i) + } + } + }() + func() { + v, err := unix.Sysctl("kern.osrelease") // can't reuse host.PlatformInformation because of circular import + if err != nil { + return + } + v = strings.ToLower(v) + version, err := strconv.ParseFloat(v, 64) + if err != nil { + return + } + if version >= 6.4 { 
+ CPIntr = 4 + CPIdle = 5 + CPUStates = 6 + } + }() +} + +func smt() (bool, error) { + mib := []int32{CTLHw, SMT} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return false, err + } + + var ret bool + br := bytes.NewReader(buf) + if err := binary.Read(br, binary.LittleEndian, &ret); err != nil { + return false, err + } + + return ret, nil +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + var ret []TimesStat + + var ncpu int + if percpu { + ncpu, _ = Counts(true) + } else { + ncpu = 1 + } + + smt, err := smt() + if err == syscall.EOPNOTSUPP { + // if hw.smt is not applicable for this platform (e.g. i386), + // pretend it's enabled + smt = true + } else if err != nil { + return nil, err + } + + for i := 0; i < ncpu; i++ { + j := i + if !smt { + j *= 2 + } + + var cpuTimes = make([]int32, CPUStates) + var mib []int32 + if percpu { + mib = []int32{CTLKern, KernCptime2, int32(j)} + } else { + mib = []int32{CTLKern, KernCptime} + } + buf, _, err := common.CallSyscall(mib) + if err != nil { + return ret, err + } + + br := bytes.NewReader(buf) + err = binary.Read(br, binary.LittleEndian, &cpuTimes) + if err != nil { + return ret, err + } + c := TimesStat{ + User: float64(cpuTimes[CPUser]) / ClocksPerSec, + Nice: float64(cpuTimes[CPNice]) / ClocksPerSec, + System: float64(cpuTimes[CPSys]) / ClocksPerSec, + Idle: float64(cpuTimes[CPIdle]) / ClocksPerSec, + Irq: float64(cpuTimes[CPIntr]) / ClocksPerSec, + } + if percpu { + c.CPU = fmt.Sprintf("cpu%d", j) + } else { + c.CPU = "cpu-total" + } + ret = append(ret, c) + } + + return ret, nil +} + +// Returns only one (minimal) CPUInfoStat on OpenBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + var err error + + c := InfoStat{} + + var u32 uint32 + if u32, err = unix.SysctlUint32("hw.cpuspeed"); err != nil { + return nil, err + } + c.Mhz = float64(u32) + + mib := []int32{CTLHw, NCpuOnline} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } + + var ncpu int32 + br := bytes.NewReader(buf) + err = binary.Read(br, binary.LittleEndian, &ncpu) + if err != nil { + return nil, err + } + c.Cores = ncpu + + if c.ModelName, err = unix.Sysctl("hw.model"); err != nil { + return nil, err + } + + return append(ret, c), nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_solaris.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_solaris.go new file mode 100644 index 00000000..3de09842 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_solaris.go @@ -0,0 +1,286 @@ +package cpu + +import ( + "context" + "errors" + "fmt" + "os/exec" + "regexp" + "runtime" + "sort" + "strconv" + "strings" +) + +var ClocksPerSec = float64(128) + +func init() { + getconf, err := exec.LookPath("getconf") + if err != nil { + return + } + out, err := invoke.Command(getconf, "CLK_TCK") + // ignore errors + if err == nil { + i, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64) + if err == nil { + ClocksPerSec = float64(i) + } + } +} + +//sum all values in a float64 map with float64 keys +func msum(x map[float64]float64) float64 { + total := 0.0 + for _, y := range x { + total += y + } + return total +} + +func Times(percpu bool) ([]TimesStat, 
error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + kstatSys, err := exec.LookPath("kstat") + if err != nil { + return nil, fmt.Errorf("cannot find kstat: %s", err) + } + cpu := make(map[float64]float64) + idle := make(map[float64]float64) + user := make(map[float64]float64) + kern := make(map[float64]float64) + iowt := make(map[float64]float64) + //swap := make(map[float64]float64) + kstatSysOut, err := invoke.CommandWithContext(ctx, kstatSys, "-p", "cpu_stat:*:*:/^idle$|^user$|^kernel$|^iowait$|^swap$/") + if err != nil { + return nil, fmt.Errorf("cannot execute kstat: %s", err) + } + re := regexp.MustCompile(`[:\s]+`) + for _, line := range strings.Split(string(kstatSysOut), "\n") { + fields := re.Split(line, -1) + if fields[0] != "cpu_stat" { + continue + } + cpuNumber, err := strconv.ParseFloat(fields[1], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse cpu number: %s", err) + } + cpu[cpuNumber] = cpuNumber + switch fields[3] { + case "idle": + idle[cpuNumber], err = strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse idle: %s", err) + } + case "user": + user[cpuNumber], err = strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse user: %s", err) + } + case "kernel": + kern[cpuNumber], err = strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse kernel: %s", err) + } + case "iowait": + iowt[cpuNumber], err = strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse iowait: %s", err) + } + //not sure how this translates, don't report, add to kernel, something else? + /*case "swap": + swap[cpuNumber], err = strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse swap: %s", err) + } */ + } + } + ret := make([]TimesStat, 0, len(cpu)) + if percpu { + for _, c := range cpu { + ct := &TimesStat{ + CPU: fmt.Sprintf("cpu%d", int(cpu[c])), + Idle: idle[c] / ClocksPerSec, + User: user[c] / ClocksPerSec, + System: kern[c] / ClocksPerSec, + Iowait: iowt[c] / ClocksPerSec, + } + ret = append(ret, *ct) + } + } else { + ct := &TimesStat{ + CPU: "cpu-total", + Idle: msum(idle) / ClocksPerSec, + User: msum(user) / ClocksPerSec, + System: msum(kern) / ClocksPerSec, + Iowait: msum(iowt) / ClocksPerSec, + } + ret = append(ret, *ct) + } + return ret, nil +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + psrInfo, err := exec.LookPath("psrinfo") + if err != nil { + return nil, fmt.Errorf("cannot find psrinfo: %s", err) + } + psrInfoOut, err := invoke.CommandWithContext(ctx, psrInfo, "-p", "-v") + if err != nil { + return nil, fmt.Errorf("cannot execute psrinfo: %s", err) + } + + isaInfo, err := exec.LookPath("isainfo") + if err != nil { + return nil, fmt.Errorf("cannot find isainfo: %s", err) + } + isaInfoOut, err := invoke.CommandWithContext(ctx, isaInfo, "-b", "-v") + if err != nil { + return nil, fmt.Errorf("cannot execute isainfo: %s", err) + } + + procs, err := parseProcessorInfo(string(psrInfoOut)) + if err != nil { + return nil, fmt.Errorf("error parsing psrinfo output: %s", err) + } + + flags, err := parseISAInfo(string(isaInfoOut)) + if err != nil { + return nil, fmt.Errorf("error parsing isainfo output: %s", err) + } + + result := make([]InfoStat, 0, len(flags)) + for _, proc := range procs { + 
procWithFlags := proc + procWithFlags.Flags = flags + result = append(result, procWithFlags) + } + + return result, nil +} + +var flagsMatch = regexp.MustCompile(`[\w\.]+`) + +func parseISAInfo(cmdOutput string) ([]string, error) { + words := flagsMatch.FindAllString(cmdOutput, -1) + + // Sanity check the output + if len(words) < 4 || words[1] != "bit" || words[3] != "applications" { + return nil, errors.New("attempted to parse invalid isainfo output") + } + + flags := make([]string, len(words)-4) + for i, val := range words[4:] { + flags[i] = val + } + sort.Strings(flags) + + return flags, nil +} + +var psrInfoMatch = regexp.MustCompile(`The physical processor has (?:([\d]+) virtual processor \(([\d]+)\)|([\d]+) cores and ([\d]+) virtual processors[^\n]+)\n(?:\s+ The core has.+\n)*\s+.+ \((\w+) ([\S]+) family (.+) model (.+) step (.+) clock (.+) MHz\)\n[\s]*(.*)`) + +const ( + psrNumCoresOffset = 1 + psrNumCoresHTOffset = 3 + psrNumHTOffset = 4 + psrVendorIDOffset = 5 + psrFamilyOffset = 7 + psrModelOffset = 8 + psrStepOffset = 9 + psrClockOffset = 10 + psrModelNameOffset = 11 +) + +func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { + matches := psrInfoMatch.FindAllStringSubmatch(cmdOutput, -1) + + var infoStatCount int32 + result := make([]InfoStat, 0, len(matches)) + for physicalIndex, physicalCPU := range matches { + var step int32 + var clock float64 + + if physicalCPU[psrStepOffset] != "" { + stepParsed, err := strconv.ParseInt(physicalCPU[psrStepOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for step as 32-bit integer: %s", physicalCPU[9], err) + } + step = int32(stepParsed) + } + + if physicalCPU[psrClockOffset] != "" { + clockParsed, err := strconv.ParseInt(physicalCPU[psrClockOffset], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for clock as 32-bit integer: %s", physicalCPU[10], err) + } + clock = float64(clockParsed) + } + + var err error + var numCores int64 + var numHT int64 + switch { + case physicalCPU[psrNumCoresOffset] != "": + numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[1], err) + } + + for i := 0; i < int(numCores); i++ { + result = append(result, InfoStat{ + CPU: infoStatCount, + PhysicalID: strconv.Itoa(physicalIndex), + CoreID: strconv.Itoa(i), + Cores: 1, + VendorID: physicalCPU[psrVendorIDOffset], + ModelName: physicalCPU[psrModelNameOffset], + Family: physicalCPU[psrFamilyOffset], + Model: physicalCPU[psrModelOffset], + Stepping: step, + Mhz: clock, + }) + infoStatCount++ + } + case physicalCPU[psrNumCoresHTOffset] != "": + numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresHTOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[3], err) + } + + numHT, err = strconv.ParseInt(physicalCPU[psrNumHTOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for hyperthread count as 32-bit integer: %s", physicalCPU[4], err) + } + + for i := 0; i < int(numCores); i++ { + result = append(result, InfoStat{ + CPU: infoStatCount, + PhysicalID: strconv.Itoa(physicalIndex), + CoreID: strconv.Itoa(i), + Cores: int32(numHT) / int32(numCores), + VendorID: physicalCPU[psrVendorIDOffset], + ModelName: physicalCPU[psrModelNameOffset], + Family: physicalCPU[psrFamilyOffset], + Model: physicalCPU[psrModelOffset], + Stepping: step, + Mhz: clock, + }) + 
infoStatCount++ + } + default: + return nil, errors.New("values for cores with and without hyperthreading are both set") + } + } + return result, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_windows.go new file mode 100644 index 00000000..97c0e342 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_windows.go @@ -0,0 +1,255 @@ +// +build windows + +package cpu + +import ( + "context" + "fmt" + "unsafe" + + "github.com/StackExchange/wmi" + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/windows" +) + +var ( + procGetActiveProcessorCount = common.Modkernel32.NewProc("GetActiveProcessorCount") + procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") +) + +type Win32_Processor struct { + LoadPercentage *uint16 + Family uint16 + Manufacturer string + Name string + NumberOfLogicalProcessors uint32 + NumberOfCores uint32 + ProcessorID *string + Stepping *string + MaxClockSpeed uint32 +} + +// SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION +// defined in windows api doc with the following +// https://docs.microsoft.com/en-us/windows/desktop/api/winternl/nf-winternl-ntquerysysteminformation#system_processor_performance_information +// additional fields documented here +// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/ex/sysinfo/processor_performance.htm +type win32_SystemProcessorPerformanceInformation struct { + IdleTime int64 // idle time in 100ns (this is not a filetime). + KernelTime int64 // kernel time in 100ns. kernel time includes idle time. (this is not a filetime). + UserTime int64 // usertime in 100ns (this is not a filetime). + DpcTime int64 // dpc time in 100ns (this is not a filetime). 
+	InterruptTime  int64 // interrupt time in 100ns
+	InterruptCount uint32
+}
+
+// Win32_PerfFormattedData_PerfOS_System holds the process count and the
+// processor queue length.
+type Win32_PerfFormattedData_PerfOS_System struct {
+	Processes            uint32
+	ProcessorQueueLength uint32
+}
+
+const (
+	win32_TicksPerSecond = 10000000.0
+
+	// systemProcessorPerformanceInformationClass information class to query with NTQuerySystemInformation
+	// https://processhacker.sourceforge.io/doc/ntexapi_8h.html#ad5d815b48e8f4da1ef2eb7a2f18a54e0
+	win32_SystemProcessorPerformanceInformationClass = 8
+
+	// size of systemProcessorPerformanceInfoSize in memory
+	win32_SystemProcessorPerformanceInfoSize = uint32(unsafe.Sizeof(win32_SystemProcessorPerformanceInformation{}))
+)
+
+// Times returns CPU time stats, either per CPU or combined across all CPUs.
+func Times(percpu bool) ([]TimesStat, error) {
+	return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+	if percpu {
+		return perCPUTimes()
+	}
+
+	var ret []TimesStat
+	var lpIdleTime common.FILETIME
+	var lpKernelTime common.FILETIME
+	var lpUserTime common.FILETIME
+	r, _, _ := common.ProcGetSystemTimes.Call(
+		uintptr(unsafe.Pointer(&lpIdleTime)),
+		uintptr(unsafe.Pointer(&lpKernelTime)),
+		uintptr(unsafe.Pointer(&lpUserTime)))
+	if r == 0 {
+		return ret, windows.GetLastError()
+	}
+
+	LOT := float64(0.0000001)
+	HIT := (LOT * 4294967296.0)
+	idle := ((HIT * float64(lpIdleTime.DwHighDateTime)) + (LOT * float64(lpIdleTime.DwLowDateTime)))
+	user := ((HIT * float64(lpUserTime.DwHighDateTime)) + (LOT * float64(lpUserTime.DwLowDateTime)))
+	kernel := ((HIT * float64(lpKernelTime.DwHighDateTime)) + (LOT * float64(lpKernelTime.DwLowDateTime)))
+	system := (kernel - idle)
+
+	ret = append(ret, TimesStat{
+		CPU:    "cpu-total",
+		Idle:   float64(idle),
+		User:   float64(user),
+		System: float64(system),
+	})
+	return ret, nil
+}
+
+func Info() ([]InfoStat, error) {
+	return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+	var ret []InfoStat
+	var dst []Win32_Processor
+	q := wmi.CreateQuery(&dst, "")
+	if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil {
+		return ret, err
+	}
+
+	var procID string
+	for i, l := range dst {
+		procID = ""
+		if l.ProcessorID != nil {
+			procID = *l.ProcessorID
+		}
+
+		cpu := InfoStat{
+			CPU:        int32(i),
+			Family:     fmt.Sprintf("%d", l.Family),
+			VendorID:   l.Manufacturer,
+			ModelName:  l.Name,
+			Cores:      int32(l.NumberOfLogicalProcessors),
+			PhysicalID: procID,
+			Mhz:        float64(l.MaxClockSpeed),
+			Flags:      []string{},
+		}
+		ret = append(ret, cpu)
+	}
+
+	return ret, nil
+}
+
+// ProcInfo returns the process count and the processor queue length of the
+// system. There is a single processor queue even on multiprocessor systems.
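+//
+// Usage sketch (Windows; assuming this package is imported as cpu):
+//
+//	stats, err := cpu.ProcInfo()
+//	if err == nil && len(stats) > 0 {
+//		fmt.Printf("%d processes, queue length %d\n",
+//			stats[0].Processes, stats[0].ProcessorQueueLength)
+//	}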
+func ProcInfo() ([]Win32_PerfFormattedData_PerfOS_System, error) { + return ProcInfoWithContext(context.Background()) +} + +func ProcInfoWithContext(ctx context.Context) ([]Win32_PerfFormattedData_PerfOS_System, error) { + var ret []Win32_PerfFormattedData_PerfOS_System + q := wmi.CreateQuery(&ret, "") + err := common.WMIQueryWithContext(ctx, q, &ret) + if err != nil { + return []Win32_PerfFormattedData_PerfOS_System{}, err + } + return ret, err +} + +// perCPUTimes returns times stat per cpu, per core and overall for all CPUs +func perCPUTimes() ([]TimesStat, error) { + var ret []TimesStat + stats, err := perfInfo() + if err != nil { + return nil, err + } + for core, v := range stats { + c := TimesStat{ + CPU: fmt.Sprintf("cpu%d", core), + User: float64(v.UserTime) / win32_TicksPerSecond, + System: float64(v.KernelTime-v.IdleTime) / win32_TicksPerSecond, + Idle: float64(v.IdleTime) / win32_TicksPerSecond, + Irq: float64(v.InterruptTime) / win32_TicksPerSecond, + } + ret = append(ret, c) + } + return ret, nil +} + +// makes call to Windows API function to retrieve performance information for each core +func perfInfo() ([]win32_SystemProcessorPerformanceInformation, error) { + // Make maxResults large for safety. + // We can't invoke the api call with a results array that's too small. + // If we have more than 2056 cores on a single host, then it's probably the future. + maxBuffer := 2056 + // buffer for results from the windows proc + resultBuffer := make([]win32_SystemProcessorPerformanceInformation, maxBuffer) + // size of the buffer in memory + bufferSize := uintptr(win32_SystemProcessorPerformanceInfoSize) * uintptr(maxBuffer) + // size of the returned response + var retSize uint32 + + // Invoke windows api proc. + // The returned err from the windows dll proc will always be non-nil even when successful. + // See https://godoc.org/golang.org/x/sys/windows#LazyProc.Call for more information + retCode, _, err := common.ProcNtQuerySystemInformation.Call( + win32_SystemProcessorPerformanceInformationClass, // System Information Class -> SystemProcessorPerformanceInformation + uintptr(unsafe.Pointer(&resultBuffer[0])), // pointer to first element in result buffer + bufferSize, // size of the buffer in memory + uintptr(unsafe.Pointer(&retSize)), // pointer to the size of the returned results the windows proc will set this + ) + + // check return code for errors + if retCode != 0 { + return nil, fmt.Errorf("call to NtQuerySystemInformation returned %d. err: %s", retCode, err.Error()) + } + + // calculate the number of returned elements based on the returned size + numReturnedElements := retSize / win32_SystemProcessorPerformanceInfoSize + + // trim results to the number of returned elements + resultBuffer = resultBuffer[:numReturnedElements] + + return resultBuffer, nil +} + +// SystemInfo is an equivalent representation of SYSTEM_INFO in the Windows API. 
+// https://msdn.microsoft.com/en-us/library/ms724958%28VS.85%29.aspx?f=255&MSPPError=-2147217396 +// https://github.com/elastic/go-windows/blob/bb1581babc04d5cb29a2bfa7a9ac6781c730c8dd/kernel32.go#L43 +type systemInfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + if logical { + // https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L97 + err := procGetActiveProcessorCount.Find() + if err == nil { // Win7+ + ret, _, _ := procGetActiveProcessorCount.Call(uintptr(0xffff)) // ALL_PROCESSOR_GROUPS is 0xffff according to Rust's winapi lib https://docs.rs/winapi/*/x86_64-pc-windows-msvc/src/winapi/shared/ntdef.rs.html#120 + if ret != 0 { + return int(ret), nil + } + } + var systemInfo systemInfo + _, _, err = procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) + if systemInfo.dwNumberOfProcessors == 0 { + return 0, err + } + return int(systemInfo.dwNumberOfProcessors), nil + } + // physical cores https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L499 + // for the time being, try with unreliable and slow WMI call… + var dst []Win32_Processor + q := wmi.CreateQuery(&dst, "") + if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil { + return 0, err + } + var count uint32 + for _, d := range dst { + count += d.NumberOfCores + } + return int(count), nil +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk.go b/vendor/github.com/shirou/gopsutil/disk/disk.go new file mode 100644 index 00000000..38d8a8f1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk.go @@ -0,0 +1,61 @@ +package disk + +import ( + "encoding/json" + + "github.com/shirou/gopsutil/internal/common" +) + +var invoke common.Invoker = common.Invoke{} + +type UsageStat struct { + Path string `json:"path"` + Fstype string `json:"fstype"` + Total uint64 `json:"total"` + Free uint64 `json:"free"` + Used uint64 `json:"used"` + UsedPercent float64 `json:"usedPercent"` + InodesTotal uint64 `json:"inodesTotal"` + InodesUsed uint64 `json:"inodesUsed"` + InodesFree uint64 `json:"inodesFree"` + InodesUsedPercent float64 `json:"inodesUsedPercent"` +} + +type PartitionStat struct { + Device string `json:"device"` + Mountpoint string `json:"mountpoint"` + Fstype string `json:"fstype"` + Opts string `json:"opts"` +} + +type IOCountersStat struct { + ReadCount uint64 `json:"readCount"` + MergedReadCount uint64 `json:"mergedReadCount"` + WriteCount uint64 `json:"writeCount"` + MergedWriteCount uint64 `json:"mergedWriteCount"` + ReadBytes uint64 `json:"readBytes"` + WriteBytes uint64 `json:"writeBytes"` + ReadTime uint64 `json:"readTime"` + WriteTime uint64 `json:"writeTime"` + IopsInProgress uint64 `json:"iopsInProgress"` + IoTime uint64 `json:"ioTime"` + WeightedIO uint64 `json:"weightedIO"` + Name string `json:"name"` + SerialNumber string `json:"serialNumber"` + Label string `json:"label"` +} + +func (d UsageStat) String() string { + s, _ := json.Marshal(d) + return string(s) +} + +func (d PartitionStat) String() string { + s, _ := json.Marshal(d) + return string(s) +} + +func (d IOCountersStat) String() string { + s, _ := json.Marshal(d) + 
return string(s) +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin.c b/vendor/github.com/shirou/gopsutil/disk/disk_darwin.c new file mode 100644 index 00000000..198d3d1a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin.c @@ -0,0 +1,131 @@ +// https://github.com/lufia/iostat/blob/9f7362b77ad333b26c01c99de52a11bdb650ded2/iostat_darwin.c +#include +#include +#include "disk_darwin.h" + +#define IOKIT 1 /* to get io_name_t in device_types.h */ + +#include +#include +#include +#include + +#include + +static int getdrivestat(io_registry_entry_t d, DriveStats *stat); +static int fillstat(io_registry_entry_t d, DriveStats *stat); + +int +readdrivestat(DriveStats a[], int n) +{ + mach_port_t port; + CFMutableDictionaryRef match; + io_iterator_t drives; + io_registry_entry_t d; + kern_return_t status; + int na, rv; + + IOMasterPort(bootstrap_port, &port); + match = IOServiceMatching("IOMedia"); + CFDictionaryAddValue(match, CFSTR(kIOMediaWholeKey), kCFBooleanTrue); + status = IOServiceGetMatchingServices(port, match, &drives); + if(status != KERN_SUCCESS) + return -1; + + na = 0; + while(na < n && (d=IOIteratorNext(drives)) > 0){ + rv = getdrivestat(d, &a[na]); + if(rv < 0) + return -1; + if(rv > 0) + na++; + IOObjectRelease(d); + } + IOObjectRelease(drives); + return na; +} + +static int +getdrivestat(io_registry_entry_t d, DriveStats *stat) +{ + io_registry_entry_t parent; + kern_return_t status; + CFDictionaryRef props; + CFStringRef name; + CFNumberRef num; + int rv; + + memset(stat, 0, sizeof *stat); + status = IORegistryEntryGetParentEntry(d, kIOServicePlane, &parent); + if(status != KERN_SUCCESS) + return -1; + if(!IOObjectConformsTo(parent, "IOBlockStorageDriver")){ + IOObjectRelease(parent); + return 0; + } + + status = IORegistryEntryCreateCFProperties(d, (CFMutableDictionaryRef *)&props, kCFAllocatorDefault, kNilOptions); + if(status != KERN_SUCCESS){ + IOObjectRelease(parent); + return -1; + } + name = (CFStringRef)CFDictionaryGetValue(props, CFSTR(kIOBSDNameKey)); + CFStringGetCString(name, stat->name, NAMELEN, CFStringGetSystemEncoding()); + num = (CFNumberRef)CFDictionaryGetValue(props, CFSTR(kIOMediaSizeKey)); + CFNumberGetValue(num, kCFNumberSInt64Type, &stat->size); + num = (CFNumberRef)CFDictionaryGetValue(props, CFSTR(kIOMediaPreferredBlockSizeKey)); + CFNumberGetValue(num, kCFNumberSInt64Type, &stat->blocksize); + CFRelease(props); + + rv = fillstat(parent, stat); + IOObjectRelease(parent); + if(rv < 0) + return -1; + return 1; +} + +static struct { + char *key; + size_t off; +} statstab[] = { + {kIOBlockStorageDriverStatisticsBytesReadKey, offsetof(DriveStats, read)}, + {kIOBlockStorageDriverStatisticsBytesWrittenKey, offsetof(DriveStats, written)}, + {kIOBlockStorageDriverStatisticsReadsKey, offsetof(DriveStats, nread)}, + {kIOBlockStorageDriverStatisticsWritesKey, offsetof(DriveStats, nwrite)}, + {kIOBlockStorageDriverStatisticsTotalReadTimeKey, offsetof(DriveStats, readtime)}, + {kIOBlockStorageDriverStatisticsTotalWriteTimeKey, offsetof(DriveStats, writetime)}, + {kIOBlockStorageDriverStatisticsLatentReadTimeKey, offsetof(DriveStats, readlat)}, + {kIOBlockStorageDriverStatisticsLatentWriteTimeKey, offsetof(DriveStats, writelat)}, +}; + +static int +fillstat(io_registry_entry_t d, DriveStats *stat) +{ + CFDictionaryRef props, v; + CFNumberRef num; + kern_return_t status; + typeof(statstab[0]) *bp, *ep; + + status = IORegistryEntryCreateCFProperties(d, (CFMutableDictionaryRef *)&props, kCFAllocatorDefault, kNilOptions); + if(status != 
KERN_SUCCESS) + return -1; + v = (CFDictionaryRef)CFDictionaryGetValue(props, CFSTR(kIOBlockStorageDriverStatisticsKey)); + if(v == NULL){ + CFRelease(props); + return -1; + } + + ep = &statstab[sizeof(statstab)/sizeof(statstab[0])]; + for(bp = &statstab[0]; bp < ep; bp++){ + CFStringRef s; + + s = CFStringCreateWithCString(kCFAllocatorDefault, bp->key, CFStringGetSystemEncoding()); + num = (CFNumberRef)CFDictionaryGetValue(v, s); + if(num) + CFNumberGetValue(num, kCFNumberSInt64Type, ((char*)stat)+bp->off); + CFRelease(s); + } + + CFRelease(props); + return 0; +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin.go new file mode 100644 index 00000000..2b1d000d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin.go @@ -0,0 +1,118 @@ +// +build darwin + +package disk + +import ( + "context" + "path" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + var ret []PartitionStat + + count, err := Getfsstat(nil, MntWait) + if err != nil { + return ret, err + } + fs := make([]Statfs, count) + if _, err = Getfsstat(fs, MntWait); err != nil { + return ret, err + } + for _, stat := range fs { + opts := "rw" + if stat.Flags&MntReadOnly != 0 { + opts = "ro" + } + if stat.Flags&MntSynchronous != 0 { + opts += ",sync" + } + if stat.Flags&MntNoExec != 0 { + opts += ",noexec" + } + if stat.Flags&MntNoSuid != 0 { + opts += ",nosuid" + } + if stat.Flags&MntUnion != 0 { + opts += ",union" + } + if stat.Flags&MntAsync != 0 { + opts += ",async" + } + if stat.Flags&MntSuidDir != 0 { + opts += ",suiddir" + } + if stat.Flags&MntSoftDep != 0 { + opts += ",softdep" + } + if stat.Flags&MntNoSymFollow != 0 { + opts += ",nosymfollow" + } + if stat.Flags&MntGEOMJournal != 0 { + opts += ",gjounalc" + } + if stat.Flags&MntMultilabel != 0 { + opts += ",multilabel" + } + if stat.Flags&MntACLs != 0 { + opts += ",acls" + } + if stat.Flags&MntNoATime != 0 { + opts += ",noattime" + } + if stat.Flags&MntClusterRead != 0 { + opts += ",nocluster" + } + if stat.Flags&MntClusterWrite != 0 { + opts += ",noclusterw" + } + if stat.Flags&MntNFS4ACLs != 0 { + opts += ",nfs4acls" + } + d := PartitionStat{ + Device: common.IntToString(stat.Mntfromname[:]), + Mountpoint: common.IntToString(stat.Mntonname[:]), + Fstype: common.IntToString(stat.Fstypename[:]), + Opts: opts, + } + if all == false { + if !path.IsAbs(d.Device) || !common.PathExists(d.Device) { + continue + } + } + + ret = append(ret, d) + } + + return ret, nil +} + +func Getfsstat(buf []Statfs, flags int) (n int, err error) { + return GetfsstatWithContext(context.Background(), buf, flags) +} + +func GetfsstatWithContext(ctx context.Context, buf []Statfs, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs{}) * uintptr(len(buf)) + } + r0, _, e1 := unix.Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func getFsType(stat unix.Statfs_t) string { + return common.IntToString(stat.Fstypename[:]) +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin.h b/vendor/github.com/shirou/gopsutil/disk/disk_darwin.h new file mode 100644 index 00000000..c7208499 --- /dev/null 
+++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin.h @@ -0,0 +1,33 @@ +// https://github.com/lufia/iostat/blob/9f7362b77ad333b26c01c99de52a11bdb650ded2/iostat_darwin.h +typedef struct DriveStats DriveStats; +typedef struct CPUStats CPUStats; + +enum { + NDRIVE = 16, + NAMELEN = 31 +}; + +struct DriveStats { + char name[NAMELEN+1]; + int64_t size; + int64_t blocksize; + + int64_t read; + int64_t written; + int64_t nread; + int64_t nwrite; + int64_t readtime; + int64_t writetime; + int64_t readlat; + int64_t writelat; +}; + +struct CPUStats { + natural_t user; + natural_t nice; + natural_t sys; + natural_t idle; +}; + +extern int readdrivestat(DriveStats a[], int n); +extern int readcpustat(CPUStats *cpu); diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin_386.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_386.go new file mode 100644 index 00000000..bd83a4a7 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_386.go @@ -0,0 +1,59 @@ +// +build darwin +// +build 386 + +package disk + +const ( + MntWait = 1 + MfsNameLen = 15 /* length of fs type name, not inc. nul */ + MNameLen = 90 /* length of buffer for returned name */ + + MFSTYPENAMELEN = 16 /* length of fs type name including null */ + MAXPATHLEN = 1024 + MNAMELEN = MAXPATHLEN + + SYS_GETFSSTAT64 = 347 +) + +type Fsid struct{ val [2]int32 } /* file system id type */ +type uid_t int32 + +// sys/mount.h +const ( + MntReadOnly = 0x00000001 /* read only filesystem */ + MntSynchronous = 0x00000002 /* filesystem written synchronously */ + MntNoExec = 0x00000004 /* can't exec from filesystem */ + MntNoSuid = 0x00000008 /* don't honor setuid bits on fs */ + MntUnion = 0x00000020 /* union with underlying filesystem */ + MntAsync = 0x00000040 /* filesystem written asynchronously */ + MntSuidDir = 0x00100000 /* special handling of SUID on dirs */ + MntSoftDep = 0x00200000 /* soft updates being done */ + MntNoSymFollow = 0x00400000 /* do not follow symlinks */ + MntGEOMJournal = 0x02000000 /* GEOM journal support enabled */ + MntMultilabel = 0x04000000 /* MAC support for individual objects */ + MntACLs = 0x08000000 /* ACL support enabled */ + MntNoATime = 0x10000000 /* disable update of file access time */ + MntClusterRead = 0x40000000 /* disable cluster read */ + MntClusterWrite = 0x80000000 /* disable cluster write */ + MntNFS4ACLs = 0x00000010 +) + +// https://github.com/golang/go/blob/master/src/syscall/ztypes_darwin_386.go#L82 +type Statfs struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_amd64.go new file mode 100644 index 00000000..ec40a758 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_amd64.go @@ -0,0 +1,58 @@ +// +build darwin +// +build amd64 + +package disk + +const ( + MntWait = 1 + MfsNameLen = 15 /* length of fs type name, not inc. 
nul */ + MNameLen = 90 /* length of buffer for returned name */ + + MFSTYPENAMELEN = 16 /* length of fs type name including null */ + MAXPATHLEN = 1024 + MNAMELEN = MAXPATHLEN + + SYS_GETFSSTAT64 = 347 +) + +type Fsid struct{ val [2]int32 } /* file system id type */ +type uid_t int32 + +// sys/mount.h +const ( + MntReadOnly = 0x00000001 /* read only filesystem */ + MntSynchronous = 0x00000002 /* filesystem written synchronously */ + MntNoExec = 0x00000004 /* can't exec from filesystem */ + MntNoSuid = 0x00000008 /* don't honor setuid bits on fs */ + MntUnion = 0x00000020 /* union with underlying filesystem */ + MntAsync = 0x00000040 /* filesystem written asynchronously */ + MntSuidDir = 0x00100000 /* special handling of SUID on dirs */ + MntSoftDep = 0x00200000 /* soft updates being done */ + MntNoSymFollow = 0x00400000 /* do not follow symlinks */ + MntGEOMJournal = 0x02000000 /* GEOM journal support enabled */ + MntMultilabel = 0x04000000 /* MAC support for individual objects */ + MntACLs = 0x08000000 /* ACL support enabled */ + MntNoATime = 0x10000000 /* disable update of file access time */ + MntClusterRead = 0x40000000 /* disable cluster read */ + MntClusterWrite = 0x80000000 /* disable cluster write */ + MntNFS4ACLs = 0x00000010 +) + +type Statfs struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_arm64.go new file mode 100644 index 00000000..0e3f6700 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_arm64.go @@ -0,0 +1,58 @@ +// +build darwin +// +build arm64 + +package disk + +const ( + MntWait = 1 + MfsNameLen = 15 /* length of fs type name, not inc. 
nul */ + MNameLen = 90 /* length of buffer for returned name */ + + MFSTYPENAMELEN = 16 /* length of fs type name including null */ + MAXPATHLEN = 1024 + MNAMELEN = MAXPATHLEN + + SYS_GETFSSTAT64 = 347 +) + +type Fsid struct{ val [2]int32 } /* file system id type */ +type uid_t int32 + +// sys/mount.h +const ( + MntReadOnly = 0x00000001 /* read only filesystem */ + MntSynchronous = 0x00000002 /* filesystem written synchronously */ + MntNoExec = 0x00000004 /* can't exec from filesystem */ + MntNoSuid = 0x00000008 /* don't honor setuid bits on fs */ + MntUnion = 0x00000020 /* union with underlying filesystem */ + MntAsync = 0x00000040 /* filesystem written asynchronously */ + MntSuidDir = 0x00100000 /* special handling of SUID on dirs */ + MntSoftDep = 0x00200000 /* soft updates being done */ + MntNoSymFollow = 0x00400000 /* do not follow symlinks */ + MntGEOMJournal = 0x02000000 /* GEOM journal support enabled */ + MntMultilabel = 0x04000000 /* MAC support for individual objects */ + MntACLs = 0x08000000 /* ACL support enabled */ + MntNoATime = 0x10000000 /* disable update of file access time */ + MntClusterRead = 0x40000000 /* disable cluster read */ + MntClusterWrite = 0x80000000 /* disable cluster write */ + MntNFS4ACLs = 0x00000010 +) + +type Statfs struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_cgo.go new file mode 100644 index 00000000..623c5fda --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_cgo.go @@ -0,0 +1,49 @@ +// +build darwin +// +build cgo + +package disk + +/* +#cgo LDFLAGS: -framework CoreFoundation -framework IOKit +#include +#include +#include "disk_darwin.h" +*/ +import "C" + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + var buf [C.NDRIVE]C.DriveStats + n, err := C.readdrivestat(&buf[0], C.int(len(buf))) + if err != nil { + return nil, err + } + ret := make(map[string]IOCountersStat, 0) + for i := 0; i < int(n); i++ { + d := IOCountersStat{ + ReadBytes: uint64(buf[i].read), + WriteBytes: uint64(buf[i].written), + ReadCount: uint64(buf[i].nread), + WriteCount: uint64(buf[i].nwrite), + ReadTime: uint64(buf[i].readtime / 1000 / 1000), // note: read/write time are in ns, but we want ms. 
+ WriteTime: uint64(buf[i].writetime / 1000 / 1000), + IoTime: uint64((buf[i].readtime + buf[i].writetime) / 1000 / 1000), + Name: C.GoString(&buf[i].name[0]), + } + if len(names) > 0 && !common.StringsHas(names, d.Name) { + continue + } + + ret[d.Name] = d + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_nocgo.go new file mode 100644 index 00000000..fe76d83e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_nocgo.go @@ -0,0 +1,18 @@ +// +build darwin +// +build !cgo + +package disk + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_fallback.go b/vendor/github.com/shirou/gopsutil/disk/disk_fallback.go new file mode 100644 index 00000000..22eb5079 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_fallback.go @@ -0,0 +1,33 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris + +package disk + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + return []PartitionStat{}, common.ErrNotImplementedError +} + +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_freebsd.go b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd.go new file mode 100644 index 00000000..2e0966a5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd.go @@ -0,0 +1,196 @@ +// +build freebsd + +package disk + +import ( + "bytes" + "context" + "encoding/binary" + "path" + "strconv" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/internal/common" +) + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + var ret []PartitionStat + + // get length + count, err := unix.Getfsstat(nil, MNT_WAIT) + if err != nil { + return ret, err + } + + fs := make([]Statfs, count) + if _, err = Getfsstat(fs, MNT_WAIT); err != nil { + return ret, err + } + + for _, stat := range fs { + opts := "rw" + if stat.Flags&MNT_RDONLY != 0 { + opts = "ro" + } + if stat.Flags&MNT_SYNCHRONOUS != 0 { + opts += ",sync" + } + if stat.Flags&MNT_NOEXEC != 0 { + opts += ",noexec" + } + if stat.Flags&MNT_NOSUID != 0 { + opts += ",nosuid" + } + if stat.Flags&MNT_UNION != 0 { + opts += ",union" + } + if stat.Flags&MNT_ASYNC != 0 { + opts += ",async" + } + if stat.Flags&MNT_SUIDDIR != 0 { + opts += ",suiddir" + } + if 
stat.Flags&MNT_SOFTDEP != 0 { + opts += ",softdep" + } + if stat.Flags&MNT_NOSYMFOLLOW != 0 { + opts += ",nosymfollow" + } + if stat.Flags&MNT_GJOURNAL != 0 { + opts += ",gjournal" + } + if stat.Flags&MNT_MULTILABEL != 0 { + opts += ",multilabel" + } + if stat.Flags&MNT_ACLS != 0 { + opts += ",acls" + } + if stat.Flags&MNT_NOATIME != 0 { + opts += ",noatime" + } + if stat.Flags&MNT_NOCLUSTERR != 0 { + opts += ",noclusterr" + } + if stat.Flags&MNT_NOCLUSTERW != 0 { + opts += ",noclusterw" + } + if stat.Flags&MNT_NFS4ACLS != 0 { + opts += ",nfs4acls" + } + + d := PartitionStat{ + Device: common.IntToString(stat.Mntfromname[:]), + Mountpoint: common.IntToString(stat.Mntonname[:]), + Fstype: common.IntToString(stat.Fstypename[:]), + Opts: opts, + } + if all == false { + if !path.IsAbs(d.Device) || !common.PathExists(d.Device) { + continue + } + } + + ret = append(ret, d) + } + + return ret, nil +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + // statinfo->devinfo->devstat + // /usr/include/devinfo.h + ret := make(map[string]IOCountersStat) + + r, err := unix.Sysctl("kern.devstat.all") + if err != nil { + return nil, err + } + buf := []byte(r) + length := len(buf) + + count := int(uint64(length) / uint64(sizeOfDevstat)) + + buf = buf[8:] // devstat.all has version in the head. + // parse buf to Devstat + for i := 0; i < count; i++ { + b := buf[i*sizeOfDevstat : i*sizeOfDevstat+sizeOfDevstat] + d, err := parseDevstat(b) + if err != nil { + continue + } + un := strconv.Itoa(int(d.Unit_number)) + name := common.IntToString(d.Device_name[:]) + un + + if len(names) > 0 && !common.StringsHas(names, name) { + continue + } + + ds := IOCountersStat{ + ReadCount: d.Operations[DEVSTAT_READ], + WriteCount: d.Operations[DEVSTAT_WRITE], + ReadBytes: d.Bytes[DEVSTAT_READ], + WriteBytes: d.Bytes[DEVSTAT_WRITE], + ReadTime: uint64(d.Duration[DEVSTAT_READ].Compute() * 1000), + WriteTime: uint64(d.Duration[DEVSTAT_WRITE].Compute() * 1000), + IoTime: uint64(d.Busy_time.Compute() * 1000), + Name: name, + } + ret[name] = ds + } + + return ret, nil +} + +func (b Bintime) Compute() float64 { + BINTIME_SCALE := 5.42101086242752217003726400434970855712890625e-20 + return float64(b.Sec) + float64(b.Frac)*BINTIME_SCALE +} + +// BT2LD(time) ((long double)(time).sec + (time).frac * BINTIME_SCALE) + +// Getfsstat is borrowed from pkg/syscall/syscall_freebsd.go +// change Statfs_t to Statfs in order to get more information +func Getfsstat(buf []Statfs, flags int) (n int, err error) { + return GetfsstatWithContext(context.Background(), buf, flags) +} + +func GetfsstatWithContext(ctx context.Context, buf []Statfs, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs{}) * uintptr(len(buf)) + } + r0, _, e1 := unix.Syscall(unix.SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func parseDevstat(buf []byte) (Devstat, error) { + var ds Devstat + br := bytes.NewReader(buf) + // err := binary.Read(br, binary.LittleEndian, &ds) + err := common.Read(br, binary.LittleEndian, &ds) + if err != nil { + return ds, err + } + + return ds, nil +} + +func getFsType(stat unix.Statfs_t) string { + return common.IntToString(stat.Fstypename[:]) +} diff --git 
a/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_386.go b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_386.go new file mode 100644 index 00000000..0b3f536c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_386.go @@ -0,0 +1,112 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package disk + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeofLongDouble = 0x8 + + DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + MNT_RDONLY = 0x00000001 + MNT_SYNCHRONOUS = 0x00000002 + MNT_NOEXEC = 0x00000004 + MNT_NOSUID = 0x00000008 + MNT_UNION = 0x00000020 + MNT_ASYNC = 0x00000040 + MNT_SUIDDIR = 0x00100000 + MNT_SOFTDEP = 0x00200000 + MNT_NOSYMFOLLOW = 0x00400000 + MNT_GJOURNAL = 0x02000000 + MNT_MULTILABEL = 0x04000000 + MNT_ACLS = 0x08000000 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NFS4ACLS = 0x00000010 + + MNT_WAIT = 1 + MNT_NOWAIT = 2 + MNT_LAZY = 3 + MNT_SUSPEND = 4 +) + +const ( + sizeOfDevstat = 0xf0 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 + _C_long_double int64 +) + +type Statfs struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} +type Fsid struct { + Val [2]int32 +} + +type Devstat struct { + Sequence0 uint32 + Allocated int32 + Start_count uint32 + End_count uint32 + Busy_from Bintime + Dev_links _Ctype_struct___0 + Device_number uint32 + Device_name [16]int8 + Unit_number int32 + Bytes [4]uint64 + Operations [4]uint64 + Duration [4]Bintime + Busy_time Bintime + Creation_time Bintime + Block_size uint32 + Tag_types [3]uint64 + Flags uint32 + Device_type uint32 + Priority uint32 + Id *byte + Sequence1 uint32 +} +type Bintime struct { + Sec int32 + Frac uint64 +} + +type _Ctype_struct___0 struct { + Empty uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_amd64.go new file mode 100644 index 00000000..89b617c9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_amd64.go @@ -0,0 +1,115 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package disk + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeofLongDouble = 0x8 + + DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + MNT_RDONLY = 0x00000001 + MNT_SYNCHRONOUS = 0x00000002 + MNT_NOEXEC = 0x00000004 + MNT_NOSUID = 0x00000008 + MNT_UNION = 0x00000020 + MNT_ASYNC = 0x00000040 + MNT_SUIDDIR = 0x00100000 + MNT_SOFTDEP = 0x00200000 + MNT_NOSYMFOLLOW = 0x00400000 + MNT_GJOURNAL = 0x02000000 + MNT_MULTILABEL = 0x04000000 + MNT_ACLS = 0x08000000 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NFS4ACLS = 0x00000010 + + MNT_WAIT = 1 + MNT_NOWAIT = 2 + MNT_LAZY = 3 + MNT_SUSPEND = 4 +) + +const ( + sizeOfDevstat = 0x120 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 + _C_long_double int64 +) + +type Statfs struct { + Version uint32 + Type uint32 
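// Editor's note, not part of the vendored source: this file is the cgo
// -godefs rendering of FreeBSD's struct statfs for amd64; the wider C long
// and the Pad_cgo_* alignment bytes in Devstat below are why sizeOfDevstat
// grows from 0xf0 on 386/arm to 0x120 here.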
+ Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} +type Fsid struct { + Val [2]int32 +} + +type Devstat struct { + Sequence0 uint32 + Allocated int32 + Start_count uint32 + End_count uint32 + Busy_from Bintime + Dev_links _Ctype_struct___0 + Device_number uint32 + Device_name [16]int8 + Unit_number int32 + Bytes [4]uint64 + Operations [4]uint64 + Duration [4]Bintime + Busy_time Bintime + Creation_time Bintime + Block_size uint32 + Pad_cgo_0 [4]byte + Tag_types [3]uint64 + Flags uint32 + Device_type uint32 + Priority uint32 + Pad_cgo_1 [4]byte + ID *byte + Sequence1 uint32 + Pad_cgo_2 [4]byte +} +type Bintime struct { + Sec int64 + Frac uint64 +} + +type _Ctype_struct___0 struct { + Empty uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_arm.go new file mode 100644 index 00000000..0b3f536c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_arm.go @@ -0,0 +1,112 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package disk + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeofLongDouble = 0x8 + + DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + MNT_RDONLY = 0x00000001 + MNT_SYNCHRONOUS = 0x00000002 + MNT_NOEXEC = 0x00000004 + MNT_NOSUID = 0x00000008 + MNT_UNION = 0x00000020 + MNT_ASYNC = 0x00000040 + MNT_SUIDDIR = 0x00100000 + MNT_SOFTDEP = 0x00200000 + MNT_NOSYMFOLLOW = 0x00400000 + MNT_GJOURNAL = 0x02000000 + MNT_MULTILABEL = 0x04000000 + MNT_ACLS = 0x08000000 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NFS4ACLS = 0x00000010 + + MNT_WAIT = 1 + MNT_NOWAIT = 2 + MNT_LAZY = 3 + MNT_SUSPEND = 4 +) + +const ( + sizeOfDevstat = 0xf0 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 + _C_long_double int64 +) + +type Statfs struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} +type Fsid struct { + Val [2]int32 +} + +type Devstat struct { + Sequence0 uint32 + Allocated int32 + Start_count uint32 + End_count uint32 + Busy_from Bintime + Dev_links _Ctype_struct___0 + Device_number uint32 + Device_name [16]int8 + Unit_number int32 + Bytes [4]uint64 + Operations [4]uint64 + Duration [4]Bintime + Busy_time Bintime + Creation_time Bintime + Block_size uint32 + Tag_types [3]uint64 + Flags uint32 + Device_type uint32 + Priority uint32 + Id *byte + Sequence1 uint32 +} +type Bintime struct { + Sec int32 + Frac uint64 +} + +type _Ctype_struct___0 struct { + Empty uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_linux.go b/vendor/github.com/shirou/gopsutil/disk/disk_linux.go new file mode 100644 index 00000000..e3d1b3f5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_linux.go @@ -0,0 +1,514 @@ +// +build linux + +package 
disk + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +const ( + SectorSize = 512 +) +const ( + // man statfs + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xADFF + BDEVFS_MAGIC = 0x62646576 + BEFS_SUPER_MAGIC = 0x42465331 + BFS_MAGIC = 0x1BADFACE + BINFMTFS_MAGIC = 0x42494e4d + BTRFS_SUPER_MAGIC = 0x9123683E + CGROUP_SUPER_MAGIC = 0x27e0eb + CIFS_MAGIC_NUMBER = 0xFF534D42 + CODA_SUPER_MAGIC = 0x73757245 + COH_SUPER_MAGIC = 0x012FF7B7 + CRAMFS_MAGIC = 0x28cd3d45 + DEBUGFS_MAGIC = 0x64626720 + DEVFS_SUPER_MAGIC = 0x1373 + DEVPTS_SUPER_MAGIC = 0x1cd1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x00414A53 + EXT_SUPER_MAGIC = 0x137D + EXT2_OLD_SUPER_MAGIC = 0xEF51 + EXT2_SUPER_MAGIC = 0xEF53 + EXT3_SUPER_MAGIC = 0xEF53 + EXT4_SUPER_MAGIC = 0xEF53 + FUSE_SUPER_MAGIC = 0x65735546 + FUTEXFS_SUPER_MAGIC = 0xBAD1DEA + HFS_SUPER_MAGIC = 0x4244 + HFSPLUS_SUPER_MAGIC = 0x482b + HOSTFS_SUPER_MAGIC = 0x00c0ffee + HPFS_SUPER_MAGIC = 0xF995E849 + HUGETLBFS_MAGIC = 0x958458f6 + ISOFS_SUPER_MAGIC = 0x9660 + JFFS2_SUPER_MAGIC = 0x72b6 + JFS_SUPER_MAGIC = 0x3153464a + MINIX_SUPER_MAGIC = 0x137F /* orig. minix */ + MINIX_SUPER_MAGIC2 = 0x138F /* 30 char minix */ + MINIX2_SUPER_MAGIC = 0x2468 /* minix V2 */ + MINIX2_SUPER_MAGIC2 = 0x2478 /* minix V2, 30 char names */ + MINIX3_SUPER_MAGIC = 0x4d5a /* minix V3 fs, 60 char names */ + MQUEUE_MAGIC = 0x19800202 + MSDOS_SUPER_MAGIC = 0x4d44 + NCP_SUPER_MAGIC = 0x564c + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 + NTFS_SB_MAGIC = 0x5346544e + OCFS2_SUPER_MAGIC = 0x7461636f + OPENPROM_SUPER_MAGIC = 0x9fa1 + PIPEFS_MAGIC = 0x50495045 + PROC_SUPER_MAGIC = 0x9fa0 + PSTOREFS_MAGIC = 0x6165676C + QNX4_SUPER_MAGIC = 0x002f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + REISERFS_SUPER_MAGIC = 0x52654973 + ROMFS_MAGIC = 0x7275 + SELINUX_MAGIC = 0xf97cff8c + SMACK_MAGIC = 0x43415d53 + SMB_SUPER_MAGIC = 0x517B + SOCKFS_MAGIC = 0x534F434B + SQUASHFS_MAGIC = 0x73717368 + SYSFS_MAGIC = 0x62656572 + SYSV2_SUPER_MAGIC = 0x012FF7B6 + SYSV4_SUPER_MAGIC = 0x012FF7B5 + TMPFS_MAGIC = 0x01021994 + UDF_SUPER_MAGIC = 0x15013346 + UFS_MAGIC = 0x00011954 + USBDEVICE_SUPER_MAGIC = 0x9fa2 + V9FS_MAGIC = 0x01021997 + VXFS_SUPER_MAGIC = 0xa501FCF5 + XENFS_SUPER_MAGIC = 0xabba1974 + XENIX_SUPER_MAGIC = 0x012FF7B4 + XFS_SUPER_MAGIC = 0x58465342 + _XIAFS_SUPER_MAGIC = 0x012FD16D + + AFS_SUPER_MAGIC = 0x5346414F + AUFS_SUPER_MAGIC = 0x61756673 + ANON_INODE_FS_SUPER_MAGIC = 0x09041934 + CEPH_SUPER_MAGIC = 0x00C36400 + ECRYPTFS_SUPER_MAGIC = 0xF15F + FAT_SUPER_MAGIC = 0x4006 + FHGFS_SUPER_MAGIC = 0x19830326 + FUSEBLK_SUPER_MAGIC = 0x65735546 + FUSECTL_SUPER_MAGIC = 0x65735543 + GFS_SUPER_MAGIC = 0x1161970 + GPFS_SUPER_MAGIC = 0x47504653 + MTD_INODE_FS_SUPER_MAGIC = 0x11307854 + INOTIFYFS_SUPER_MAGIC = 0x2BAD1DEA + ISOFS_R_WIN_SUPER_MAGIC = 0x4004 + ISOFS_WIN_SUPER_MAGIC = 0x4000 + JFFS_SUPER_MAGIC = 0x07C0 + KAFS_SUPER_MAGIC = 0x6B414653 + LUSTRE_SUPER_MAGIC = 0x0BD00BD0 + NFSD_SUPER_MAGIC = 0x6E667364 + PANFS_SUPER_MAGIC = 0xAAD7AAEA + RPC_PIPEFS_SUPER_MAGIC = 0x67596969 + SECURITYFS_SUPER_MAGIC = 0x73636673 + UFS_BYTESWAPPED_SUPER_MAGIC = 0x54190100 + VMHGFS_SUPER_MAGIC = 0xBACBACBC + VZFS_SUPER_MAGIC = 0x565A4653 + ZFS_SUPER_MAGIC = 0x2FC12FC1 +) + +// coreutils/src/stat.c +var fsTypeMap = map[int64]string{ + ADFS_SUPER_MAGIC: "adfs", /* 0xADF5 local */ + AFFS_SUPER_MAGIC: "affs", /* 0xADFF local */ + AFS_SUPER_MAGIC: 
"afs", /* 0x5346414F remote */ + ANON_INODE_FS_SUPER_MAGIC: "anon-inode FS", /* 0x09041934 local */ + AUFS_SUPER_MAGIC: "aufs", /* 0x61756673 remote */ + // AUTOFS_SUPER_MAGIC: "autofs", /* 0x0187 local */ + BEFS_SUPER_MAGIC: "befs", /* 0x42465331 local */ + BDEVFS_MAGIC: "bdevfs", /* 0x62646576 local */ + BFS_MAGIC: "bfs", /* 0x1BADFACE local */ + BINFMTFS_MAGIC: "binfmt_misc", /* 0x42494E4D local */ + BTRFS_SUPER_MAGIC: "btrfs", /* 0x9123683E local */ + CEPH_SUPER_MAGIC: "ceph", /* 0x00C36400 remote */ + CGROUP_SUPER_MAGIC: "cgroupfs", /* 0x0027E0EB local */ + CIFS_MAGIC_NUMBER: "cifs", /* 0xFF534D42 remote */ + CODA_SUPER_MAGIC: "coda", /* 0x73757245 remote */ + COH_SUPER_MAGIC: "coh", /* 0x012FF7B7 local */ + CRAMFS_MAGIC: "cramfs", /* 0x28CD3D45 local */ + DEBUGFS_MAGIC: "debugfs", /* 0x64626720 local */ + DEVFS_SUPER_MAGIC: "devfs", /* 0x1373 local */ + DEVPTS_SUPER_MAGIC: "devpts", /* 0x1CD1 local */ + ECRYPTFS_SUPER_MAGIC: "ecryptfs", /* 0xF15F local */ + EFS_SUPER_MAGIC: "efs", /* 0x00414A53 local */ + EXT_SUPER_MAGIC: "ext", /* 0x137D local */ + EXT2_SUPER_MAGIC: "ext2/ext3", /* 0xEF53 local */ + EXT2_OLD_SUPER_MAGIC: "ext2", /* 0xEF51 local */ + FAT_SUPER_MAGIC: "fat", /* 0x4006 local */ + FHGFS_SUPER_MAGIC: "fhgfs", /* 0x19830326 remote */ + FUSEBLK_SUPER_MAGIC: "fuseblk", /* 0x65735546 remote */ + FUSECTL_SUPER_MAGIC: "fusectl", /* 0x65735543 remote */ + FUTEXFS_SUPER_MAGIC: "futexfs", /* 0x0BAD1DEA local */ + GFS_SUPER_MAGIC: "gfs/gfs2", /* 0x1161970 remote */ + GPFS_SUPER_MAGIC: "gpfs", /* 0x47504653 remote */ + HFS_SUPER_MAGIC: "hfs", /* 0x4244 local */ + HFSPLUS_SUPER_MAGIC: "hfsplus", /* 0x482b local */ + HPFS_SUPER_MAGIC: "hpfs", /* 0xF995E849 local */ + HUGETLBFS_MAGIC: "hugetlbfs", /* 0x958458F6 local */ + MTD_INODE_FS_SUPER_MAGIC: "inodefs", /* 0x11307854 local */ + INOTIFYFS_SUPER_MAGIC: "inotifyfs", /* 0x2BAD1DEA local */ + ISOFS_SUPER_MAGIC: "isofs", /* 0x9660 local */ + ISOFS_R_WIN_SUPER_MAGIC: "isofs", /* 0x4004 local */ + ISOFS_WIN_SUPER_MAGIC: "isofs", /* 0x4000 local */ + JFFS_SUPER_MAGIC: "jffs", /* 0x07C0 local */ + JFFS2_SUPER_MAGIC: "jffs2", /* 0x72B6 local */ + JFS_SUPER_MAGIC: "jfs", /* 0x3153464A local */ + KAFS_SUPER_MAGIC: "k-afs", /* 0x6B414653 remote */ + LUSTRE_SUPER_MAGIC: "lustre", /* 0x0BD00BD0 remote */ + MINIX_SUPER_MAGIC: "minix", /* 0x137F local */ + MINIX_SUPER_MAGIC2: "minix (30 char.)", /* 0x138F local */ + MINIX2_SUPER_MAGIC: "minix v2", /* 0x2468 local */ + MINIX2_SUPER_MAGIC2: "minix v2 (30 char.)", /* 0x2478 local */ + MINIX3_SUPER_MAGIC: "minix3", /* 0x4D5A local */ + MQUEUE_MAGIC: "mqueue", /* 0x19800202 local */ + MSDOS_SUPER_MAGIC: "msdos", /* 0x4D44 local */ + NCP_SUPER_MAGIC: "novell", /* 0x564C remote */ + NFS_SUPER_MAGIC: "nfs", /* 0x6969 remote */ + NFSD_SUPER_MAGIC: "nfsd", /* 0x6E667364 remote */ + NILFS_SUPER_MAGIC: "nilfs", /* 0x3434 local */ + NTFS_SB_MAGIC: "ntfs", /* 0x5346544E local */ + OPENPROM_SUPER_MAGIC: "openprom", /* 0x9FA1 local */ + OCFS2_SUPER_MAGIC: "ocfs2", /* 0x7461636f remote */ + PANFS_SUPER_MAGIC: "panfs", /* 0xAAD7AAEA remote */ + PIPEFS_MAGIC: "pipefs", /* 0x50495045 remote */ + PROC_SUPER_MAGIC: "proc", /* 0x9FA0 local */ + PSTOREFS_MAGIC: "pstorefs", /* 0x6165676C local */ + QNX4_SUPER_MAGIC: "qnx4", /* 0x002F local */ + QNX6_SUPER_MAGIC: "qnx6", /* 0x68191122 local */ + RAMFS_MAGIC: "ramfs", /* 0x858458F6 local */ + REISERFS_SUPER_MAGIC: "reiserfs", /* 0x52654973 local */ + ROMFS_MAGIC: "romfs", /* 0x7275 local */ + RPC_PIPEFS_SUPER_MAGIC: "rpc_pipefs", /* 0x67596969 local */ + 
SECURITYFS_SUPER_MAGIC: "securityfs", /* 0x73636673 local */ + SELINUX_MAGIC: "selinux", /* 0xF97CFF8C local */ + SMB_SUPER_MAGIC: "smb", /* 0x517B remote */ + SOCKFS_MAGIC: "sockfs", /* 0x534F434B local */ + SQUASHFS_MAGIC: "squashfs", /* 0x73717368 local */ + SYSFS_MAGIC: "sysfs", /* 0x62656572 local */ + SYSV2_SUPER_MAGIC: "sysv2", /* 0x012FF7B6 local */ + SYSV4_SUPER_MAGIC: "sysv4", /* 0x012FF7B5 local */ + TMPFS_MAGIC: "tmpfs", /* 0x01021994 local */ + UDF_SUPER_MAGIC: "udf", /* 0x15013346 local */ + UFS_MAGIC: "ufs", /* 0x00011954 local */ + UFS_BYTESWAPPED_SUPER_MAGIC: "ufs", /* 0x54190100 local */ + USBDEVICE_SUPER_MAGIC: "usbdevfs", /* 0x9FA2 local */ + V9FS_MAGIC: "v9fs", /* 0x01021997 local */ + VMHGFS_SUPER_MAGIC: "vmhgfs", /* 0xBACBACBC remote */ + VXFS_SUPER_MAGIC: "vxfs", /* 0xA501FCF5 local */ + VZFS_SUPER_MAGIC: "vzfs", /* 0x565A4653 local */ + XENFS_SUPER_MAGIC: "xenfs", /* 0xABBA1974 local */ + XENIX_SUPER_MAGIC: "xenix", /* 0x012FF7B4 local */ + XFS_SUPER_MAGIC: "xfs", /* 0x58465342 local */ + _XIAFS_SUPER_MAGIC: "xia", /* 0x012FD16D local */ + ZFS_SUPER_MAGIC: "zfs", /* 0x2FC12FC1 local */ +} + +// Partitions returns disk partitions. If all is false, returns +// physical devices only (e.g. hard disks, cd-rom drives, USB keys) +// and ignore all others (e.g. memory partitions such as /dev/shm) +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + useMounts := false + + filename := common.HostProc("self/mountinfo") + lines, err := common.ReadLines(filename) + if err != nil { + if err != err.(*os.PathError) { + return nil, err + } + // if kernel does not support self/mountinfo, fallback to self/mounts (<2.6.26) + useMounts = true + filename = common.HostProc("self/mounts") + lines, err = common.ReadLines(filename) + if err != nil { + return nil, err + } + } + + fs, err := getFileSystems() + if err != nil && !all { + return nil, err + } + + ret := make([]PartitionStat, 0, len(lines)) + + for _, line := range lines { + var d PartitionStat + if useMounts { + fields := strings.Fields(line) + + d = PartitionStat{ + Device: fields[0], + Mountpoint: unescapeFstab(fields[1]), + Fstype: fields[2], + Opts: fields[3], + } + + if !all { + if d.Device == "none" || !common.StringsHas(fs, d.Fstype) { + continue + } + } + } else { + // a line of self/mountinfo has the following structure: + // 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + // (1) (2) (3) (4) (5) (6) (7) (8) (9) (10) (11) + + // split the mountinfo line by the separator hyphen + parts := strings.Split(line, " - ") + if len(parts) != 2 { + return nil, fmt.Errorf("found invalid mountinfo line in file %s: %s ", filename, line) + } + + fields := strings.Fields(parts[0]) + blockDeviceID := fields[2] + mountPoint := fields[4] + mountOpts := fields[5] + + fields = strings.Fields(parts[1]) + fstype := fields[0] + device := fields[1] + + d = PartitionStat{ + Device: device, + Mountpoint: unescapeFstab(mountPoint), + Fstype: fstype, + Opts: mountOpts, + } + + if !all { + if d.Device == "none" || !common.StringsHas(fs, d.Fstype) { + continue + } + } + + if strings.HasPrefix(d.Device, "/dev/mapper/") { + devpath, err := filepath.EvalSymlinks(d.Device) + if err == nil { + d.Device = devpath + } + } + + // /dev/root is not the real device name + // so we get the real device name from its major/minor number + if d.Device == "/dev/root" { + devpath, err := 
os.Readlink(common.HostSys("/dev/block/" + blockDeviceID)) + if err != nil { + return nil, err + } + d.Device = strings.Replace(d.Device, "root", filepath.Base(devpath), 1) + } + } + ret = append(ret, d) + } + + return ret, nil +} + +// getFileSystems returns supported filesystems from /proc/filesystems +func getFileSystems() ([]string, error) { + filename := common.HostProc("filesystems") + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + var ret []string + for _, line := range lines { + if !strings.HasPrefix(line, "nodev") { + ret = append(ret, strings.TrimSpace(line)) + continue + } + t := strings.Split(line, "\t") + if len(t) != 2 || t[1] != "zfs" { + continue + } + ret = append(ret, strings.TrimSpace(t[1])) + } + + return ret, nil +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + filename := common.HostProc("diskstats") + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + ret := make(map[string]IOCountersStat, 0) + empty := IOCountersStat{} + + // use only basename such as "/dev/sda1" to "sda1" + for i, name := range names { + names[i] = filepath.Base(name) + } + + for _, line := range lines { + fields := strings.Fields(line) + if len(fields) < 14 { + // malformed line in /proc/diskstats, avoid panic by ignoring. + continue + } + name := fields[2] + + if len(names) > 0 && !common.StringsHas(names, name) { + continue + } + + reads, err := strconv.ParseUint((fields[3]), 10, 64) + if err != nil { + return ret, err + } + mergedReads, err := strconv.ParseUint((fields[4]), 10, 64) + if err != nil { + return ret, err + } + rbytes, err := strconv.ParseUint((fields[5]), 10, 64) + if err != nil { + return ret, err + } + rtime, err := strconv.ParseUint((fields[6]), 10, 64) + if err != nil { + return ret, err + } + writes, err := strconv.ParseUint((fields[7]), 10, 64) + if err != nil { + return ret, err + } + mergedWrites, err := strconv.ParseUint((fields[8]), 10, 64) + if err != nil { + return ret, err + } + wbytes, err := strconv.ParseUint((fields[9]), 10, 64) + if err != nil { + return ret, err + } + wtime, err := strconv.ParseUint((fields[10]), 10, 64) + if err != nil { + return ret, err + } + iopsInProgress, err := strconv.ParseUint((fields[11]), 10, 64) + if err != nil { + return ret, err + } + iotime, err := strconv.ParseUint((fields[12]), 10, 64) + if err != nil { + return ret, err + } + weightedIO, err := strconv.ParseUint((fields[13]), 10, 64) + if err != nil { + return ret, err + } + d := IOCountersStat{ + ReadBytes: rbytes * SectorSize, + WriteBytes: wbytes * SectorSize, + ReadCount: reads, + WriteCount: writes, + MergedReadCount: mergedReads, + MergedWriteCount: mergedWrites, + ReadTime: rtime, + WriteTime: wtime, + IopsInProgress: iopsInProgress, + IoTime: iotime, + WeightedIO: weightedIO, + } + if d == empty { + continue + } + d.Name = name + + d.SerialNumber = GetDiskSerialNumber(name) + d.Label = GetLabel(name) + + ret[name] = d + } + return ret, nil +} + +// GetDiskSerialNumber returns Serial Number of given device or empty string +// on error. Name of device is expected, eg. 
/dev/sda +func GetDiskSerialNumber(name string) string { + return GetDiskSerialNumberWithContext(context.Background(), name) +} + +func GetDiskSerialNumberWithContext(ctx context.Context, name string) string { + var stat unix.Stat_t + err := unix.Stat(name, &stat) + if err != nil { + return "" + } + major := unix.Major(uint64(stat.Rdev)) + minor := unix.Minor(uint64(stat.Rdev)) + + // Try to get the serial from udev data + udevDataPath := common.HostRun(fmt.Sprintf("udev/data/b%d:%d", major, minor)) + if udevdata, err := ioutil.ReadFile(udevDataPath); err == nil { + scanner := bufio.NewScanner(bytes.NewReader(udevdata)) + for scanner.Scan() { + values := strings.Split(scanner.Text(), "=") + if len(values) == 2 && values[0] == "E:ID_SERIAL" { + return values[1] + } + } + } + + // Try to get the serial from sysfs, look at the disk device (minor 0) directly + // because if it is a partition it is not going to contain any device information + devicePath := common.HostSys(fmt.Sprintf("dev/block/%d:0/device", major)) + model, _ := ioutil.ReadFile(filepath.Join(devicePath, "model")) + serial, _ := ioutil.ReadFile(filepath.Join(devicePath, "serial")) + if len(model) > 0 && len(serial) > 0 { + return fmt.Sprintf("%s_%s", string(model), string(serial)) + } + return "" +} + +// GetLabel returns label of given device or empty string on error. +// Name of device is expected, eg. /dev/sda +// Supports label based on devicemapper name +// See https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-block-dm +func GetLabel(name string) string { + // Try label based on devicemapper name + dmname_filename := common.HostSys(fmt.Sprintf("block/%s/dm/name", name)) + + if !common.PathExists(dmname_filename) { + return "" + } + + dmname, err := ioutil.ReadFile(dmname_filename) + if err != nil { + return "" + } else { + return strings.TrimSpace(string(dmname)) + } +} + +func getFsType(stat unix.Statfs_t) string { + t := int64(stat.Type) + ret, ok := fsTypeMap[t] + if !ok { + return "" + } + return ret +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_openbsd.go b/vendor/github.com/shirou/gopsutil/disk/disk_openbsd.go new file mode 100644 index 00000000..6fdf3863 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_openbsd.go @@ -0,0 +1,181 @@ +// +build openbsd + +package disk + +import ( + "bytes" + "context" + "encoding/binary" + "path" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + var ret []PartitionStat + + // get length + count, err := unix.Getfsstat(nil, MNT_WAIT) + if err != nil { + return ret, err + } + + fs := make([]Statfs, count) + if _, err = Getfsstat(fs, MNT_WAIT); err != nil { + return ret, err + } + + for _, stat := range fs { + opts := "rw" + if stat.F_flags&MNT_RDONLY != 0 { + opts = "ro" + } + if stat.F_flags&MNT_SYNCHRONOUS != 0 { + opts += ",sync" + } + if stat.F_flags&MNT_NOEXEC != 0 { + opts += ",noexec" + } + if stat.F_flags&MNT_NOSUID != 0 { + opts += ",nosuid" + } + if stat.F_flags&MNT_NODEV != 0 { + opts += ",nodev" + } + if stat.F_flags&MNT_ASYNC != 0 { + opts += ",async" + } + + d := PartitionStat{ + Device: common.IntToString(stat.F_mntfromname[:]), + Mountpoint: common.IntToString(stat.F_mntonname[:]), + Fstype: common.IntToString(stat.F_fstypename[:]), + Opts: opts, + } + if all == false { + if 
!path.IsAbs(d.Device) || !common.PathExists(d.Device) { + continue + } + } + + ret = append(ret, d) + } + + return ret, nil +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + ret := make(map[string]IOCountersStat) + + r, err := unix.SysctlRaw("hw.diskstats") + if err != nil { + return nil, err + } + buf := []byte(r) + length := len(buf) + + count := int(uint64(length) / uint64(sizeOfDiskstats)) + + // parse buf to Diskstats + for i := 0; i < count; i++ { + b := buf[i*sizeOfDiskstats : i*sizeOfDiskstats+sizeOfDiskstats] + d, err := parseDiskstats(b) + if err != nil { + continue + } + name := common.IntToString(d.Name[:]) + + if len(names) > 0 && !common.StringsHas(names, name) { + continue + } + + ds := IOCountersStat{ + ReadCount: d.Rxfer, + WriteCount: d.Wxfer, + ReadBytes: d.Rbytes, + WriteBytes: d.Wbytes, + Name: name, + } + ret[name] = ds + } + + return ret, nil +} + +// BT2LD(time) ((long double)(time).sec + (time).frac * BINTIME_SCALE) + +// Getfsstat is borrowed from pkg/syscall/syscall_freebsd.go +// change Statfs_t to Statfs in order to get more information +func Getfsstat(buf []Statfs, flags int) (n int, err error) { + return GetfsstatWithContext(context.Background(), buf, flags) +} + +func GetfsstatWithContext(ctx context.Context, buf []Statfs, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs{}) * uintptr(len(buf)) + } + r0, _, e1 := unix.Syscall(unix.SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func parseDiskstats(buf []byte) (Diskstats, error) { + var ds Diskstats + br := bytes.NewReader(buf) + // err := binary.Read(br, binary.LittleEndian, &ds) + err := common.Read(br, binary.LittleEndian, &ds) + if err != nil { + return ds, err + } + + return ds, nil +} + +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + stat := unix.Statfs_t{} + err := unix.Statfs(path, &stat) + if err != nil { + return nil, err + } + bsize := stat.F_bsize + + ret := &UsageStat{ + Path: path, + Fstype: getFsType(stat), + Total: (uint64(stat.F_blocks) * uint64(bsize)), + Free: (uint64(stat.F_bavail) * uint64(bsize)), + InodesTotal: (uint64(stat.F_files)), + InodesFree: (uint64(stat.F_ffree)), + } + + ret.InodesUsed = (ret.InodesTotal - ret.InodesFree) + ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0 + ret.Used = (uint64(stat.F_blocks) - uint64(stat.F_bfree)) * uint64(bsize) + ret.UsedPercent = (float64(ret.Used) / float64(ret.Total)) * 100.0 + + return ret, nil +} + +func getFsType(stat unix.Statfs_t) string { + return common.IntToString(stat.F_fstypename[:]) +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_openbsd_386.go b/vendor/github.com/shirou/gopsutil/disk/disk_openbsd_386.go new file mode 100644 index 00000000..bee3cc13 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_openbsd_386.go @@ -0,0 +1,89 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
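// Editor's note, not part of the vendored source: the sizeOfDiskstats
// constant below (0x60 on 386, 0x70 on amd64) must equal the kernel's
// struct diskstats size, because IOCountersWithContext slices the raw
// hw.diskstats sysctl buffer into records of exactly that stride.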
+// cgo -godefs types_openbsd.go + +package disk + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeofLongDouble = 0x8 + + DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + MNT_RDONLY = 0x00000001 + MNT_SYNCHRONOUS = 0x00000002 + MNT_NOEXEC = 0x00000004 + MNT_NOSUID = 0x00000008 + MNT_NODEV = 0x00000010 + MNT_ASYNC = 0x00000040 + + MNT_WAIT = 1 + MNT_NOWAIT = 2 + MNT_LAZY = 3 +) + +const ( + sizeOfDiskstats = 0x60 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 + _C_long_double int64 +) + +type Statfs struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + Pad_cgo_0 [2]byte + Mount_info [160]byte +} +type Diskstats struct { + Name [16]int8 + Busy int32 + Rxfer uint64 + Wxfer uint64 + Seek uint64 + Rbytes uint64 + Wbytes uint64 + Attachtime Timeval + Timestamp Timeval + Time Timeval +} +type Fsid struct { + Val [2]int32 +} +type Timeval struct { + Sec int64 + Usec int32 +} + +type Diskstat struct{} +type Bintime struct{} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/disk/disk_openbsd_amd64.go new file mode 100644 index 00000000..07a845fb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_openbsd_amd64.go @@ -0,0 +1,91 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package disk + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeofLongDouble = 0x8 + + DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + MNT_RDONLY = 0x00000001 + MNT_SYNCHRONOUS = 0x00000002 + MNT_NOEXEC = 0x00000004 + MNT_NOSUID = 0x00000008 + MNT_NODEV = 0x00000010 + MNT_ASYNC = 0x00000040 + + MNT_WAIT = 1 + MNT_NOWAIT = 2 + MNT_LAZY = 3 +) + +const ( + sizeOfDiskstats = 0x70 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 + _C_long_double int64 +) + +type Statfs struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + Pad_cgo_0 [4]byte + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + Pad_cgo_1 [2]byte + Mount_info [160]byte +} +type Diskstats struct { + Name [16]int8 + Busy int32 + Pad_cgo_0 [4]byte + Rxfer uint64 + Wxfer uint64 + Seek uint64 + Rbytes uint64 + Wbytes uint64 + Attachtime Timeval + Timestamp Timeval + Time Timeval +} +type Fsid struct { + Val [2]int32 +} +type Timeval struct { + Sec int64 + Usec int64 +} + +type Diskstat struct{} +type Bintime struct{} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_solaris.go b/vendor/github.com/shirou/gopsutil/disk/disk_solaris.go new file mode 100644 index 00000000..c6608357 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_solaris.go @@ -0,0 +1,127 @@ +// +build solaris + +package disk + +import ( + 
"bufio" + "context" + "fmt" + "math" + "os" + "strings" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +const ( + // _DEFAULT_NUM_MOUNTS is set to `cat /etc/mnttab | wc -l` rounded up to the + // nearest power of two. + _DEFAULT_NUM_MOUNTS = 32 + + // _MNTTAB default place to read mount information + _MNTTAB = "/etc/mnttab" +) + +var ( + // A blacklist of read-only virtual filesystems. Writable filesystems are of + // operational concern and must not be included in this list. + fsTypeBlacklist = map[string]struct{}{ + "ctfs": struct{}{}, + "dev": struct{}{}, + "fd": struct{}{}, + "lofs": struct{}{}, + "lxproc": struct{}{}, + "mntfs": struct{}{}, + "objfs": struct{}{}, + "proc": struct{}{}, + } +) + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + ret := make([]PartitionStat, 0, _DEFAULT_NUM_MOUNTS) + + // Scan mnttab(4) + f, err := os.Open(_MNTTAB) + if err != nil { + } + defer func() { + if err == nil { + err = f.Close() + } else { + f.Close() + } + }() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := strings.Split(scanner.Text(), "\t") + + if _, found := fsTypeBlacklist[fields[2]]; found { + continue + } + + ret = append(ret, PartitionStat{ + // NOTE(seanc@): Device isn't exactly accurate: from mnttab(4): "The name + // of the resource that has been mounted." Ideally this value would come + // from Statvfs_t.Fsid but I'm leaving it to the caller to traverse + // unix.Statvfs(). + Device: fields[0], + Mountpoint: fields[1], + Fstype: fields[2], + Opts: fields[3], + }) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("unable to scan %q: %v", _MNTTAB, err) + } + + return ret, err +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + statvfs := unix.Statvfs_t{} + if err := unix.Statvfs(path, &statvfs); err != nil { + return nil, fmt.Errorf("unable to call statvfs(2) on %q: %v", path, err) + } + + usageStat := &UsageStat{ + Path: path, + Fstype: common.IntToString(statvfs.Basetype[:]), + Total: statvfs.Blocks * statvfs.Frsize, + Free: statvfs.Bfree * statvfs.Frsize, + Used: (statvfs.Blocks - statvfs.Bfree) * statvfs.Frsize, + + // NOTE: ZFS (and FreeBZSD's UFS2) use dynamic inode/dnode allocation. + // Explicitly return a near-zero value for InodesUsedPercent so that nothing + // attempts to garbage collect based on a lack of available inodes/dnodes. + // Similarly, don't use the zero value to prevent divide-by-zero situations + // and inject a faux near-zero value. Filesystems evolve. Has your + // filesystem evolved? Probably not if you care about the number of + // available inodes. 
+ InodesTotal: 1024.0 * 1024.0, + InodesUsed: 1024.0, + InodesFree: math.MaxUint64, + InodesUsedPercent: (1024.0 / (1024.0 * 1024.0)) * 100.0, + } + + usageStat.UsedPercent = (float64(usageStat.Used) / float64(usageStat.Total)) * 100.0 + + return usageStat, nil +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_unix.go b/vendor/github.com/shirou/gopsutil/disk/disk_unix.go new file mode 100644 index 00000000..86ab99cb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_unix.go @@ -0,0 +1,68 @@ +// +build freebsd linux darwin + +package disk + +import ( + "context" + "strconv" + + "golang.org/x/sys/unix" +) + +// Usage returns a file system usage. path is a filesystem path such +// as "/", not device file path like "/dev/vda1". If you want to use +// a return value of disk.Partitions, use "Mountpoint" not "Device". +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + stat := unix.Statfs_t{} + err := unix.Statfs(path, &stat) + if err != nil { + return nil, err + } + bsize := stat.Bsize + + ret := &UsageStat{ + Path: unescapeFstab(path), + Fstype: getFsType(stat), + Total: (uint64(stat.Blocks) * uint64(bsize)), + Free: (uint64(stat.Bavail) * uint64(bsize)), + InodesTotal: (uint64(stat.Files)), + InodesFree: (uint64(stat.Ffree)), + } + + // if could not get InodesTotal, return empty + if ret.InodesTotal < ret.InodesFree { + return ret, nil + } + + ret.InodesUsed = (ret.InodesTotal - ret.InodesFree) + ret.Used = (uint64(stat.Blocks) - uint64(stat.Bfree)) * uint64(bsize) + + if ret.InodesTotal == 0 { + ret.InodesUsedPercent = 0 + } else { + ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0 + } + + if (ret.Used + ret.Free) == 0 { + ret.UsedPercent = 0 + } else { + // We don't use ret.Total to calculate percent. 
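// Editor's note, not part of the vendored source: filesystems such as ext4
// reserve a slice of blocks for root, so Used+Free can be smaller than
// Total; computing the percentage as Used/(Used+Free) matches what df(1)
// prints.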
+ // see https://github.com/shirou/gopsutil/issues/562 + ret.UsedPercent = (float64(ret.Used) / float64(ret.Used+ret.Free)) * 100.0 + } + + return ret, nil +} + +// Unescape escaped octal chars (like space 040, ampersand 046 and backslash 134) to their real value in fstab fields issue#555 +func unescapeFstab(path string) string { + escaped, err := strconv.Unquote(`"` + path + `"`) + if err != nil { + return path + } + return escaped +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_windows.go b/vendor/github.com/shirou/gopsutil/disk/disk_windows.go new file mode 100644 index 00000000..02c965dd --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_windows.go @@ -0,0 +1,166 @@ +// +build windows + +package disk + +import ( + "bytes" + "context" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/windows" +) + +var ( + procGetDiskFreeSpaceExW = common.Modkernel32.NewProc("GetDiskFreeSpaceExW") + procGetLogicalDriveStringsW = common.Modkernel32.NewProc("GetLogicalDriveStringsW") + procGetDriveType = common.Modkernel32.NewProc("GetDriveTypeW") + procGetVolumeInformation = common.Modkernel32.NewProc("GetVolumeInformationW") +) + +var ( + FileFileCompression = int64(16) // 0x00000010 + FileReadOnlyVolume = int64(524288) // 0x00080000 +) + +type Win32_PerfFormattedData struct { + Name string + AvgDiskBytesPerRead uint64 + AvgDiskBytesPerWrite uint64 + AvgDiskReadQueueLength uint64 + AvgDiskWriteQueueLength uint64 + AvgDisksecPerRead uint64 + AvgDisksecPerWrite uint64 +} + +const WaitMSec = 500 + +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + lpFreeBytesAvailable := int64(0) + lpTotalNumberOfBytes := int64(0) + lpTotalNumberOfFreeBytes := int64(0) + diskret, _, err := procGetDiskFreeSpaceExW.Call( + uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(path))), + uintptr(unsafe.Pointer(&lpFreeBytesAvailable)), + uintptr(unsafe.Pointer(&lpTotalNumberOfBytes)), + uintptr(unsafe.Pointer(&lpTotalNumberOfFreeBytes))) + if diskret == 0 { + return nil, err + } + ret := &UsageStat{ + Path: path, + Total: uint64(lpTotalNumberOfBytes), + Free: uint64(lpTotalNumberOfFreeBytes), + Used: uint64(lpTotalNumberOfBytes) - uint64(lpTotalNumberOfFreeBytes), + UsedPercent: (float64(lpTotalNumberOfBytes) - float64(lpTotalNumberOfFreeBytes)) / float64(lpTotalNumberOfBytes) * 100, + // InodesTotal: 0, + // InodesFree: 0, + // InodesUsed: 0, + // InodesUsedPercent: 0, + } + return ret, nil +} + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + var ret []PartitionStat + lpBuffer := make([]byte, 254) + diskret, _, err := procGetLogicalDriveStringsW.Call( + uintptr(len(lpBuffer)), + uintptr(unsafe.Pointer(&lpBuffer[0]))) + if diskret == 0 { + return ret, err + } + for _, v := range lpBuffer { + if v >= 65 && v <= 90 { + path := string(v) + ":" + typepath, _ := windows.UTF16PtrFromString(path) + typeret, _, _ := procGetDriveType.Call(uintptr(unsafe.Pointer(typepath))) + if typeret == 0 { + return ret, windows.GetLastError() + } + // 2: DRIVE_REMOVABLE 3: DRIVE_FIXED 4: DRIVE_REMOTE 5: DRIVE_CDROM + + if typeret == 2 || typeret == 3 || typeret == 4 || typeret == 5 { + lpVolumeNameBuffer := make([]byte, 256) + lpVolumeSerialNumber := int64(0) + lpMaximumComponentLength := int64(0) + 
lpFileSystemFlags := int64(0) + lpFileSystemNameBuffer := make([]byte, 256) + volpath, _ := windows.UTF16PtrFromString(string(v) + ":/") + driveret, _, err := procGetVolumeInformation.Call( + uintptr(unsafe.Pointer(volpath)), + uintptr(unsafe.Pointer(&lpVolumeNameBuffer[0])), + uintptr(len(lpVolumeNameBuffer)), + uintptr(unsafe.Pointer(&lpVolumeSerialNumber)), + uintptr(unsafe.Pointer(&lpMaximumComponentLength)), + uintptr(unsafe.Pointer(&lpFileSystemFlags)), + uintptr(unsafe.Pointer(&lpFileSystemNameBuffer[0])), + uintptr(len(lpFileSystemNameBuffer))) + if driveret == 0 { + if typeret == 5 || typeret == 2 { + continue // "device is not ready" happens when there is no disk in the drive + } + return ret, err + } + opts := "rw" + if lpFileSystemFlags&FileReadOnlyVolume != 0 { + opts = "ro" + } + if lpFileSystemFlags&FileFileCompression != 0 { + opts += ".compress" + } + + d := PartitionStat{ + Mountpoint: path, + Device: path, + Fstype: string(bytes.Replace(lpFileSystemNameBuffer, []byte("\x00"), []byte(""), -1)), + Opts: opts, + } + ret = append(ret, d) + } + } + } + return ret, nil +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + ret := make(map[string]IOCountersStat, 0) + var dst []Win32_PerfFormattedData + + err := common.WMIQueryWithContext(ctx, "SELECT * FROM Win32_PerfFormattedData_PerfDisk_LogicalDisk", &dst) + if err != nil { + return ret, err + } + for _, d := range dst { + if len(d.Name) > 3 { // skip aggregate entries such as _Total or Harddrive + continue + } + + if len(names) > 0 && !common.StringsHas(names, d.Name) { + continue + } + + ret[d.Name] = IOCountersStat{ + Name: d.Name, + ReadCount: uint64(d.AvgDiskReadQueueLength), + WriteCount: d.AvgDiskWriteQueueLength, + ReadBytes: uint64(d.AvgDiskBytesPerRead), + WriteBytes: uint64(d.AvgDiskBytesPerWrite), + ReadTime: d.AvgDisksecPerRead, + WriteTime: d.AvgDisksecPerWrite, + } + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/host/host.go b/vendor/github.com/shirou/gopsutil/host/host.go new file mode 100644 index 00000000..e100bc5f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host.go @@ -0,0 +1,54 @@ +package host + +import ( + "encoding/json" + + "github.com/shirou/gopsutil/internal/common" +) + +var invoke common.Invoker = common.Invoke{} + +// A HostInfoStat describes the host status. +// This is not in psutil, but it is useful. 
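// Editor's note, not part of the vendored source: a minimal usage sketch,
// assuming this package is imported as "github.com/shirou/gopsutil/host":
//
//	info, err := host.Info()
//	if err == nil {
//		fmt.Printf("%s %s, up %d seconds\n",
//			info.Platform, info.PlatformVersion, info.Uptime)
//	}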
+type InfoStat struct { + Hostname string `json:"hostname"` + Uptime uint64 `json:"uptime"` + BootTime uint64 `json:"bootTime"` + Procs uint64 `json:"procs"` // number of processes + OS string `json:"os"` // ex: freebsd, linux + Platform string `json:"platform"` // ex: ubuntu, linuxmint + PlatformFamily string `json:"platformFamily"` // ex: debian, rhel + PlatformVersion string `json:"platformVersion"` // version of the complete OS + KernelVersion string `json:"kernelVersion"` // version of the OS kernel (if available) + KernelArch string `json:"kernelArch"` // native cpu architecture queried at runtime, as returned by `uname -m` or empty string in case of error + VirtualizationSystem string `json:"virtualizationSystem"` + VirtualizationRole string `json:"virtualizationRole"` // guest or host + HostID string `json:"hostid"` // ex: uuid +} + +type UserStat struct { + User string `json:"user"` + Terminal string `json:"terminal"` + Host string `json:"host"` + Started int `json:"started"` +} + +type TemperatureStat struct { + SensorKey string `json:"sensorKey"` + Temperature float64 `json:"sensorTemperature"` +} + +func (h InfoStat) String() string { + s, _ := json.Marshal(h) + return string(s) +} + +func (u UserStat) String() string { + s, _ := json.Marshal(u) + return string(s) +} + +func (t TemperatureStat) String() string { + s, _ := json.Marshal(t) + return string(s) +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_darwin.go b/vendor/github.com/shirou/gopsutil/host/host_darwin.go new file mode 100644 index 00000000..d40457ca --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_darwin.go @@ -0,0 +1,233 @@ +// +build darwin + +package host + +import ( + "bytes" + "context" + "encoding/binary" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "sync/atomic" + "time" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/process" + "golang.org/x/sys/unix" +) + +// from utmpx.h +const USER_PROCESS = 7 + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + ret := &InfoStat{ + OS: runtime.GOOS, + PlatformFamily: "darwin", + } + + hostname, err := os.Hostname() + if err == nil { + ret.Hostname = hostname + } + + kernelVersion, err := KernelVersionWithContext(ctx) + if err == nil { + ret.KernelVersion = kernelVersion + } + + kernelArch, err := kernelArch() + if err == nil { + ret.KernelArch = kernelArch + } + + platform, family, pver, err := PlatformInformation() + if err == nil { + ret.Platform = platform + ret.PlatformFamily = family + ret.PlatformVersion = pver + } + + system, role, err := Virtualization() + if err == nil { + ret.VirtualizationSystem = system + ret.VirtualizationRole = role + } + + boot, err := BootTime() + if err == nil { + ret.BootTime = boot + ret.Uptime = uptime(boot) + } + + procs, err := process.Pids() + if err == nil { + ret.Procs = uint64(len(procs)) + } + + uuid, err := unix.Sysctl("kern.uuid") + if err == nil && uuid != "" { + ret.HostID = strings.ToLower(uuid) + } + + return ret, nil +} + +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + // https://github.com/AaronO/dashd/blob/222e32ef9f7a1f9bea4a8da2c3627c4cb992f860/probe/probe_darwin.go + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil 
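// Editor's note, not part of the vendored source: cachedBootTime is only
// ever touched through atomic.LoadUint64/StoreUint64, so concurrent
// callers race only on the first, idempotent sysctl lookup.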
+ } + value, err := unix.Sysctl("kern.boottime") + if err != nil { + return 0, err + } + bytes := []byte(value[:]) + var boottime uint64 + boottime = uint64(bytes[0]) + uint64(bytes[1])*256 + uint64(bytes[2])*256*256 + uint64(bytes[3])*256*256*256 + + atomic.StoreUint64(&cachedBootTime, boottime) + + return boottime, nil +} + +func uptime(boot uint64) uint64 { + return uint64(time.Now().Unix()) - boot +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + boot, err := BootTimeWithContext(ctx) + if err != nil { + return 0, err + } + return uptime(boot), nil +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + utmpfile := "/var/run/utmpx" + var ret []UserStat + + file, err := os.Open(utmpfile) + if err != nil { + return ret, err + } + defer file.Close() + + buf, err := ioutil.ReadAll(file) + if err != nil { + return ret, err + } + + u := Utmpx{} + entrySize := int(unsafe.Sizeof(u)) + count := len(buf) / entrySize + + for i := 0; i < count; i++ { + b := buf[i*entrySize : i*entrySize+entrySize] + + var u Utmpx + br := bytes.NewReader(b) + err := binary.Read(br, binary.LittleEndian, &u) + if err != nil { + continue + } + if u.Type != USER_PROCESS { + continue + } + user := UserStat{ + User: common.IntToString(u.User[:]), + Terminal: common.IntToString(u.Line[:]), + Host: common.IntToString(u.Host[:]), + Started: int(u.Tv.Sec), + } + ret = append(ret, user) + } + + return ret, nil + +} + +func PlatformInformation() (string, string, string, error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { + platform := "" + family := "" + pver := "" + + sw_vers, err := exec.LookPath("sw_vers") + if err != nil { + return "", "", "", err + } + + p, err := unix.Sysctl("kern.ostype") + if err == nil { + platform = strings.ToLower(p) + } + + out, err := invoke.CommandWithContext(ctx, sw_vers, "-productVersion") + if err == nil { + pver = strings.ToLower(strings.TrimSpace(string(out))) + } + + // check if the macos server version file exists + _, err = os.Stat("/System/Library/CoreServices/ServerVersion.plist") + + // server file doesn't exist + if os.IsNotExist(err) { + family = "Standalone Workstation" + } else { + family = "Server" + } + + return platform, family, pver, nil +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + version, err := unix.Sysctl("kern.osrelease") + return strings.ToLower(version), err +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + return []TemperatureStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_darwin_386.go b/vendor/github.com/shirou/gopsutil/host/host_darwin_386.go new file mode 100644 index 00000000..c3596f9f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_darwin_386.go 
@@ -0,0 +1,19 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package host + +type Utmpx struct { + User [256]int8 + ID [4]int8 + Line [32]int8 + Pid int32 + Type int16 + Pad_cgo_0 [6]byte + Tv Timeval + Host [256]int8 + Pad [16]uint32 +} +type Timeval struct { + Sec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/host/host_darwin_amd64.go new file mode 100644 index 00000000..c3596f9f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_darwin_amd64.go @@ -0,0 +1,19 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package host + +type Utmpx struct { + User [256]int8 + ID [4]int8 + Line [32]int8 + Pid int32 + Type int16 + Pad_cgo_0 [6]byte + Tv Timeval + Host [256]int8 + Pad [16]uint32 +} +type Timeval struct { + Sec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_fallback.go b/vendor/github.com/shirou/gopsutil/host/host_fallback.go new file mode 100644 index 00000000..e80d7ea3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_fallback.go @@ -0,0 +1,65 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows + +package host + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + return nil, common.ErrNotImplementedError +} + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + return 0, common.ErrNotImplementedError +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + return 0, common.ErrNotImplementedError +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + return []UserStat{}, common.ErrNotImplementedError +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func PlatformInformation() (string, string, string, error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { + return "", "", "", common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_freebsd.go b/vendor/github.com/shirou/gopsutil/host/host_freebsd.go new file mode 100644 index 00000000..6dc4bc18 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_freebsd.go @@ -0,0 +1,251 @@ +// +build freebsd + +package host + +import ( + "bytes" + "context" + "encoding/binary" + "io/ioutil" + "math" + "os" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/process" + "golang.org/x/sys/unix" +) + +const ( + UTNameSize = 16 /* see MAXLOGNAME in */ + UTLineSize = 8 + UTHostSize = 16 +) + +func Info() (*InfoStat, error) { + return 
InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + ret := &InfoStat{ + OS: runtime.GOOS, + PlatformFamily: "freebsd", + } + + hostname, err := os.Hostname() + if err == nil { + ret.Hostname = hostname + } + + platform, family, version, err := PlatformInformation() + if err == nil { + ret.Platform = platform + ret.PlatformFamily = family + ret.PlatformVersion = version + ret.KernelVersion = version + } + + kernelArch, err := kernelArch() + if err == nil { + ret.KernelArch = kernelArch + } + + system, role, err := Virtualization() + if err == nil { + ret.VirtualizationSystem = system + ret.VirtualizationRole = role + } + + boot, err := BootTime() + if err == nil { + ret.BootTime = boot + ret.Uptime = uptime(boot) + } + + procs, err := process.Pids() + if err == nil { + ret.Procs = uint64(len(procs)) + } + + hostid, err := unix.Sysctl("kern.hostuuid") + if err == nil && hostid != "" { + ret.HostID = strings.ToLower(hostid) + } + + return ret, nil +} + +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + buf, err := unix.SysctlRaw("kern.boottime") + if err != nil { + return 0, err + } + + tv := *(*syscall.Timeval)(unsafe.Pointer((&buf[0]))) + atomic.StoreUint64(&cachedBootTime, uint64(tv.Sec)) + + // return the freshly decoded value; t still holds the stale zero loaded above + return uint64(tv.Sec), nil +} + +func uptime(boot uint64) uint64 { + return uint64(time.Now().Unix()) - boot +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + boot, err := BootTime() + if err != nil { + return 0, err + } + return uptime(boot), nil +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + utmpfile := "/var/run/utx.active" + if !common.PathExists(utmpfile) { + utmpfile = "/var/run/utmp" // before 9.0 + return getUsersFromUtmp(utmpfile) + } + + var ret []UserStat + file, err := os.Open(utmpfile) + if err != nil { + return ret, err + } + defer file.Close() + + buf, err := ioutil.ReadAll(file) + if err != nil { + return ret, err + } + + entrySize := sizeOfUtmpx + count := len(buf) / entrySize + + for i := 0; i < count; i++ { + b := buf[i*sizeOfUtmpx : (i+1)*sizeOfUtmpx] + var u Utmpx + br := bytes.NewReader(b) + err := binary.Read(br, binary.BigEndian, &u) + if err != nil || u.Type != 4 { + continue + } + sec := math.Floor(float64(u.Tv) / 1000000) + user := UserStat{ + User: common.IntToString(u.User[:]), + Terminal: common.IntToString(u.Line[:]), + Host: common.IntToString(u.Host[:]), + Started: int(sec), + } + + ret = append(ret, user) + } + + return ret, nil + +} + +func PlatformInformation() (string, string, string, error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { + platform, err := unix.Sysctl("kern.ostype") + if err != nil { + return "", "", "", err + } + + version, err := unix.Sysctl("kern.osrelease") + if err != nil { + return "", "", "", err + } + + return strings.ToLower(platform), "", strings.ToLower(version), nil +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func
VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +// before 9.0 +func getUsersFromUtmp(utmpfile string) ([]UserStat, error) { + var ret []UserStat + file, err := os.Open(utmpfile) + if err != nil { + return ret, err + } + defer file.Close() + + buf, err := ioutil.ReadAll(file) + if err != nil { + return ret, err + } + + u := Utmp{} + entrySize := int(unsafe.Sizeof(u)) + count := len(buf) / entrySize + + for i := 0; i < count; i++ { + b := buf[i*entrySize : i*entrySize+entrySize] + var u Utmp + br := bytes.NewReader(b) + err := binary.Read(br, binary.LittleEndian, &u) + if err != nil || u.Time == 0 { + continue + } + user := UserStat{ + User: common.IntToString(u.Name[:]), + Terminal: common.IntToString(u.Line[:]), + Host: common.IntToString(u.Host[:]), + Started: int(u.Time), + } + + ret = append(ret, user) + } + + return ret, nil +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + return []TemperatureStat{}, common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + _, _, version, err := PlatformInformation() + return version, err +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_freebsd_386.go b/vendor/github.com/shirou/gopsutil/host/host_freebsd_386.go new file mode 100644 index 00000000..88453d2a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_freebsd_386.go @@ -0,0 +1,37 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types_freebsd.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmpx = 0xc5 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Utmp struct { + Line [8]int8 + Name [16]int8 + Host [16]int8 + Time int32 +} + +type Utmpx struct { + Type uint8 + Tv uint64 + Id [8]int8 + Pid uint32 + User [32]int8 + Line [16]int8 + Host [128]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/host/host_freebsd_amd64.go new file mode 100644 index 00000000..8af74b0f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_freebsd_amd64.go @@ -0,0 +1,37 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types_freebsd.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmpx = 0xc5 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Utmp struct { + Line [8]int8 + Name [16]int8 + Host [16]int8 + Time int32 +} + +type Utmpx struct { + Type uint8 + Tv uint64 + Id [8]int8 + Pid uint32 + User [32]int8 + Line [16]int8 + Host [128]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/host/host_freebsd_arm.go new file mode 100644 index 00000000..f7d6ede5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_freebsd_arm.go @@ -0,0 +1,37 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
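+// Utmpx below is a fixed-size on-disk record (sizeOfUtmpx = 0xc5 bytes), which
+// is why UsersWithContext can slice /var/run/utx.active into equal-length
+// chunks and decode each chunk with a single big-endian binary.Read.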
+// cgo -godefs types_freebsd.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmpx = 0xc5 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Utmp struct { + Line [8]int8 + Name [16]int8 + Host [16]int8 + Time int32 +} + +type Utmpx struct { + Type uint8 + Tv uint64 + Id [8]int8 + Pid uint32 + User [32]int8 + Line [16]int8 + Host [128]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux.go b/vendor/github.com/shirou/gopsutil/host/host_linux.go new file mode 100644 index 00000000..272016df --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux.go @@ -0,0 +1,510 @@ +// +build linux + +package host + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +type LSB struct { + ID string + Release string + Codename string + Description string +} + +// from utmp.h +const USER_PROCESS = 7 + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + ret := &InfoStat{ + OS: runtime.GOOS, + } + + hostname, err := os.Hostname() + if err == nil { + ret.Hostname = hostname + } + + platform, family, version, err := PlatformInformation() + if err == nil { + ret.Platform = platform + ret.PlatformFamily = family + ret.PlatformVersion = version + } + kernelVersion, err := KernelVersion() + if err == nil { + ret.KernelVersion = kernelVersion + } + + kernelArch, err := kernelArch() + if err == nil { + ret.KernelArch = kernelArch + } + + system, role, err := Virtualization() + if err == nil { + ret.VirtualizationSystem = system + ret.VirtualizationRole = role + } + + boot, err := BootTime() + if err == nil { + ret.BootTime = boot + ret.Uptime = uptime(boot) + } + + if numProcs, err := common.NumProcs(); err == nil { + ret.Procs = numProcs + } + + sysProductUUID := common.HostSys("class/dmi/id/product_uuid") + machineID := common.HostEtc("machine-id") + procSysKernelRandomBootID := common.HostProc("sys/kernel/random/boot_id") + switch { + // In order to read this file, needs to be supported by kernel/arch and run as root + // so having fallback is important + case common.PathExists(sysProductUUID): + lines, err := common.ReadLines(sysProductUUID) + if err == nil && len(lines) > 0 && lines[0] != "" { + ret.HostID = strings.ToLower(lines[0]) + break + } + fallthrough + // Fallback on GNU Linux systems with systemd, readable by everyone + case common.PathExists(machineID): + lines, err := common.ReadLines(machineID) + if err == nil && len(lines) > 0 && len(lines[0]) == 32 { + st := lines[0] + ret.HostID = fmt.Sprintf("%s-%s-%s-%s-%s", st[0:8], st[8:12], st[12:16], st[16:20], st[20:32]) + break + } + fallthrough + // Not stable between reboot, but better than nothing + default: + lines, err := common.ReadLines(procSysKernelRandomBootID) + if err == nil && len(lines) > 0 && lines[0] != "" { + ret.HostID = strings.ToLower(lines[0]) + } + } + + return ret, nil +} + +// BootTime returns the system boot time expressed in seconds since the epoch. 
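+// On Linux the work is delegated to common.BootTimeWithContext; as an
+// assumption about that vendored helper, it typically parses the btime field
+// of /proc/stat (honoring the HOST_PROC override used elsewhere in gopsutil).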
+func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + return common.BootTimeWithContext(ctx) +} + +func uptime(boot uint64) uint64 { + return uint64(time.Now().Unix()) - boot +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + boot, err := BootTime() + if err != nil { + return 0, err + } + return uptime(boot), nil +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + utmpfile := common.HostVar("run/utmp") + + file, err := os.Open(utmpfile) + if err != nil { + return nil, err + } + defer file.Close() + + buf, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + + count := len(buf) / sizeOfUtmp + + ret := make([]UserStat, 0, count) + + for i := 0; i < count; i++ { + b := buf[i*sizeOfUtmp : (i+1)*sizeOfUtmp] + + var u utmp + br := bytes.NewReader(b) + err := binary.Read(br, binary.LittleEndian, &u) + if err != nil { + continue + } + if u.Type != USER_PROCESS { + continue + } + user := UserStat{ + User: common.IntToString(u.User[:]), + Terminal: common.IntToString(u.Line[:]), + Host: common.IntToString(u.Host[:]), + Started: int(u.Tv.Sec), + } + ret = append(ret, user) + } + + return ret, nil + +} + +func getLSB() (*LSB, error) { + ret := &LSB{} + if common.PathExists(common.HostEtc("lsb-release")) { + contents, err := common.ReadLines(common.HostEtc("lsb-release")) + if err != nil { + return ret, err // return empty + } + for _, line := range contents { + field := strings.Split(line, "=") + if len(field) < 2 { + continue + } + switch field[0] { + case "DISTRIB_ID": + ret.ID = field[1] + case "DISTRIB_RELEASE": + ret.Release = field[1] + case "DISTRIB_CODENAME": + ret.Codename = field[1] + case "DISTRIB_DESCRIPTION": + ret.Description = field[1] + } + } + } else if common.PathExists("/usr/bin/lsb_release") { + lsb_release, err := exec.LookPath("lsb_release") + if err != nil { + return ret, err + } + out, err := invoke.Command(lsb_release) + if err != nil { + return ret, err + } + for _, line := range strings.Split(string(out), "\n") { + field := strings.Split(line, ":") + if len(field) < 2 { + continue + } + switch field[0] { + case "Distributor ID": + ret.ID = field[1] + case "Release": + ret.Release = field[1] + case "Codename": + ret.Codename = field[1] + case "Description": + ret.Description = field[1] + } + } + + } + + return ret, nil +} + +func PlatformInformation() (platform string, family string, version string, err error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) { + + lsb, err := getLSB() + if err != nil { + lsb = &LSB{} + } + + if common.PathExists(common.HostEtc("oracle-release")) { + platform = "oracle" + contents, err := common.ReadLines(common.HostEtc("oracle-release")) + if err == nil { + version = getRedhatishVersion(contents) + } + + } else if common.PathExists(common.HostEtc("enterprise-release")) { + platform = "oracle" + contents, err := common.ReadLines(common.HostEtc("enterprise-release")) + if err == nil { + version = getRedhatishVersion(contents) + } + } else if common.PathExists(common.HostEtc("slackware-version")) { + platform = "slackware" + contents, err := 
common.ReadLines(common.HostEtc("slackware-version")) + if err == nil { + version = getSlackwareVersion(contents) + } + } else if common.PathExists(common.HostEtc("debian_version")) { + if lsb.ID == "Ubuntu" { + platform = "ubuntu" + version = lsb.Release + } else if lsb.ID == "LinuxMint" { + platform = "linuxmint" + version = lsb.Release + } else { + if common.PathExists("/usr/bin/raspi-config") { + platform = "raspbian" + } else { + platform = "debian" + } + contents, err := common.ReadLines(common.HostEtc("debian_version")) + if err == nil { + version = contents[0] + } + } + } else if common.PathExists(common.HostEtc("redhat-release")) { + contents, err := common.ReadLines(common.HostEtc("redhat-release")) + if err == nil { + version = getRedhatishVersion(contents) + platform = getRedhatishPlatform(contents) + } + } else if common.PathExists(common.HostEtc("system-release")) { + contents, err := common.ReadLines(common.HostEtc("system-release")) + if err == nil { + version = getRedhatishVersion(contents) + platform = getRedhatishPlatform(contents) + } + } else if common.PathExists(common.HostEtc("gentoo-release")) { + platform = "gentoo" + contents, err := common.ReadLines(common.HostEtc("gentoo-release")) + if err == nil { + version = getRedhatishVersion(contents) + } + } else if common.PathExists(common.HostEtc("SuSE-release")) { + contents, err := common.ReadLines(common.HostEtc("SuSE-release")) + if err == nil { + version = getSuseVersion(contents) + platform = getSusePlatform(contents) + } + // TODO: slackware detecion + } else if common.PathExists(common.HostEtc("arch-release")) { + platform = "arch" + version = lsb.Release + } else if common.PathExists(common.HostEtc("alpine-release")) { + platform = "alpine" + contents, err := common.ReadLines(common.HostEtc("alpine-release")) + if err == nil && len(contents) > 0 { + version = contents[0] + } + } else if common.PathExists(common.HostEtc("os-release")) { + p, v, err := common.GetOSRelease() + if err == nil { + platform = p + version = v + } + } else if lsb.ID == "RedHat" { + platform = "redhat" + version = lsb.Release + } else if lsb.ID == "Amazon" { + platform = "amazon" + version = lsb.Release + } else if lsb.ID == "ScientificSL" { + platform = "scientific" + version = lsb.Release + } else if lsb.ID == "XenServer" { + platform = "xenserver" + version = lsb.Release + } else if lsb.ID != "" { + platform = strings.ToLower(lsb.ID) + version = lsb.Release + } + + switch platform { + case "debian", "ubuntu", "linuxmint", "raspbian": + family = "debian" + case "fedora": + family = "fedora" + case "oracle", "centos", "redhat", "scientific", "enterpriseenterprise", "amazon", "xenserver", "cloudlinux", "ibm_powerkvm": + family = "rhel" + case "suse", "opensuse", "sles": + family = "suse" + case "gentoo": + family = "gentoo" + case "slackware": + family = "slackware" + case "arch": + family = "arch" + case "exherbo": + family = "exherbo" + case "alpine": + family = "alpine" + case "coreos": + family = "coreos" + case "solus": + family = "solus" + } + + return platform, family, version, nil + +} + +func KernelVersion() (version string, err error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (version string, err error) { + var utsname unix.Utsname + err = unix.Uname(&utsname) + if err != nil { + return "", err + } + return string(utsname.Release[:bytes.IndexByte(utsname.Release[:], 0)]), nil +} + +func getSlackwareVersion(contents []string) string { + c := 
strings.ToLower(strings.Join(contents, "")) + c = strings.Replace(c, "slackware ", "", 1) + return c +} + +func getRedhatishVersion(contents []string) string { + c := strings.ToLower(strings.Join(contents, "")) + + if strings.Contains(c, "rawhide") { + return "rawhide" + } + if matches := regexp.MustCompile(`release (\d[\d.]*)`).FindStringSubmatch(c); matches != nil { + return matches[1] + } + return "" +} + +func getRedhatishPlatform(contents []string) string { + c := strings.ToLower(strings.Join(contents, "")) + + if strings.Contains(c, "red hat") { + return "redhat" + } + f := strings.Split(c, " ") + + return f[0] +} + +func getSuseVersion(contents []string) string { + version := "" + for _, line := range contents { + if matches := regexp.MustCompile(`VERSION = ([\d.]+)`).FindStringSubmatch(line); matches != nil { + version = matches[1] + } else if matches := regexp.MustCompile(`PATCHLEVEL = ([\d]+)`).FindStringSubmatch(line); matches != nil { + version = version + "." + matches[1] + } + } + return version +} + +func getSusePlatform(contents []string) string { + c := strings.ToLower(strings.Join(contents, "")) + if strings.Contains(c, "opensuse") { + return "opensuse" + } + return "suse" +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return common.VirtualizationWithContext(ctx) +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + var temperatures []TemperatureStat + files, err := filepath.Glob(common.HostSys("/class/hwmon/hwmon*/temp*_*")) + if err != nil { + return temperatures, err + } + if len(files) == 0 { + // CentOS has an intermediate /device directory: + // https://github.com/giampaolo/psutil/issues/971 + files, err = filepath.Glob(common.HostSys("/class/hwmon/hwmon*/device/temp*_*")) + if err != nil { + return temperatures, err + } + } + var warns Warnings + + // example directory + // device/ temp1_crit_alarm temp2_crit_alarm temp3_crit_alarm temp4_crit_alarm temp5_crit_alarm temp6_crit_alarm temp7_crit_alarm + // name temp1_input temp2_input temp3_input temp4_input temp5_input temp6_input temp7_input + // power/ temp1_label temp2_label temp3_label temp4_label temp5_label temp6_label temp7_label + // subsystem/ temp1_max temp2_max temp3_max temp4_max temp5_max temp6_max temp7_max + // temp1_crit temp2_crit temp3_crit temp4_crit temp5_crit temp6_crit temp7_crit uevent + for _, file := range files { + filename := strings.Split(filepath.Base(file), "_") + if filename[1] == "label" { + // Do not try to read the temperature of the label file + continue + } + + // Get the label of the temperature you are reading + var label string + c, _ := ioutil.ReadFile(filepath.Join(filepath.Dir(file), filename[0]+"_label")) + if c != nil { + //format the label from "Core 0" to "core0_" + label = fmt.Sprintf("%s_", strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(c))), " "), "")) + } + + // Get the name of the temperature you are reading + name, err := ioutil.ReadFile(filepath.Join(filepath.Dir(file), "name")) + if err != nil { + warns.Add(err) + continue + } + + // Get the temperature reading + current, err := ioutil.ReadFile(file) + if err != nil { + warns.Add(err) + continue + } + temperature, err := strconv.ParseFloat(strings.TrimSpace(string(current)), 64) + if 
err != nil { + warns.Add(err) + continue + } + + tempName := strings.TrimSpace(strings.ToLower(string(strings.Join(filename[1:], "")))) + temperatures = append(temperatures, TemperatureStat{ + SensorKey: fmt.Sprintf("%s_%s%s", strings.TrimSpace(string(name)), label, tempName), + Temperature: temperature / 1000.0, + }) + } + return temperatures, warns.Reference() +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_386.go b/vendor/github.com/shirou/gopsutil/host/host_linux_386.go new file mode 100644 index 00000000..79b5cb5d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_386.go @@ -0,0 +1,45 @@ +// ATTENTION - FILE MANUAL FIXED AFTER CGO. +// Fixed line: Tv _Ctype_struct_timeval -> Tv UtTv +// Created by cgo -godefs, MANUAL FIXED +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + ID [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv UtTv + Addr_v6 [4]int32 + X__unused [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type UtTv struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_amd64.go b/vendor/github.com/shirou/gopsutil/host/host_linux_amd64.go new file mode 100644 index 00000000..9a69652f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_amd64.go @@ -0,0 +1,48 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv _Ctype_struct___0 + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int64 + Usec int64 +} + +type _Ctype_struct___0 struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_arm.go b/vendor/github.com/shirou/gopsutil/host/host_linux_arm.go new file mode 100644 index 00000000..e2cf4485 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_arm.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go | sed "s/uint8/int8/g" + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_arm64.go b/vendor/github.com/shirou/gopsutil/host/host_linux_arm64.go new file mode 100644 index 00000000..37dbe5c8 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_arm64.go 
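The SensorsTemperaturesWithContext implementation above globs /sys/class/hwmon for tempN_* files, pairs each reading with the hwmon name and the optional tempN_label, and divides the millidegree tempN_input values by 1000. A stripped-down sketch of that walk, assuming a standard hwmon sysfs layout (no HOST_SYS remapping and no CentOS-style /device indirection):

package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
)

func main() {
	files, _ := filepath.Glob("/sys/class/hwmon/hwmon*/temp*_input")
	for _, f := range files {
		raw, err := ioutil.ReadFile(f)
		if err != nil {
			continue
		}
		// tempN_input holds millidegrees Celsius as ASCII.
		milli, err := strconv.ParseFloat(strings.TrimSpace(string(raw)), 64)
		if err != nil {
			continue
		}
		// tempN_label is optional; fall back to the file name if absent.
		label := strings.TrimSuffix(filepath.Base(f), "_input") + "_label"
		name, err := ioutil.ReadFile(filepath.Join(filepath.Dir(f), label))
		key := strings.TrimSpace(string(name))
		if err != nil || key == "" {
			key = filepath.Base(f)
		}
		fmt.Printf("%s: %.1f°C\n", key, milli/1000.0)
	}
}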
@@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int64 + Usec int64 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_mips.go b/vendor/github.com/shirou/gopsutil/host/host_linux_mips.go new file mode 100644 index 00000000..b0fca093 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_mips.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__unused [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_mips64.go b/vendor/github.com/shirou/gopsutil/host/host_linux_mips64.go new file mode 100644 index 00000000..b0fca093 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_mips64.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__unused [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_mips64le.go b/vendor/github.com/shirou/gopsutil/host/host_linux_mips64le.go new file mode 100644 index 00000000..b0fca093 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_mips64le.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__unused [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_mipsle.go b/vendor/github.com/shirou/gopsutil/host/host_linux_mipsle.go new file 
mode 100644 index 00000000..b0fca093 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_mipsle.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__unused [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_ppc64le.go b/vendor/github.com/shirou/gopsutil/host/host_linux_ppc64le.go new file mode 100644 index 00000000..d081a081 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_ppc64le.go @@ -0,0 +1,45 @@ +// +build linux +// +build ppc64le +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int64 + Usec int64 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_s390x.go b/vendor/github.com/shirou/gopsutil/host/host_linux_s390x.go new file mode 100644 index 00000000..083fbf92 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_s390x.go @@ -0,0 +1,45 @@ +// +build linux +// +build s390x +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int64 + Usec int64 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_openbsd.go b/vendor/github.com/shirou/gopsutil/host/host_openbsd.go new file mode 100644 index 00000000..d1501e9c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_openbsd.go @@ -0,0 +1,203 @@ +// +build openbsd + +package host + +import ( + "bytes" + "context" + "encoding/binary" + "io/ioutil" + "os" + "runtime" + "strings" + "sync/atomic" + "time" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/process" + "golang.org/x/sys/unix" +) + +const ( + UTNameSize = 32 /* see MAXLOGNAME in */ + UTLineSize = 8 + UTHostSize = 16 +) + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + ret := &InfoStat{ + OS: runtime.GOOS, + PlatformFamily: "openbsd", + } 
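+	// Every probe below is best-effort: errors are swallowed and the
+	// corresponding fields are left at their zero values, so callers always
+	// receive a usable, possibly partial, InfoStat.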
+ + hostname, err := os.Hostname() + if err == nil { + ret.Hostname = hostname + } + + kernelArch, err := kernelArch() + if err == nil { + ret.KernelArch = kernelArch + } + + platform, family, version, err := PlatformInformation() + if err == nil { + ret.Platform = platform + ret.PlatformFamily = family + ret.PlatformVersion = version + } + system, role, err := Virtualization() + if err == nil { + ret.VirtualizationSystem = system + ret.VirtualizationRole = role + } + + procs, err := process.Pids() + if err == nil { + ret.Procs = uint64(len(procs)) + } + + boot, err := BootTime() + if err == nil { + ret.BootTime = boot + ret.Uptime = uptime(boot) + } + + return ret, nil +} + +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + // https://github.com/AaronO/dashd/blob/222e32ef9f7a1f9bea4a8da2c3627c4cb992f860/probe/probe_darwin.go + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + value, err := unix.Sysctl("kern.boottime") + if err != nil { + return 0, err + } + bytes := []byte(value[:]) + var boottime uint64 + boottime = uint64(bytes[0]) + uint64(bytes[1])*256 + uint64(bytes[2])*256*256 + uint64(bytes[3])*256*256*256 + + atomic.StoreUint64(&cachedBootTime, boottime) + + return boottime, nil +} + +func uptime(boot uint64) uint64 { + return uint64(time.Now().Unix()) - boot +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + boot, err := BootTime() + if err != nil { + return 0, err + } + return uptime(boot), nil +} + +func PlatformInformation() (string, string, string, error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { + platform := "" + family := "" + version := "" + + p, err := unix.Sysctl("kern.ostype") + if err == nil { + platform = strings.ToLower(p) + } + v, err := unix.Sysctl("kern.osrelease") + if err == nil { + version = strings.ToLower(v) + } + + return platform, family, version, nil +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + var ret []UserStat + utmpfile := "/var/run/utmp" + file, err := os.Open(utmpfile) + if err != nil { + return ret, err + } + defer file.Close() + + buf, err := ioutil.ReadAll(file) + if err != nil { + return ret, err + } + + u := Utmp{} + entrySize := int(unsafe.Sizeof(u)) + count := len(buf) / entrySize + + for i := 0; i < count; i++ { + b := buf[i*entrySize : i*entrySize+entrySize] + var u Utmp + br := bytes.NewReader(b) + err := binary.Read(br, binary.LittleEndian, &u) + if err != nil || u.Time == 0 { + continue + } + user := UserStat{ + User: common.IntToString(u.Name[:]), + Terminal: common.IntToString(u.Line[:]), + Host: common.IntToString(u.Host[:]), + Started: int(u.Time), + } + + ret = append(ret, user) + } + + return ret, nil +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func 
SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + return []TemperatureStat{}, common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + _, _, version, err := PlatformInformation() + return version, err +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/host/host_openbsd_amd64.go new file mode 100644 index 00000000..afe0943e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_openbsd_amd64.go @@ -0,0 +1,31 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x130 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Utmp struct { + Line [8]int8 + Name [32]int8 + Host [256]int8 + Time int64 +} +type Timeval struct { + Sec int64 + Usec int64 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_posix.go b/vendor/github.com/shirou/gopsutil/host/host_posix.go new file mode 100644 index 00000000..a1b2479e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_posix.go @@ -0,0 +1,15 @@ +// +build linux freebsd openbsd darwin solaris + +package host + +import ( + "bytes" + + "golang.org/x/sys/unix" +) + +func kernelArch() (string, error) { + var utsname unix.Utsname + err := unix.Uname(&utsname) + return string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), err +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_solaris.go b/vendor/github.com/shirou/gopsutil/host/host_solaris.go new file mode 100644 index 00000000..c6061b8e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_solaris.go @@ -0,0 +1,253 @@ +package host + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "os/exec" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "github.com/shirou/gopsutil/internal/common" +) + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + result := &InfoStat{ + OS: runtime.GOOS, + } + + hostname, err := os.Hostname() + if err != nil { + return nil, err + } + result.Hostname = hostname + + // Parse versions from output of `uname(1)` + uname, err := exec.LookPath("uname") + if err != nil { + return nil, err + } + + out, err := invoke.CommandWithContext(ctx, uname, "-srv") + if err != nil { + return nil, err + } + + fields := strings.Fields(string(out)) + if len(fields) >= 1 { + result.PlatformFamily = fields[0] + } + if len(fields) >= 2 { + result.KernelVersion = fields[1] + } + if len(fields) == 3 { + result.PlatformVersion = fields[2] + } + + kernelArch, err := kernelArch() + if err == nil { + result.KernelArch = kernelArch + } + + // Find distribution name from /etc/release + fh, err := os.Open("/etc/release") + if err != nil { + return nil, err + } + defer fh.Close() + + sc := bufio.NewScanner(fh) + if sc.Scan() { + line := strings.TrimSpace(sc.Text()) + switch { + case strings.HasPrefix(line, "SmartOS"): + result.Platform = "SmartOS" + case strings.HasPrefix(line, "OpenIndiana"): + result.Platform = "OpenIndiana" + case strings.HasPrefix(line, "OmniOS"): + result.Platform = "OmniOS" + case strings.HasPrefix(line, "Open Storage"): + result.Platform = "NexentaStor" + case 
strings.HasPrefix(line, "Solaris"): + result.Platform = "Solaris" + case strings.HasPrefix(line, "Oracle Solaris"): + result.Platform = "Solaris" + default: + result.Platform = strings.Fields(line)[0] + } + } + + switch result.Platform { + case "SmartOS": + // If everything works, use the current zone ID as the HostID if present. + zonename, err := exec.LookPath("zonename") + if err == nil { + out, err := invoke.CommandWithContext(ctx, zonename) + if err == nil { + sc := bufio.NewScanner(bytes.NewReader(out)) + for sc.Scan() { + line := sc.Text() + + // If we're in the global zone, rely on the hostname. + if line == "global" { + hostname, err := os.Hostname() + if err == nil { + result.HostID = hostname + } + } else { + result.HostID = strings.TrimSpace(line) + break + } + } + } + } + } + + // If HostID is still empty, use hostid(1), which can lie to callers but at + // this point there are no hardware facilities available. This behavior + // matches that of other supported OSes. + if result.HostID == "" { + hostID, err := exec.LookPath("hostid") + if err == nil { + out, err := invoke.CommandWithContext(ctx, hostID) + if err == nil { + sc := bufio.NewScanner(bytes.NewReader(out)) + for sc.Scan() { + line := sc.Text() + result.HostID = strings.TrimSpace(line) + break + } + } + } + } + + // Find the boot time and calculate uptime relative to it + bootTime, err := BootTime() + if err != nil { + return nil, err + } + result.BootTime = bootTime + result.Uptime = uptimeSince(bootTime) + + // Count number of processes based on the number of entries in /proc + dirs, err := ioutil.ReadDir("/proc") + if err != nil { + return nil, err + } + result.Procs = uint64(len(dirs)) + + return result, nil +} + +var kstatMatch = regexp.MustCompile(`([^\s]+)[\s]+([^\s]*)`) + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + kstat, err := exec.LookPath("kstat") + if err != nil { + return 0, err + } + + out, err := invoke.CommandWithContext(ctx, kstat, "-p", "unix:0:system_misc:boot_time") + if err != nil { + return 0, err + } + + kstats := kstatMatch.FindAllStringSubmatch(string(out), -1) + if len(kstats) != 1 { + return 0, fmt.Errorf("expected 1 kstat, found %d", len(kstats)) + } + + return strconv.ParseUint(kstats[0][2], 10, 64) +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + bootTime, err := BootTime() + if err != nil { + return 0, err + } + return uptimeSince(bootTime), nil +} + +func uptimeSince(since uint64) uint64 { + return uint64(time.Now().Unix()) - since +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + return []UserStat{}, common.ErrNotImplementedError +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + return []TemperatureStat{}, common.ErrNotImplementedError +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func 
KernelVersionWithContext(ctx context.Context) (string, error) { + // Parse versions from output of `uname(1)` + uname, err := exec.LookPath("uname") + if err != nil { + return "", err + } + + out, err := invoke.CommandWithContext(ctx, uname, "-srv") + if err != nil { + return "", err + } + + fields := strings.Fields(string(out)) + if len(fields) >= 2 { + return fields[1], nil + } + return "", fmt.Errorf("could not get kernel version") +} + +func PlatformInformation() (platform string, family string, version string, err error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) { + /* This is not finished yet at all. Please contribute! */ + + version, err = KernelVersion() + if err != nil { + return "", "", "", err + } + + return "solaris", "solaris", version, nil +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_windows.go b/vendor/github.com/shirou/gopsutil/host/host_windows.go new file mode 100644 index 00000000..f9e1a16a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_windows.go @@ -0,0 +1,349 @@ +// +build windows + +package host + +import ( + "context" + "fmt" + "math" + "os" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + "unsafe" + + "github.com/StackExchange/wmi" + "github.com/shirou/gopsutil/internal/common" + process "github.com/shirou/gopsutil/process" + "golang.org/x/sys/windows" +) + +var ( + procGetSystemTimeAsFileTime = common.Modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetTickCount32 = common.Modkernel32.NewProc("GetTickCount") + procGetTickCount64 = common.Modkernel32.NewProc("GetTickCount64") + procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + procRtlGetVersion = common.ModNt.NewProc("RtlGetVersion") +) + +// https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/ns-wdm-_osversioninfoexw +type osVersionInfoExW struct { + dwOSVersionInfoSize uint32 + dwMajorVersion uint32 + dwMinorVersion uint32 + dwBuildNumber uint32 + dwPlatformId uint32 + szCSDVersion [128]uint16 + wServicePackMajor uint16 + wServicePackMinor uint16 + wSuiteMask uint16 + wProductType uint8 + wReserved uint8 +} + +type systemInfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +type msAcpi_ThermalZoneTemperature struct { + Active bool + CriticalTripPoint uint32 + CurrentTemperature uint32 + InstanceName string +} + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + ret := &InfoStat{ + OS: runtime.GOOS, + } + + { + hostname, err := os.Hostname() + if err == nil { + ret.Hostname = hostname + } + } + + { + platform, family, version, err := PlatformInformationWithContext(ctx) + if err == nil { + ret.Platform = platform + ret.PlatformFamily = family + ret.PlatformVersion = version + } else { + return ret, err + } + } + + { + kernelArch, err := kernelArch() + if err == nil { + ret.KernelArch = kernelArch + } + } + + { + boot, err := BootTimeWithContext(ctx) + if err == nil { + ret.BootTime = boot + ret.Uptime, _ = Uptime() + } + } + + { + hostID, err := getMachineGuid() + if err == nil { + ret.HostID = 
hostID + } + } + + { + procs, err := process.PidsWithContext(ctx) + if err == nil { + ret.Procs = uint64(len(procs)) + } + } + + return ret, nil +} + +func getMachineGuid() (string, error) { + // there has been reports of issues on 32bit using golang.org/x/sys/windows/registry, see https://github.com/shirou/gopsutil/pull/312#issuecomment-277422612 + // for rationale of using windows.RegOpenKeyEx/RegQueryValueEx instead of registry.OpenKey/GetStringValue + var h windows.Handle + err := windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, windows.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, windows.KEY_READ|windows.KEY_WOW64_64KEY, &h) + if err != nil { + return "", err + } + defer windows.RegCloseKey(h) + + const windowsRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 + const uuidLen = 36 + + var regBuf [windowsRegBufLen]uint16 + bufLen := uint32(windowsRegBufLen) + var valType uint32 + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return "", err + } + + hostID := windows.UTF16ToString(regBuf[:]) + hostIDLen := len(hostID) + if hostIDLen != uuidLen { + return "", fmt.Errorf("HostID incorrect: %q\n", hostID) + } + + return strings.ToLower(hostID), nil +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + procGetTickCount := procGetTickCount64 + err := procGetTickCount64.Find() + if err != nil { + procGetTickCount = procGetTickCount32 // handle WinXP, but keep in mind that "the time will wrap around to zero if the system is run continuously for 49.7 days." from MSDN + } + r1, _, lastErr := syscall.Syscall(procGetTickCount.Addr(), 0, 0, 0, 0) + if lastErr != 0 { + return 0, lastErr + } + return uint64((time.Duration(r1) * time.Millisecond).Seconds()), nil +} + +func bootTimeFromUptime(up uint64) uint64 { + return uint64(time.Now().Unix()) - up +} + +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + up, err := Uptime() + if err != nil { + return 0, err + } + t = bootTimeFromUptime(up) + atomic.StoreUint64(&cachedBootTime, t) + return t, nil +} + +func PlatformInformation() (platform string, family string, version string, err error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) { + // GetVersionEx lies on Windows 8.1 and returns as Windows 8 if we don't declare compatibility in manifest + // RtlGetVersion bypasses this lying layer and returns the true Windows version + // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/nf-wdm-rtlgetversion + // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/ns-wdm-_osversioninfoexw + var osInfo osVersionInfoExW + osInfo.dwOSVersionInfoSize = uint32(unsafe.Sizeof(osInfo)) + ret, _, err := procRtlGetVersion.Call(uintptr(unsafe.Pointer(&osInfo))) + if ret != 0 { + return + } + + // Platform + var h windows.Handle // like getMachineGuid(), we query the registry using the raw windows.RegOpenKeyEx/RegQueryValueEx + err = 
windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, windows.StringToUTF16Ptr(`SOFTWARE\Microsoft\Windows NT\CurrentVersion`), 0, windows.KEY_READ|windows.KEY_WOW64_64KEY, &h) + if err != nil { + return + } + defer windows.RegCloseKey(h) + var bufLen uint32 + var valType uint32 + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`ProductName`), nil, &valType, nil, &bufLen) + if err != nil { + return + } + regBuf := make([]uint16, bufLen/2+1) + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`ProductName`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return + } + platform = windows.UTF16ToString(regBuf[:]) + if !strings.HasPrefix(platform, "Microsoft") { + platform = "Microsoft " + platform + } + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CSDVersion`), nil, &valType, nil, &bufLen) // append Service Pack number, only on success + if err == nil { // don't return an error if only the Service Pack retrieval fails + regBuf = make([]uint16, bufLen/2+1) + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CSDVersion`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err == nil { + platform += " " + windows.UTF16ToString(regBuf[:]) + } + } + + // PlatformFamily + switch osInfo.wProductType { + case 1: + family = "Standalone Workstation" + case 2: + family = "Server (Domain Controller)" + case 3: + family = "Server" + } + + // Platform Version + version = fmt.Sprintf("%d.%d.%d Build %d", osInfo.dwMajorVersion, osInfo.dwMinorVersion, osInfo.dwBuildNumber, osInfo.dwBuildNumber) + + return platform, family, version, nil +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + var ret []UserStat + + return ret, nil +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + var ret []TemperatureStat + var dst []msAcpi_ThermalZoneTemperature + q := wmi.CreateQuery(&dst, "") + if err := common.WMIQueryWithContext(ctx, q, &dst, nil, "root/wmi"); err != nil { + return ret, err + } + + for _, v := range dst { + ts := TemperatureStat{ + SensorKey: v.InstanceName, + Temperature: kelvinToCelsius(v.CurrentTemperature, 2), + } + ret = append(ret, ts) + } + + return ret, nil +} + +func kelvinToCelsius(temp uint32, n int) float64 { + // wmi return temperature Kelvin * 10, so need to divide the result by 10, + // and then minus 273.15 to get °Celsius. 
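+	// Note that the integer division temp/10 truncates tenths of a kelvin
+	// before the 273.15 offset is applied; the Trunc((t+0.5/n10)*n10)/n10
+	// expression below then rounds half-up to n decimal places (n=2 turns
+	// 21.056 into 21.06).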
+ t := float64(temp/10) - 273.15 + n10 := math.Pow10(n) + return math.Trunc((t+0.5/n10)*n10) / n10 +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + _, _, version, err := PlatformInformation() + return version, err +} + +func kernelArch() (string, error) { + var systemInfo systemInfo + procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) + + const ( + PROCESSOR_ARCHITECTURE_INTEL = 0 + PROCESSOR_ARCHITECTURE_ARM = 5 + PROCESSOR_ARCHITECTURE_ARM64 = 12 + PROCESSOR_ARCHITECTURE_IA64 = 6 + PROCESSOR_ARCHITECTURE_AMD64 = 9 + ) + switch systemInfo.wProcessorArchitecture { + case PROCESSOR_ARCHITECTURE_INTEL: + if systemInfo.wProcessorLevel < 3 { + return "i386", nil + } + if systemInfo.wProcessorLevel > 6 { + return "i686", nil + } + return fmt.Sprintf("i%d86", systemInfo.wProcessorLevel), nil + case PROCESSOR_ARCHITECTURE_ARM: + return "arm", nil + case PROCESSOR_ARCHITECTURE_ARM64: + return "aarch64", nil + case PROCESSOR_ARCHITECTURE_IA64: + return "ia64", nil + case PROCESSOR_ARCHITECTURE_AMD64: + return "x86_64", nil + } + return "", nil +} diff --git a/vendor/github.com/shirou/gopsutil/host/types.go b/vendor/github.com/shirou/gopsutil/host/types.go new file mode 100644 index 00000000..1eff4755 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/types.go @@ -0,0 +1,25 @@ +package host + +import ( + "fmt" +) + +type Warnings struct { + List []error +} + +func (w *Warnings) Add(err error) { + w.List = append(w.List, err) +} + +func (w *Warnings) Reference() error { + if len(w.List) > 0 { + return w + } else { + return nil + } +} + +func (w *Warnings) Error() string { + return fmt.Sprintf("Number of warnings: %v", len(w.List)) +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/binary.go b/vendor/github.com/shirou/gopsutil/internal/common/binary.go new file mode 100644 index 00000000..9b5dc55b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/binary.go @@ -0,0 +1,634 @@ +package common + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package binary implements simple translation between numbers and byte +// sequences and encoding and decoding of varints. +// +// Numbers are translated by reading and writing fixed-size values. +// A fixed-size value is either a fixed-size arithmetic +// type (int8, uint8, int16, float32, complex64, ...) +// or an array or struct containing only fixed-size values. +// +// The varint functions encode and decode single integer values using +// a variable-length encoding; smaller values require fewer bytes. +// For a specification, see +// http://code.google.com/apis/protocolbuffers/docs/encoding.html. +// +// This package favors simplicity over efficiency. Clients that require +// high-performance serialization, especially for large data structures, +// should look at more advanced solutions such as the encoding/gob +// package or protocol buffers. +import ( + "errors" + "io" + "math" + "reflect" +) + +// A ByteOrder specifies how to convert byte sequences into +// 16-, 32-, or 64-bit unsigned integers. 
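+// The method set mirrors encoding/binary.ByteOrder; the package is copied
+// into gopsutil so the Read/Write helpers below can be adapted without
+// depending on the standard library's unexported decoder.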
+type ByteOrder interface { + Uint16([]byte) uint16 + Uint32([]byte) uint32 + Uint64([]byte) uint64 + PutUint16([]byte, uint16) + PutUint32([]byte, uint32) + PutUint64([]byte, uint64) + String() string +} + +// LittleEndian is the little-endian implementation of ByteOrder. +var LittleEndian littleEndian + +// BigEndian is the big-endian implementation of ByteOrder. +var BigEndian bigEndian + +type littleEndian struct{} + +func (littleEndian) Uint16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 } + +func (littleEndian) PutUint16(b []byte, v uint16) { + b[0] = byte(v) + b[1] = byte(v >> 8) +} + +func (littleEndian) Uint32(b []byte) uint32 { + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) PutUint32(b []byte, v uint32) { + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (littleEndian) Uint64(b []byte) uint64 { + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (littleEndian) PutUint64(b []byte, v uint64) { + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) + b[4] = byte(v >> 32) + b[5] = byte(v >> 40) + b[6] = byte(v >> 48) + b[7] = byte(v >> 56) +} + +func (littleEndian) String() string { return "LittleEndian" } + +func (littleEndian) GoString() string { return "binary.LittleEndian" } + +type bigEndian struct{} + +func (bigEndian) Uint16(b []byte) uint16 { return uint16(b[1]) | uint16(b[0])<<8 } + +func (bigEndian) PutUint16(b []byte, v uint16) { + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (bigEndian) Uint32(b []byte) uint32 { + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) PutUint32(b []byte, v uint32) { + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (bigEndian) Uint64(b []byte) uint64 { + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +func (bigEndian) PutUint64(b []byte, v uint64) { + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) +} + +func (bigEndian) String() string { return "BigEndian" } + +func (bigEndian) GoString() string { return "binary.BigEndian" } + +// Read reads structured binary data from r into data. +// Data must be a pointer to a fixed-size value or a slice +// of fixed-size values. +// Bytes read from r are decoded using the specified byte order +// and written to successive fields of the data. +// When reading into structs, the field data for fields with +// blank (_) field names is skipped; i.e., blank field names +// may be used for padding. +// When reading into a struct, all non-blank fields must be exported. +func Read(r io.Reader, order ByteOrder, data interface{}) error { + // Fast path for basic types and slices. 
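+	// intDataSize reports a non-zero byte count only for the fixed-size integer
+	// types and slices handled below; anything else falls through to the
+	// reflection-based decoder at the end of this function.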
+ if n := intDataSize(data); n != 0 { + var b [8]byte + var bs []byte + if n > len(b) { + bs = make([]byte, n) + } else { + bs = b[:n] + } + if _, err := io.ReadFull(r, bs); err != nil { + return err + } + switch data := data.(type) { + case *int8: + *data = int8(b[0]) + case *uint8: + *data = b[0] + case *int16: + *data = int16(order.Uint16(bs)) + case *uint16: + *data = order.Uint16(bs) + case *int32: + *data = int32(order.Uint32(bs)) + case *uint32: + *data = order.Uint32(bs) + case *int64: + *data = int64(order.Uint64(bs)) + case *uint64: + *data = order.Uint64(bs) + case []int8: + for i, x := range bs { // Easier to loop over the input for 8-bit values. + data[i] = int8(x) + } + case []uint8: + copy(data, bs) + case []int16: + for i := range data { + data[i] = int16(order.Uint16(bs[2*i:])) + } + case []uint16: + for i := range data { + data[i] = order.Uint16(bs[2*i:]) + } + case []int32: + for i := range data { + data[i] = int32(order.Uint32(bs[4*i:])) + } + case []uint32: + for i := range data { + data[i] = order.Uint32(bs[4*i:]) + } + case []int64: + for i := range data { + data[i] = int64(order.Uint64(bs[8*i:])) + } + case []uint64: + for i := range data { + data[i] = order.Uint64(bs[8*i:]) + } + } + return nil + } + + // Fallback to reflect-based decoding. + v := reflect.ValueOf(data) + size := -1 + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + size = dataSize(v) + case reflect.Slice: + size = dataSize(v) + } + if size < 0 { + return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String()) + } + d := &decoder{order: order, buf: make([]byte, size)} + if _, err := io.ReadFull(r, d.buf); err != nil { + return err + } + d.value(v) + return nil +} + +// Write writes the binary representation of data into w. +// Data must be a fixed-size value or a slice of fixed-size +// values, or a pointer to such data. +// Bytes written to w are encoded using the specified byte order +// and read from successive fields of the data. +// When writing structs, zero values are written for fields +// with blank (_) field names. +func Write(w io.Writer, order ByteOrder, data interface{}) error { + // Fast path for basic types and slices. 
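+	// As in Read, only the fixed-size integer types and slices enumerated below
+	// take the fast path; other values are encoded via reflection.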
+ if n := intDataSize(data); n != 0 { + var b [8]byte + var bs []byte + if n > len(b) { + bs = make([]byte, n) + } else { + bs = b[:n] + } + switch v := data.(type) { + case *int8: + bs = b[:1] + b[0] = byte(*v) + case int8: + bs = b[:1] + b[0] = byte(v) + case []int8: + for i, x := range v { + bs[i] = byte(x) + } + case *uint8: + bs = b[:1] + b[0] = *v + case uint8: + bs = b[:1] + b[0] = byte(v) + case []uint8: + bs = v + case *int16: + bs = b[:2] + order.PutUint16(bs, uint16(*v)) + case int16: + bs = b[:2] + order.PutUint16(bs, uint16(v)) + case []int16: + for i, x := range v { + order.PutUint16(bs[2*i:], uint16(x)) + } + case *uint16: + bs = b[:2] + order.PutUint16(bs, *v) + case uint16: + bs = b[:2] + order.PutUint16(bs, v) + case []uint16: + for i, x := range v { + order.PutUint16(bs[2*i:], x) + } + case *int32: + bs = b[:4] + order.PutUint32(bs, uint32(*v)) + case int32: + bs = b[:4] + order.PutUint32(bs, uint32(v)) + case []int32: + for i, x := range v { + order.PutUint32(bs[4*i:], uint32(x)) + } + case *uint32: + bs = b[:4] + order.PutUint32(bs, *v) + case uint32: + bs = b[:4] + order.PutUint32(bs, v) + case []uint32: + for i, x := range v { + order.PutUint32(bs[4*i:], x) + } + case *int64: + bs = b[:8] + order.PutUint64(bs, uint64(*v)) + case int64: + bs = b[:8] + order.PutUint64(bs, uint64(v)) + case []int64: + for i, x := range v { + order.PutUint64(bs[8*i:], uint64(x)) + } + case *uint64: + bs = b[:8] + order.PutUint64(bs, *v) + case uint64: + bs = b[:8] + order.PutUint64(bs, v) + case []uint64: + for i, x := range v { + order.PutUint64(bs[8*i:], x) + } + } + _, err := w.Write(bs) + return err + } + + // Fallback to reflect-based encoding. + v := reflect.Indirect(reflect.ValueOf(data)) + size := dataSize(v) + if size < 0 { + return errors.New("binary.Write: invalid type " + reflect.TypeOf(data).String()) + } + buf := make([]byte, size) + e := &encoder{order: order, buf: buf} + e.value(v) + _, err := w.Write(buf) + return err +} + +// Size returns how many bytes Write would generate to encode the value v, which +// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data. +// If v is neither of these, Size returns -1. +func Size(v interface{}) int { + return dataSize(reflect.Indirect(reflect.ValueOf(v))) +} + +// dataSize returns the number of bytes the actual data represented by v occupies in memory. +// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice +// it returns the length of the slice times the element size and does not count the memory +// occupied by the header. If the type of v is not acceptable, dataSize returns -1. +func dataSize(v reflect.Value) int { + if v.Kind() == reflect.Slice { + if s := sizeof(v.Type().Elem()); s >= 0 { + return s * v.Len() + } + return -1 + } + return sizeof(v.Type()) +} + +// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable. 
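+// For example, a struct{ A uint16; B [2]uint32 } has sizeof 2 + 2*4 = 10 bytes:
+// field sizes are summed with no alignment padding, matching the packed wire format.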
+func sizeof(t reflect.Type) int { + switch t.Kind() { + case reflect.Array: + if s := sizeof(t.Elem()); s >= 0 { + return s * t.Len() + } + + case reflect.Struct: + sum := 0 + for i, n := 0, t.NumField(); i < n; i++ { + s := sizeof(t.Field(i).Type) + if s < 0 { + return -1 + } + sum += s + } + return sum + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.Ptr: + return int(t.Size()) + } + + return -1 +} + +type coder struct { + order ByteOrder + buf []byte +} + +type decoder coder +type encoder coder + +func (d *decoder) uint8() uint8 { + x := d.buf[0] + d.buf = d.buf[1:] + return x +} + +func (e *encoder) uint8(x uint8) { + e.buf[0] = x + e.buf = e.buf[1:] +} + +func (d *decoder) uint16() uint16 { + x := d.order.Uint16(d.buf[0:2]) + d.buf = d.buf[2:] + return x +} + +func (e *encoder) uint16(x uint16) { + e.order.PutUint16(e.buf[0:2], x) + e.buf = e.buf[2:] +} + +func (d *decoder) uint32() uint32 { + x := d.order.Uint32(d.buf[0:4]) + d.buf = d.buf[4:] + return x +} + +func (e *encoder) uint32(x uint32) { + e.order.PutUint32(e.buf[0:4], x) + e.buf = e.buf[4:] +} + +func (d *decoder) uint64() uint64 { + x := d.order.Uint64(d.buf[0:8]) + d.buf = d.buf[8:] + return x +} + +func (e *encoder) uint64(x uint64) { + e.order.PutUint64(e.buf[0:8], x) + e.buf = e.buf[8:] +} + +func (d *decoder) int8() int8 { return int8(d.uint8()) } + +func (e *encoder) int8(x int8) { e.uint8(uint8(x)) } + +func (d *decoder) int16() int16 { return int16(d.uint16()) } + +func (e *encoder) int16(x int16) { e.uint16(uint16(x)) } + +func (d *decoder) int32() int32 { return int32(d.uint32()) } + +func (e *encoder) int32(x int32) { e.uint32(uint32(x)) } + +func (d *decoder) int64() int64 { return int64(d.uint64()) } + +func (e *encoder) int64(x int64) { e.uint64(uint64(x)) } + +func (d *decoder) value(v reflect.Value) { + switch v.Kind() { + case reflect.Array: + l := v.Len() + for i := 0; i < l; i++ { + d.value(v.Index(i)) + } + + case reflect.Struct: + t := v.Type() + l := v.NumField() + for i := 0; i < l; i++ { + // Note: Calling v.CanSet() below is an optimization. + // It would be sufficient to check the field name, + // but creating the StructField info for each field is + // costly (run "go test -bench=ReadStruct" and compare + // results when making changes to this code). 
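+			// Fields named "_" are not decoded; skip advances the buffer
+			// past them so they act as padding.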
+ if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + d.value(v) + } else { + d.skip(v) + } + } + + case reflect.Slice: + l := v.Len() + for i := 0; i < l; i++ { + d.value(v.Index(i)) + } + + case reflect.Int8: + v.SetInt(int64(d.int8())) + case reflect.Int16: + v.SetInt(int64(d.int16())) + case reflect.Int32: + v.SetInt(int64(d.int32())) + case reflect.Int64: + v.SetInt(d.int64()) + + case reflect.Uint8: + v.SetUint(uint64(d.uint8())) + case reflect.Uint16: + v.SetUint(uint64(d.uint16())) + case reflect.Uint32: + v.SetUint(uint64(d.uint32())) + case reflect.Uint64: + v.SetUint(d.uint64()) + + case reflect.Float32: + v.SetFloat(float64(math.Float32frombits(d.uint32()))) + case reflect.Float64: + v.SetFloat(math.Float64frombits(d.uint64())) + + case reflect.Complex64: + v.SetComplex(complex( + float64(math.Float32frombits(d.uint32())), + float64(math.Float32frombits(d.uint32())), + )) + case reflect.Complex128: + v.SetComplex(complex( + math.Float64frombits(d.uint64()), + math.Float64frombits(d.uint64()), + )) + } +} + +func (e *encoder) value(v reflect.Value) { + switch v.Kind() { + case reflect.Array: + l := v.Len() + for i := 0; i < l; i++ { + e.value(v.Index(i)) + } + + case reflect.Struct: + t := v.Type() + l := v.NumField() + for i := 0; i < l; i++ { + // see comment for corresponding code in decoder.value() + if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + e.value(v) + } else { + e.skip(v) + } + } + + case reflect.Slice: + l := v.Len() + for i := 0; i < l; i++ { + e.value(v.Index(i)) + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch v.Type().Kind() { + case reflect.Int8: + e.int8(int8(v.Int())) + case reflect.Int16: + e.int16(int16(v.Int())) + case reflect.Int32: + e.int32(int32(v.Int())) + case reflect.Int64: + e.int64(v.Int()) + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch v.Type().Kind() { + case reflect.Uint8: + e.uint8(uint8(v.Uint())) + case reflect.Uint16: + e.uint16(uint16(v.Uint())) + case reflect.Uint32: + e.uint32(uint32(v.Uint())) + case reflect.Uint64: + e.uint64(v.Uint()) + } + + case reflect.Float32, reflect.Float64: + switch v.Type().Kind() { + case reflect.Float32: + e.uint32(math.Float32bits(float32(v.Float()))) + case reflect.Float64: + e.uint64(math.Float64bits(v.Float())) + } + + case reflect.Complex64, reflect.Complex128: + switch v.Type().Kind() { + case reflect.Complex64: + x := v.Complex() + e.uint32(math.Float32bits(float32(real(x)))) + e.uint32(math.Float32bits(float32(imag(x)))) + case reflect.Complex128: + x := v.Complex() + e.uint64(math.Float64bits(real(x))) + e.uint64(math.Float64bits(imag(x))) + } + } +} + +func (d *decoder) skip(v reflect.Value) { + d.buf = d.buf[dataSize(v):] +} + +func (e *encoder) skip(v reflect.Value) { + n := dataSize(v) + for i := range e.buf[0:n] { + e.buf[i] = 0 + } + e.buf = e.buf[n:] +} + +// intDataSize returns the size of the data required to represent the data when encoded. +// It returns zero if the type cannot be implemented by the fast path in Read or Write. 
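+// For example, intDataSize([]uint32{1, 2, 3}) is 12, while a struct argument
+// yields 0 and is handled by the reflection path instead.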
+func intDataSize(data interface{}) int {
+	switch data := data.(type) {
+	case int8, *int8, *uint8:
+		return 1
+	case []int8:
+		return len(data)
+	case []uint8:
+		return len(data)
+	case int16, *int16, *uint16:
+		return 2
+	case []int16:
+		return 2 * len(data)
+	case []uint16:
+		return 2 * len(data)
+	case int32, *int32, *uint32:
+		return 4
+	case []int32:
+		return 4 * len(data)
+	case []uint32:
+		return 4 * len(data)
+	case int64, *int64, *uint64:
+		return 8
+	case []int64:
+		return 8 * len(data)
+	case []uint64:
+		return 8 * len(data)
+	}
+	return 0
+}
diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common.go b/vendor/github.com/shirou/gopsutil/internal/common/common.go
new file mode 100644
index 00000000..4ca8bc97
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/internal/common/common.go
@@ -0,0 +1,355 @@
+package common
+
+//
+// gopsutil is a port of psutil(http://pythonhosted.org/psutil/).
+// This covers these architectures.
+//  - linux (amd64, arm)
+//  - freebsd (amd64)
+//  - windows (amd64)
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var (
+	Timeout    = 3 * time.Second
+	ErrTimeout = errors.New("command timed out")
+)
+
+type Invoker interface {
+	Command(string, ...string) ([]byte, error)
+	CommandWithContext(context.Context, string, ...string) ([]byte, error)
+}
+
+type Invoke struct{}
+
+func (i Invoke) Command(name string, arg ...string) ([]byte, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), Timeout)
+	defer cancel()
+	return i.CommandWithContext(ctx, name, arg...)
+}
+
+func (i Invoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) {
+	cmd := exec.CommandContext(ctx, name, arg...)
+
+	var buf bytes.Buffer
+	cmd.Stdout = &buf
+	cmd.Stderr = &buf
+
+	if err := cmd.Start(); err != nil {
+		return buf.Bytes(), err
+	}
+
+	if err := cmd.Wait(); err != nil {
+		return buf.Bytes(), err
+	}
+
+	return buf.Bytes(), nil
+}
+
+type FakeInvoke struct {
+	Suffix string // Suffix specifies the expected file name suffix, such as "fail"
+	Error  error  // If Error is specified, return that error.
+}
+
+// Command in FakeInvoke returns the contents of the expected file if it exists.
+func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) {
+	if i.Error != nil {
+		return []byte{}, i.Error
+	}
+
+	arch := runtime.GOOS
+
+	commandName := filepath.Base(name)
+
+	fname := strings.Join(append([]string{commandName}, arg...), "")
+	fname = url.QueryEscape(fname)
+	fpath := path.Join("testdata", arch, fname)
+	if i.Suffix != "" {
+		fpath += "_" + i.Suffix
+	}
+	if PathExists(fpath) {
+		return ioutil.ReadFile(fpath)
+	}
+	return []byte{}, fmt.Errorf("could not find testdata: %s", fpath)
+}
+
+func (i FakeInvoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) {
+	return i.Command(name, arg...)
+}
+
+var ErrNotImplementedError = errors.New("not implemented yet")
+
+// ReadLines reads contents from a file and splits them by new lines.
+// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
+func ReadLines(filename string) ([]string, error) {
+	return ReadLinesOffsetN(filename, 0, -1)
+}
+
+// ReadLinesOffsetN reads contents from a file and splits them by new line.
+// The offset tells at which line number to start.
+// The count determines the number of lines to read (starting from offset):
+//   n >= 0: at most n lines
+//   n < 0: whole file
+func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return []string{""}, err
+	}
+	defer f.Close()
+
+	var ret []string
+
+	r := bufio.NewReader(f)
+	for i := 0; i < n+int(offset) || n < 0; i++ {
+		line, err := r.ReadString('\n')
+		if err != nil {
+			break
+		}
+		if i < int(offset) {
+			continue
+		}
+		ret = append(ret, strings.Trim(line, "\n"))
+	}
+
+	return ret, nil
+}
+
+func IntToString(orig []int8) string {
+	ret := make([]byte, len(orig))
+	size := -1
+	for i, o := range orig {
+		if o == 0 {
+			size = i
+			break
+		}
+		ret[i] = byte(o)
+	}
+	if size == -1 {
+		size = len(orig)
+	}
+
+	return string(ret[0:size])
+}
+
+func UintToString(orig []uint8) string {
+	ret := make([]byte, len(orig))
+	size := -1
+	for i, o := range orig {
+		if o == 0 {
+			size = i
+			break
+		}
+		ret[i] = byte(o)
+	}
+	if size == -1 {
+		size = len(orig)
+	}
+
+	return string(ret[0:size])
+}
+
+func ByteToString(orig []byte) string {
+	n := -1
+	l := -1
+	for i, b := range orig {
+		// skip left side null
+		if l == -1 && b == 0 {
+			continue
+		}
+		if l == -1 {
+			l = i
+		}
+
+		if b == 0 {
+			break
+		}
+		n = i + 1
+	}
+	if n == -1 {
+		return string(orig)
+	}
+	return string(orig[l:n])
+}
+
+// ReadInts reads contents from a single-line file and returns them as []int64.
+func ReadInts(filename string) ([]int64, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return []int64{}, err
+	}
+	defer f.Close()
+
+	var ret []int64
+
+	r := bufio.NewReader(f)
+
+	// The int files that this is concerned with should only be one liners.
+	line, err := r.ReadString('\n')
+	if err != nil {
+		return []int64{}, err
+	}
+
+	i, err := strconv.ParseInt(strings.Trim(line, "\n"), 10, 32)
+	if err != nil {
+		return []int64{}, err
+	}
+	ret = append(ret, i)
+
+	return ret, nil
+}
+
+// HexToUint32 parses hex to a uint32, ignoring parse errors.
+func HexToUint32(hex string) uint32 {
+	vv, _ := strconv.ParseUint(hex, 16, 32)
+	return uint32(vv)
+}
+
+// mustParseInt32 parses to an int32, ignoring parse errors.
+func mustParseInt32(val string) int32 {
+	vv, _ := strconv.ParseInt(val, 10, 32)
+	return int32(vv)
+}
+
+// mustParseUint64 parses to a uint64, ignoring parse errors.
+// (ParseUint, not ParseInt, so values above MaxInt64 parse correctly.)
+func mustParseUint64(val string) uint64 {
+	vv, _ := strconv.ParseUint(val, 10, 64)
+	return vv
+}
+
+// mustParseFloat64 parses to a float64, ignoring parse errors.
+func mustParseFloat64(val string) float64 {
+	vv, _ := strconv.ParseFloat(val, 64)
+	return vv
+}
+
+// StringsHas checks whether the target string slice contains src.
+func StringsHas(target []string, src string) bool {
+	for _, t := range target {
+		if strings.TrimSpace(t) == src {
+			return true
+		}
+	}
+	return false
+}
+
+// StringsContains checks whether src is contained in any string of the target slice.
+func StringsContains(target []string, src string) bool {
+	for _, t := range target {
+		if strings.Contains(t, src) {
+			return true
+		}
+	}
+	return false
+}
+
+// IntContains checks whether src equals any int of the target int slice.
+func IntContains(target []int, src int) bool {
+	for _, t := range target {
+		if src == t {
+			return true
+		}
+	}
+	return false
+}
+
+// attributes gets struct attributes.
+// This method is used only for debugging platform-dependent code.
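+// For example, attributes(&struct{ A uint64 }{}) returns {"A": reflect.TypeOf(uint64(0))}.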
+func attributes(m interface{}) map[string]reflect.Type {
+	typ := reflect.TypeOf(m)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+	}
+
+	attrs := make(map[string]reflect.Type)
+	if typ.Kind() != reflect.Struct {
+		return nil
+	}
+
+	for i := 0; i < typ.NumField(); i++ {
+		p := typ.Field(i)
+		if !p.Anonymous {
+			attrs[p.Name] = p.Type
+		}
+	}
+
+	return attrs
+}
+
+func PathExists(filename string) bool {
+	if _, err := os.Stat(filename); err == nil {
+		return true
+	}
+	return false
+}
+
+// GetEnv retrieves the environment variable key. If it does not exist, it returns the default.
+func GetEnv(key string, dfault string, combineWith ...string) string {
+	value := os.Getenv(key)
+	if value == "" {
+		value = dfault
+	}
+
+	switch len(combineWith) {
+	case 0:
+		return value
+	case 1:
+		return filepath.Join(value, combineWith[0])
+	default:
+		all := make([]string, len(combineWith)+1)
+		all[0] = value
+		copy(all[1:], combineWith)
+		return filepath.Join(all...)
+	}
+	panic("invalid switch case")
+}
+
+func HostProc(combineWith ...string) string {
+	return GetEnv("HOST_PROC", "/proc", combineWith...)
+}
+
+func HostSys(combineWith ...string) string {
+	return GetEnv("HOST_SYS", "/sys", combineWith...)
+}
+
+func HostEtc(combineWith ...string) string {
+	return GetEnv("HOST_ETC", "/etc", combineWith...)
+}
+
+func HostVar(combineWith ...string) string {
+	return GetEnv("HOST_VAR", "/var", combineWith...)
+}
+
+func HostRun(combineWith ...string) string {
+	return GetEnv("HOST_RUN", "/run", combineWith...)
+}
+
+// getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running
+// sysctl commands (see DoSysctrl).
+func getSysctrlEnv(env []string) []string {
+	foundLC := false
+	for i, line := range env {
+		if strings.HasPrefix(line, "LC_ALL") {
+			env[i] = "LC_ALL=C"
+			foundLC = true
+		}
+	}
+	if !foundLC {
+		env = append(env, "LC_ALL=C")
+	}
+	return env
+}
diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/internal/common/common_darwin.go
new file mode 100644
index 00000000..dde5c390
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/internal/common/common_darwin.go
@@ -0,0 +1,69 @@
+// +build darwin
+
+package common
+
+import (
+	"context"
+	"os"
+	"os/exec"
+	"strings"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) {
+	sysctl, err := exec.LookPath("sysctl")
+	if err != nil {
+		return []string{}, err
+	}
+	cmd := exec.CommandContext(ctx, sysctl, "-n", mib)
+	cmd.Env = getSysctrlEnv(os.Environ())
+	out, err := cmd.Output()
+	if err != nil {
+		return []string{}, err
+	}
+	v := strings.Replace(string(out), "{ ", "", 1)
+	v = strings.Replace(string(v), " }", "", 1)
+	values := strings.Fields(string(v))
+
+	return values, nil
+}
+
+func CallSyscall(mib []int32) ([]byte, uint64, error) {
+	miblen := uint64(len(mib))
+
+	// get required buffer size
+	length := uint64(0)
+	_, _, err := unix.Syscall6(
+		unix.SYS___SYSCTL,
+		uintptr(unsafe.Pointer(&mib[0])),
+		uintptr(miblen),
+		0,
+		uintptr(unsafe.Pointer(&length)),
+		0,
+		0)
+	if err != 0 {
+		var b []byte
+		return b, length, err
+	}
+	if length == 0 {
+		var b []byte
+		return b, length, err
+	}
+	// get proc info itself
+	buf := make([]byte, length)
+	_, _, err = unix.Syscall6(
+		unix.SYS___SYSCTL,
+		uintptr(unsafe.Pointer(&mib[0])),
+		uintptr(miblen),
+		uintptr(unsafe.Pointer(&buf[0])),
+		uintptr(unsafe.Pointer(&length)),
+		0,
+		0)
+	if err != 0 {
+		return buf, length, err
+	}
+
+	return buf,
length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_freebsd.go b/vendor/github.com/shirou/gopsutil/internal/common/common_freebsd.go new file mode 100644 index 00000000..85bda0e2 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_freebsd.go @@ -0,0 +1,85 @@ +// +build freebsd openbsd + +package common + +import ( + "fmt" + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func SysctlUint(mib string) (uint64, error) { + buf, err := unix.SysctlRaw(mib) + if err != nil { + return 0, err + } + if len(buf) == 8 { // 64 bit + return *(*uint64)(unsafe.Pointer(&buf[0])), nil + } + if len(buf) == 4 { // 32bit + t := *(*uint32)(unsafe.Pointer(&buf[0])) + return uint64(t), nil + } + return 0, fmt.Errorf("unexpected size: %s, %d", mib, len(buf)) +} + +func DoSysctrl(mib string) ([]string, error) { + sysctl, err := exec.LookPath("sysctl") + if err != nil { + return []string{}, err + } + cmd := exec.Command(sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/internal/common/common_linux.go new file mode 100644 index 00000000..f558b74b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_linux.go @@ -0,0 +1,264 @@ +// +build linux + +package common + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync/atomic" + "time" +) + +func DoSysctrl(mib string) ([]string, error) { + sysctl, err := exec.LookPath("sysctl") + if err != nil { + return []string{}, err + } + cmd := exec.Command(sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func NumProcs() (uint64, error) { + f, err := os.Open(HostProc()) + if err != nil { + return 0, err + } + defer f.Close() + + list, err := f.Readdirnames(-1) + if err != nil { + return 0, err + } + var cnt uint64 + + for _, v := range list { + if _, err = strconv.ParseUint(v, 10, 64); err == nil { + cnt++ + } + } + + return cnt, nil +} + +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + + system, role, err := Virtualization() + if 
err != nil {
+		return 0, err
+	}
+
+	statFile := "stat"
+	if system == "lxc" && role == "guest" {
+		// in lxc guests /proc/uptime is used instead of /proc/stat
+		statFile = "uptime"
+	} else if system == "docker" && role == "guest" {
+		// likewise for docker guests
+		statFile = "uptime"
+	}
+
+	filename := HostProc(statFile)
+	lines, err := ReadLines(filename)
+	if err != nil {
+		return 0, err
+	}
+
+	if statFile == "stat" {
+		for _, line := range lines {
+			if strings.HasPrefix(line, "btime") {
+				f := strings.Fields(line)
+				if len(f) != 2 {
+					return 0, fmt.Errorf("wrong btime format")
+				}
+				b, err := strconv.ParseInt(f[1], 10, 64)
+				if err != nil {
+					return 0, err
+				}
+				t = uint64(b)
+				atomic.StoreUint64(&cachedBootTime, t)
+				return t, nil
+			}
+		}
+	} else if statFile == "uptime" {
+		if len(lines) != 1 {
+			return 0, fmt.Errorf("wrong uptime format")
+		}
+		f := strings.Fields(lines[0])
+		b, err := strconv.ParseFloat(f[0], 64)
+		if err != nil {
+			return 0, err
+		}
+		t = uint64(time.Now().Unix()) - uint64(b)
+		atomic.StoreUint64(&cachedBootTime, t)
+		return t, nil
+	}
+
+	return 0, fmt.Errorf("could not find btime")
+}
+
+func Virtualization() (string, string, error) {
+	return VirtualizationWithContext(context.Background())
+}
+
+func VirtualizationWithContext(ctx context.Context) (string, string, error) {
+	var system string
+	var role string
+
+	filename := HostProc("xen")
+	if PathExists(filename) {
+		system = "xen"
+		role = "guest" // assume guest
+
+		if PathExists(filepath.Join(filename, "capabilities")) {
+			contents, err := ReadLines(filepath.Join(filename, "capabilities"))
+			if err == nil {
+				if StringsContains(contents, "control_d") {
+					role = "host"
+				}
+			}
+		}
+	}
+
+	filename = HostProc("modules")
+	if PathExists(filename) {
+		contents, err := ReadLines(filename)
+		if err == nil {
+			if StringsContains(contents, "kvm") {
+				system = "kvm"
+				role = "host"
+			} else if StringsContains(contents, "vboxdrv") {
+				system = "vbox"
+				role = "host"
+			} else if StringsContains(contents, "vboxguest") {
+				system = "vbox"
+				role = "guest"
+			} else if StringsContains(contents, "vmware") {
+				system = "vmware"
+				role = "guest"
+			}
+		}
+	}
+
+	filename = HostProc("cpuinfo")
+	if PathExists(filename) {
+		contents, err := ReadLines(filename)
+		if err == nil {
+			if StringsContains(contents, "QEMU Virtual CPU") ||
+				StringsContains(contents, "Common KVM processor") ||
+				StringsContains(contents, "Common 32-bit KVM processor") {
+				system = "kvm"
+				role = "guest"
+			}
+		}
+	}
+
+	filename = HostProc("bus/pci/devices")
+	if PathExists(filename) {
+		contents, err := ReadLines(filename)
+		if err == nil {
+			if StringsContains(contents, "virtio-pci") {
+				role = "guest"
+			}
+		}
+	}
+
+	filename = HostProc()
+	if PathExists(filepath.Join(filename, "bc", "0")) {
+		system = "openvz"
+		role = "host"
+	} else if PathExists(filepath.Join(filename, "vz")) {
+		system = "openvz"
+		role = "guest"
+	}
+
+	// do not use dmidecode because it requires root
+	if PathExists(filepath.Join(filename, "self", "status")) {
+		contents, err := ReadLines(filepath.Join(filename, "self", "status"))
+		if err == nil {
+
+			if StringsContains(contents, "s_context:") ||
+				StringsContains(contents, "VxID:") {
+				system = "linux-vserver"
+			}
+			// TODO: guest or host
+		}
+	}
+
+	if PathExists(filepath.Join(filename, "self", "cgroup")) {
+		contents, err := ReadLines(filepath.Join(filename, "self", "cgroup"))
+		if err == nil {
+			if StringsContains(contents, "lxc") {
+				system = "lxc"
+				role = "guest"
+			} else if StringsContains(contents, "docker") {
+				system = "docker"
+				role = "guest"
} else if StringsContains(contents, "machine-rkt") { + system = "rkt" + role = "guest" + } else if PathExists("/usr/bin/lxc-version") { + system = "lxc" + role = "host" + } + } + } + + if PathExists(HostEtc("os-release")) { + p, _, err := GetOSRelease() + if err == nil && p == "coreos" { + system = "rkt" // Is it true? + role = "host" + } + } + return system, role, nil +} + +func GetOSRelease() (platform string, version string, err error) { + contents, err := ReadLines(HostEtc("os-release")) + if err != nil { + return "", "", nil // return empty + } + for _, line := range contents { + field := strings.Split(line, "=") + if len(field) < 2 { + continue + } + switch field[0] { + case "ID": // use ID for lowercase + platform = trimQuotes(field[1]) + case "VERSION": + version = trimQuotes(field[1]) + } + } + return platform, version, nil +} + +// Remove quotes of the source string +func trimQuotes(s string) string { + if len(s) >= 2 { + if s[0] == '"' && s[len(s)-1] == '"' { + return s[1 : len(s)-1] + } + } + return s +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_openbsd.go b/vendor/github.com/shirou/gopsutil/internal/common/common_openbsd.go new file mode 100644 index 00000000..ba73a7eb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_openbsd.go @@ -0,0 +1,69 @@ +// +build openbsd + +package common + +import ( + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func DoSysctrl(mib string) ([]string, error) { + sysctl, err := exec.LookPath("sysctl") + if err != nil { + return []string{}, err + } + cmd := exec.Command(sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/internal/common/common_unix.go new file mode 100644 index 00000000..9e393bcf --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_unix.go @@ -0,0 +1,67 @@ +// +build linux freebsd darwin openbsd + +package common + +import ( + "context" + "os/exec" + "strconv" + "strings" +) + +func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args ...string) ([]string, error) { + var cmd []string + if pid == 0 { // will get from all processes. + cmd = []string{"-a", "-n", "-P"} + } else { + cmd = []string{"-a", "-n", "-P", "-p", strconv.Itoa(int(pid))} + } + cmd = append(cmd, args...) + lsof, err := exec.LookPath("lsof") + if err != nil { + return []string{}, err + } + out, err := invoke.CommandWithContext(ctx, lsof, cmd...) 
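+	// (The first line of lsof output is the column header; it is skipped below.)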
+ if err != nil { + // if no pid found, lsof returns code 1. + if err.Error() == "exit status 1" && len(out) == 0 { + return []string{}, nil + } + } + lines := strings.Split(string(out), "\n") + + var ret []string + for _, l := range lines[1:] { + if len(l) == 0 { + continue + } + ret = append(ret, l) + } + return ret, nil +} + +func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) { + var cmd []string + cmd = []string{"-P", strconv.Itoa(int(pid))} + pgrep, err := exec.LookPath("pgrep") + if err != nil { + return []int32{}, err + } + out, err := invoke.CommandWithContext(ctx, pgrep, cmd...) + if err != nil { + return []int32{}, err + } + lines := strings.Split(string(out), "\n") + ret := make([]int32, 0, len(lines)) + for _, l := range lines { + if len(l) == 0 { + continue + } + i, err := strconv.Atoi(l) + if err != nil { + continue + } + ret = append(ret, int32(i)) + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_windows.go b/vendor/github.com/shirou/gopsutil/internal/common/common_windows.go new file mode 100644 index 00000000..dbc8b675 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_windows.go @@ -0,0 +1,160 @@ +// +build windows + +package common + +import ( + "context" + "path/filepath" + "strings" + "syscall" + "unsafe" + + "github.com/StackExchange/wmi" + "golang.org/x/sys/windows" +) + +// for double values +type PDH_FMT_COUNTERVALUE_DOUBLE struct { + CStatus uint32 + DoubleValue float64 +} + +// for 64 bit integer values +type PDH_FMT_COUNTERVALUE_LARGE struct { + CStatus uint32 + LargeValue int64 +} + +// for long values +type PDH_FMT_COUNTERVALUE_LONG struct { + CStatus uint32 + LongValue int32 + padding [4]byte +} + +// windows system const +const ( + ERROR_SUCCESS = 0 + ERROR_FILE_NOT_FOUND = 2 + DRIVE_REMOVABLE = 2 + DRIVE_FIXED = 3 + HKEY_LOCAL_MACHINE = 0x80000002 + RRF_RT_REG_SZ = 0x00000002 + RRF_RT_REG_DWORD = 0x00000010 + PDH_FMT_LONG = 0x00000100 + PDH_FMT_DOUBLE = 0x00000200 + PDH_FMT_LARGE = 0x00000400 + PDH_INVALID_DATA = 0xc0000bc6 + PDH_INVALID_HANDLE = 0xC0000bbc + PDH_NO_DATA = 0x800007d5 +) + +var ( + Modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + ModNt = windows.NewLazySystemDLL("ntdll.dll") + ModPdh = windows.NewLazySystemDLL("pdh.dll") + ModPsapi = windows.NewLazySystemDLL("psapi.dll") + + ProcGetSystemTimes = Modkernel32.NewProc("GetSystemTimes") + ProcNtQuerySystemInformation = ModNt.NewProc("NtQuerySystemInformation") + PdhOpenQuery = ModPdh.NewProc("PdhOpenQuery") + PdhAddCounter = ModPdh.NewProc("PdhAddCounterW") + PdhCollectQueryData = ModPdh.NewProc("PdhCollectQueryData") + PdhGetFormattedCounterValue = ModPdh.NewProc("PdhGetFormattedCounterValue") + PdhCloseQuery = ModPdh.NewProc("PdhCloseQuery") + + procQueryDosDeviceW = Modkernel32.NewProc("QueryDosDeviceW") +) + +type FILETIME struct { + DwLowDateTime uint32 + DwHighDateTime uint32 +} + +// borrowed from net/interface_windows.go +func BytePtrToString(p *uint8) string { + a := (*[10000]uint8)(unsafe.Pointer(p)) + i := 0 + for a[i] != 0 { + i++ + } + return string(a[:i]) +} + +// CounterInfo +// copied from https://github.com/mackerelio/mackerel-agent/ +type CounterInfo struct { + PostName string + CounterName string + Counter windows.Handle +} + +// CreateQuery XXX +// copied from https://github.com/mackerelio/mackerel-agent/ +func CreateQuery() (windows.Handle, error) { + var query windows.Handle + r, _, err := PdhOpenQuery.Call(0, 0, uintptr(unsafe.Pointer(&query))) + if r != 
0 {
+		return 0, err
+	}
+	return query, nil
+}
+
+// CreateCounter adds a counter to the given PDH query.
+func CreateCounter(query windows.Handle, pname, cname string) (*CounterInfo, error) {
+	var counter windows.Handle
+	r, _, err := PdhAddCounter.Call(
+		uintptr(query),
+		uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(cname))),
+		0,
+		uintptr(unsafe.Pointer(&counter)))
+	if r != 0 {
+		return nil, err
+	}
+	return &CounterInfo{
+		PostName:    pname,
+		CounterName: cname,
+		Counter:     counter,
+	}, nil
+}
+
+// WMIQueryWithContext - wraps wmi.Query with a timed-out context to avoid hanging
+func WMIQueryWithContext(ctx context.Context, query string, dst interface{}, connectServerArgs ...interface{}) error {
+	if _, ok := ctx.Deadline(); !ok {
+		ctxTimeout, cancel := context.WithTimeout(ctx, Timeout)
+		defer cancel()
+		ctx = ctxTimeout
+	}
+
+	errChan := make(chan error, 1)
+	go func() {
+		errChan <- wmi.Query(query, dst, connectServerArgs...)
+	}()
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case err := <-errChan:
+		return err
+	}
+}
+
+// ConvertDOSPath converts paths in native DOS format like:
+//   "\Device\HarddiskVolume1\Windows\system32\file.txt"
+// into:
+//   "C:\Windows\system32\file.txt"
+func ConvertDOSPath(p string) string {
+	rawDrive := strings.Join(strings.Split(p, `\`)[:3], `\`)
+
+	for d := 'A'; d <= 'Z'; d++ {
+		szDeviceName := string(d) + ":"
+		szTarget := make([]uint16, 512)
+		ret, _, _ := procQueryDosDeviceW.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(szDeviceName))),
+			uintptr(unsafe.Pointer(&szTarget[0])),
+			uintptr(len(szTarget)))
+		if ret != 0 && windows.UTF16ToString(szTarget[:]) == rawDrive {
+			return filepath.Join(szDeviceName, p[len(rawDrive):])
+		}
+	}
+	return p
+}
diff --git a/vendor/github.com/shirou/gopsutil/mem/mem.go b/vendor/github.com/shirou/gopsutil/mem/mem.go
new file mode 100644
index 00000000..8e444ba0
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/mem/mem.go
@@ -0,0 +1,101 @@
+package mem
+
+import (
+	"encoding/json"
+
+	"github.com/shirou/gopsutil/internal/common"
+)
+
+var invoke common.Invoker = common.Invoke{}
+
+// Memory usage statistics. Total, Available and Used contain numbers of bytes
+// for human consumption.
+//
+// The other fields in this struct contain kernel specific values.
+type VirtualMemoryStat struct {
+	// Total amount of RAM on this system
+	Total uint64 `json:"total"`
+
+	// RAM available for programs to allocate
+	//
+	// This value is computed from the kernel specific values.
+	Available uint64 `json:"available"`
+
+	// RAM used by programs
+	//
+	// This value is computed from the kernel specific values.
+	Used uint64 `json:"used"`
+
+	// Percentage of RAM used by programs
+	//
+	// This value is computed from the kernel specific values.
+	UsedPercent float64 `json:"usedPercent"`
+
+	// This is the kernel's notion of free memory; RAM chips whose bits nobody
+	// cares about the value of right now. For a human consumable number,
+	// Available is what you really want.
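+	// (On Linux this corresponds to MemFree in /proc/meminfo.)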
+ Free uint64 `json:"free"` + + // OS X / BSD specific numbers: + // http://www.macyourself.com/2010/02/17/what-is-free-wired-active-and-inactive-system-memory-ram/ + Active uint64 `json:"active"` + Inactive uint64 `json:"inactive"` + Wired uint64 `json:"wired"` + + // FreeBSD specific numbers: + // https://reviews.freebsd.org/D8467 + Laundry uint64 `json:"laundry"` + + // Linux specific numbers + // https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s2-proc-meminfo.html + // https://www.kernel.org/doc/Documentation/filesystems/proc.txt + // https://www.kernel.org/doc/Documentation/vm/overcommit-accounting + Buffers uint64 `json:"buffers"` + Cached uint64 `json:"cached"` + Writeback uint64 `json:"writeback"` + Dirty uint64 `json:"dirty"` + WritebackTmp uint64 `json:"writebacktmp"` + Shared uint64 `json:"shared"` + Slab uint64 `json:"slab"` + SReclaimable uint64 `json:"sreclaimable"` + SUnreclaim uint64 `json:"sunreclaim"` + PageTables uint64 `json:"pagetables"` + SwapCached uint64 `json:"swapcached"` + CommitLimit uint64 `json:"commitlimit"` + CommittedAS uint64 `json:"committedas"` + HighTotal uint64 `json:"hightotal"` + HighFree uint64 `json:"highfree"` + LowTotal uint64 `json:"lowtotal"` + LowFree uint64 `json:"lowfree"` + SwapTotal uint64 `json:"swaptotal"` + SwapFree uint64 `json:"swapfree"` + Mapped uint64 `json:"mapped"` + VMallocTotal uint64 `json:"vmalloctotal"` + VMallocUsed uint64 `json:"vmallocused"` + VMallocChunk uint64 `json:"vmallocchunk"` + HugePagesTotal uint64 `json:"hugepagestotal"` + HugePagesFree uint64 `json:"hugepagesfree"` + HugePageSize uint64 `json:"hugepagesize"` +} + +type SwapMemoryStat struct { + Total uint64 `json:"total"` + Used uint64 `json:"used"` + Free uint64 `json:"free"` + UsedPercent float64 `json:"usedPercent"` + Sin uint64 `json:"sin"` + Sout uint64 `json:"sout"` + PgIn uint64 `json:"pgin"` + PgOut uint64 `json:"pgout"` + PgFault uint64 `json:"pgfault"` +} + +func (m VirtualMemoryStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +func (m SwapMemoryStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/mem/mem_darwin.go new file mode 100644 index 00000000..fac74815 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_darwin.go @@ -0,0 +1,69 @@ +// +build darwin + +package mem + +import ( + "context" + "encoding/binary" + "fmt" + "unsafe" + + "golang.org/x/sys/unix" +) + +func getHwMemsize() (uint64, error) { + totalString, err := unix.Sysctl("hw.memsize") + if err != nil { + return 0, err + } + + // unix.sysctl() helpfully assumes the result is a null-terminated string and + // removes the last byte of the result if it's 0 :/ + totalString += "\x00" + + total := uint64(binary.LittleEndian.Uint64([]byte(totalString))) + + return total, nil +} + +// xsw_usage in sys/sysctl.h +type swapUsage struct { + Total uint64 + Avail uint64 + Used uint64 + Pagesize int32 + Encrypted bool +} + +// SwapMemory returns swapinfo. 
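+// On darwin the numbers are read from the xsw_usage struct returned by
+// sysctl("vm.swapusage"); see SwapMemoryWithContext below.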
+func SwapMemory() (*SwapMemoryStat, error) {
+	return SwapMemoryWithContext(context.Background())
+}
+
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+	// https://github.com/yanllearnn/go-osstat/blob/ae8a279d26f52ec946a03698c7f50a26cfb427e3/memory/memory_darwin.go
+	var ret *SwapMemoryStat
+
+	value, err := unix.SysctlRaw("vm.swapusage")
+	if err != nil {
+		return ret, err
+	}
+	if len(value) != 32 {
+		return ret, fmt.Errorf("unexpected output of sysctl vm.swapusage: %v (len: %d)", value, len(value))
+	}
+	swap := (*swapUsage)(unsafe.Pointer(&value[0]))
+
+	u := float64(0)
+	if swap.Total != 0 {
+		u = ((float64(swap.Total) - float64(swap.Avail)) / float64(swap.Total)) * 100.0
+	}
+
+	ret = &SwapMemoryStat{
+		Total:       swap.Total,
+		Used:        swap.Used,
+		Free:        swap.Avail,
+		UsedPercent: u,
+	}
+
+	return ret, nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/mem/mem_darwin_cgo.go
new file mode 100644
index 00000000..389f8cdf
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/mem/mem_darwin_cgo.go
@@ -0,0 +1,59 @@
+// +build darwin
+// +build cgo
+
+package mem
+
+/*
+#include <mach/mach_host.h>
+*/
+import "C"
+
+import (
+	"context"
+	"fmt"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// VirtualMemory returns a VirtualMemoryStat.
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	count := C.mach_msg_type_number_t(C.HOST_VM_INFO_COUNT)
+	var vmstat C.vm_statistics_data_t
+
+	status := C.host_statistics(C.host_t(C.mach_host_self()),
+		C.HOST_VM_INFO,
+		C.host_info_t(unsafe.Pointer(&vmstat)),
+		&count)
+
+	if status != C.KERN_SUCCESS {
+		return nil, fmt.Errorf("host_statistics error=%d", status)
+	}
+
+	pageSize := uint64(unix.Getpagesize())
+	total, err := getHwMemsize()
+	if err != nil {
+		return nil, err
+	}
+	totalCount := C.natural_t(total / pageSize)
+
+	availableCount := vmstat.inactive_count + vmstat.free_count
+	usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount)
+
+	usedCount := totalCount - availableCount
+
+	return &VirtualMemoryStat{
+		Total:       total,
+		Available:   pageSize * uint64(availableCount),
+		Used:        pageSize * uint64(usedCount),
+		UsedPercent: usedPercent,
+		Free:        pageSize * uint64(vmstat.free_count),
+		Active:      pageSize * uint64(vmstat.active_count),
+		Inactive:    pageSize * uint64(vmstat.inactive_count),
+		Wired:       pageSize * uint64(vmstat.wire_count),
+	}, nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/mem/mem_darwin_nocgo.go
new file mode 100644
index 00000000..dd7c2e60
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/mem/mem_darwin_nocgo.go
@@ -0,0 +1,94 @@
+// +build darwin
+// +build !cgo
+
+package mem
+
+import (
+	"context"
+	"os/exec"
+	"strconv"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+// getVMStat runs vm_stat and fills in the free, inactive, active and wired page counts.
+func getVMStat(vms *VirtualMemoryStat) error {
+	vm_stat, err := exec.LookPath("vm_stat")
+	if err != nil {
+		return err
+	}
+	out, err := invoke.Command(vm_stat)
+	if err != nil {
+		return err
+	}
+	return parseVMStat(string(out), vms)
+}
+
+func parseVMStat(out string, vms *VirtualMemoryStat) error {
+	var err error
+
+	lines := strings.Split(out, "\n")
+	pagesize := uint64(unix.Getpagesize())
+	for _, line := range lines {
+		fields := strings.Split(line, ":")
+		if len(fields) < 2 {
+			continue
+		}
+		key := strings.TrimSpace(fields[0])
+		value := strings.Trim(fields[1], " .")
+		switch key {
+		case "Pages free":
+			free, e := strconv.ParseUint(value, 10, 64)
+			if e != nil {
+				err = e
+			}
+			vms.Free = free * pagesize
+		case "Pages inactive":
+			inactive, e := strconv.ParseUint(value, 10, 64)
+			if e != nil {
+				err = e
+			}
+			vms.Inactive = inactive * pagesize
+		case "Pages active":
+			active, e := strconv.ParseUint(value, 10, 64)
+			if e != nil {
+				err = e
+			}
+			vms.Active = active * pagesize
+		case "Pages wired down":
+			wired, e := strconv.ParseUint(value, 10, 64)
+			if e != nil {
+				err = e
+			}
+			vms.Wired = wired * pagesize
+		}
+	}
+	return err
+}
+
+// VirtualMemory returns a VirtualMemoryStat.
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	ret := &VirtualMemoryStat{}
+
+	total, err := getHwMemsize()
+	if err != nil {
+		return nil, err
+	}
+	err = getVMStat(ret)
+	if err != nil {
+		return nil, err
+	}
+
+	ret.Available = ret.Free + ret.Inactive
+	ret.Total = total
+
+	ret.Used = ret.Total - ret.Available
+	ret.UsedPercent = 100 * float64(ret.Used) / float64(ret.Total)
+
+	return ret, nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_fallback.go b/vendor/github.com/shirou/gopsutil/mem/mem_fallback.go
new file mode 100644
index 00000000..2a0fd45b
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/mem/mem_fallback.go
@@ -0,0 +1,25 @@
+// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows
+
+package mem
+
+import (
+	"context"
+
+	"github.com/shirou/gopsutil/internal/common"
+)
+
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func SwapMemory() (*SwapMemoryStat, error) {
+	return SwapMemoryWithContext(context.Background())
+}
+
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+	return nil, common.ErrNotImplementedError
+}
diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_freebsd.go b/vendor/github.com/shirou/gopsutil/mem/mem_freebsd.go
new file mode 100644
index 00000000..f91efc9e
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/mem/mem_freebsd.go
@@ -0,0 +1,167 @@
+// +build freebsd
+
+package mem
+
+import (
+	"context"
+	"errors"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/shirou/gopsutil/internal/common"
+)
+
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	pageSize, err := common.SysctlUint("vm.stats.vm.v_page_size")
+	if err != nil {
+		return nil, err
+	}
+	physmem, err := common.SysctlUint("hw.physmem")
+	if err != nil {
+		return nil, err
+	}
+
+	free, err := common.SysctlUint("vm.stats.vm.v_free_count")
+	if err != nil {
+		return nil, err
+	}
+	active, err := common.SysctlUint("vm.stats.vm.v_active_count")
+	if err != nil {
+		return nil, err
+	}
+	inactive, err := common.SysctlUint("vm.stats.vm.v_inactive_count")
+	if err != nil {
+		return nil, err
+	}
+	buffers, err := common.SysctlUint("vfs.bufspace")
+	if err != nil {
+		return nil, err
+	}
+	wired, err := common.SysctlUint("vm.stats.vm.v_wire_count")
+	if err != nil {
+		return nil, err
+	}
+	var cached, laundry uint64
+	osreldate, _ := common.SysctlUint("kern.osreldate")
+	if osreldate < 1102000 {
+		cached, err = common.SysctlUint("vm.stats.vm.v_cache_count")
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		laundry, err = common.SysctlUint("vm.stats.vm.v_laundry_count")
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	p := pageSize
+	ret := &VirtualMemoryStat{
+		Total:    physmem,
+		Free:     free * p,
+		Active:   active * p,
+		Inactive: inactive * p,
+		Cached:   cached * p,
+		Buffers:  buffers,
+		Wired:    wired * p,
+		Laundry:  laundry * p,
+	}
+
+	ret.Available = ret.Inactive + ret.Cached + ret.Free + ret.Laundry
+	ret.Used = ret.Total - ret.Available
+	ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0
+
+	return ret, nil
+}
+
+// SwapMemory returns swapinfo.
+func SwapMemory() (*SwapMemoryStat, error) {
+	return SwapMemoryWithContext(context.Background())
+}
+
+// Constants from vm/vm_param.h
+// nolint: golint
+const (
+	XSWDEV_VERSION11 = 1
+	XSWDEV_VERSION   = 2
+)
+
+// Types from vm/vm_param.h
+type xswdev struct {
+	Version uint32 // Version is the version
+	Dev     uint64 // Dev is the device identifier
+	Flags   int32  // Flags is the swap flags applied to the device
+	NBlks   int32  // NBlks is the total number of blocks
+	Used    int32  // Used is the number of blocks used
+}
+
+// xswdev11 is the layout used by FreeBSD versions before 11;
+// see sys/vm/swap_pager.c
+type xswdev11 struct {
+	Version uint32 // Version is the version
+	Dev     uint32 // Dev is the device identifier
+	Flags   int32  // Flags is the swap flags applied to the device
+	NBlks   int32  // NBlks is the total number of blocks
+	Used    int32  // Used is the number of blocks used
+}
+
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+	// FreeBSD can have multiple swap devices so we total them up
+	i, err := common.SysctlUint("vm.nswapdev")
+	if err != nil {
+		return nil, err
+	}
+
+	if i == 0 {
+		return nil, errors.New("no swap devices found")
+	}
+
+	c := int(i)
+
+	i, err = common.SysctlUint("vm.stats.vm.v_page_size")
+	if err != nil {
+		return nil, err
+	}
+	pageSize := i
+
+	var buf []byte
+	s := &SwapMemoryStat{}
+	for n := 0; n < c; n++ {
+		buf, err = unix.SysctlRaw("vm.swap_info", n)
+		if err != nil {
+			return nil, err
+		}
+
+		// first, try to parse with version 2
+		xsw := (*xswdev)(unsafe.Pointer(&buf[0]))
+		if xsw.Version == XSWDEV_VERSION11 {
+			// this is version 1, so try to parse again
+			xsw := (*xswdev11)(unsafe.Pointer(&buf[0]))
+			if xsw.Version != XSWDEV_VERSION11 {
+				return nil, errors.New("xswdev version mismatch(11)")
+			}
+			s.Total += uint64(xsw.NBlks)
+			s.Used += uint64(xsw.Used)
+		} else if xsw.Version != XSWDEV_VERSION {
+			return nil, errors.New("xswdev version mismatch")
+		} else {
+			s.Total += uint64(xsw.NBlks)
+			s.Used += uint64(xsw.Used)
+		}
+
+	}
+
+	if s.Total != 0 {
+		s.UsedPercent = float64(s.Used) / float64(s.Total) * 100
+	}
+	s.Total *= pageSize
+	s.Used *= pageSize
+	s.Free = s.Total - s.Used
+
+	return s, nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/mem/mem_linux.go
new file mode 100644
index 00000000..92cf143e
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/mem/mem_linux.go
@@ -0,0 +1,247 @@
+// +build linux
+
+package mem
+
+import (
+	"context"
+	"math"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/shirou/gopsutil/internal/common"
+	"golang.org/x/sys/unix"
+)
+
+type VirtualMemoryExStat struct {
+	ActiveFile   uint64 `json:"activefile"`
+	InactiveFile uint64 `json:"inactivefile"`
+}
+
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	filename := common.HostProc("meminfo")
+	lines, _ := common.ReadLines(filename)
+
+	// flag if MemAvailable is in /proc/meminfo (kernel 3.14+)
+	memavail := false
+	activeFile := false   // "Active(file)" not available: 2.6.28 / Dec 2008
+	inactiveFile := false // "Inactive(file)" not available: 2.6.28 / Dec 2008
+	sReclaimable := false // "SReclaimable:" not available: 2.6.19 / Nov 2006
+
+	ret := &VirtualMemoryStat{}
+	retEx := &VirtualMemoryExStat{}
+
+	for _, line := range lines {
+		fields := strings.Split(line, ":")
+		if len(fields) != 2 {
+			continue
+		}
+		key := strings.TrimSpace(fields[0])
+		value := strings.TrimSpace(fields[1])
+		value = strings.Replace(value, " kB", "", -1)
+
+		t, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		switch key {
+		case "MemTotal":
+			ret.Total = t * 1024
+		case "MemFree":
+			ret.Free = t * 1024
+		case "MemAvailable":
+			memavail = true
+			ret.Available = t * 1024
+		case "Buffers":
+			ret.Buffers = t * 1024
+		case "Cached":
+			ret.Cached = t * 1024
+		case "Active":
+			ret.Active = t * 1024
+		case "Inactive":
+			ret.Inactive = t * 1024
+		case "Active(file)":
+			activeFile = true
+			retEx.ActiveFile = t * 1024
+		case "Inactive(file)":
+			inactiveFile = true
+			retEx.InactiveFile = t * 1024
+		case "Writeback":
+			ret.Writeback = t * 1024
+		case "WritebackTmp":
+			ret.WritebackTmp = t * 1024
+		case "Dirty":
+			ret.Dirty = t * 1024
+		case "Shmem":
+			ret.Shared = t * 1024
+		case "Slab":
+			ret.Slab = t * 1024
+		case "SReclaimable":
+			sReclaimable = true
+			ret.SReclaimable = t * 1024
+		case "SUnreclaim":
+			ret.SUnreclaim = t * 1024
+		case "PageTables":
+			ret.PageTables = t * 1024
+		case "SwapCached":
+			ret.SwapCached = t * 1024
+		case "CommitLimit":
+			ret.CommitLimit = t * 1024
+		case "Committed_AS":
+			ret.CommittedAS = t * 1024
+		case "HighTotal":
+			ret.HighTotal = t * 1024
+		case "HighFree":
+			ret.HighFree = t * 1024
+		case "LowTotal":
+			ret.LowTotal = t * 1024
+		case "LowFree":
+			ret.LowFree = t * 1024
+		case "SwapTotal":
+			ret.SwapTotal = t * 1024
+		case "SwapFree":
+			ret.SwapFree = t * 1024
+		case "Mapped":
+			ret.Mapped = t * 1024
+		case "VmallocTotal":
+			ret.VMallocTotal = t * 1024
+		case "VmallocUsed":
+			ret.VMallocUsed = t * 1024
+		case "VmallocChunk":
+			ret.VMallocChunk = t * 1024
+		case "HugePages_Total":
+			ret.HugePagesTotal = t
+		case "HugePages_Free":
+			ret.HugePagesFree = t
+		case "Hugepagesize":
+			ret.HugePageSize = t * 1024
+		}
+	}
+
+	ret.Cached += ret.SReclaimable
+
+	if !memavail {
+		if activeFile && inactiveFile && sReclaimable {
+			ret.Available = calculateAvailVmem(ret, retEx)
+		} else {
+			ret.Available = ret.Cached + ret.Free
+		}
+	}
+
+	ret.Used = ret.Total - ret.Free - ret.Buffers - ret.Cached
+	ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0
+
+	return ret, nil
+}
+
+func SwapMemory() (*SwapMemoryStat, error) {
+	return SwapMemoryWithContext(context.Background())
+}
+
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+	sysinfo := &unix.Sysinfo_t{}
+
+	if err := unix.Sysinfo(sysinfo); err != nil {
+		return nil, err
+	}
+	ret := &SwapMemoryStat{
+		Total: uint64(sysinfo.Totalswap) * uint64(sysinfo.Unit),
+		Free:  uint64(sysinfo.Freeswap) * uint64(sysinfo.Unit),
+	}
+	ret.Used = ret.Total - ret.Free
+	// guard against division by zero (UsedPercent would otherwise be NaN)
+	if ret.Total != 0 {
+		ret.UsedPercent = float64(ret.Total-ret.Free) / float64(ret.Total) * 100.0
+	} else {
+		ret.UsedPercent = 0
+	}
+	filename := common.HostProc("vmstat")
+	lines, _ := common.ReadLines(filename)
common.ReadLines(filename) + for _, l := range lines { + fields := strings.Fields(l) + if len(fields) < 2 { + continue + } + switch fields[0] { + case "pswpin": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.Sin = value * 4 * 1024 + case "pswpout": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.Sout = value * 4 * 1024 + case "pgpgin": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.PgIn = value * 4 * 1024 + case "pgpgout": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.PgOut = value * 4 * 1024 + case "pgfault": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.PgFault = value * 4 * 1024 + } + } + return ret, nil +} + +// calcuateAvailVmem is a fallback under kernel 3.14 where /proc/meminfo does not provide +// "MemAvailable:" column. It reimplements an algorithm from the link below +// https://github.com/giampaolo/psutil/pull/890 +func calcuateAvailVmem(ret *VirtualMemoryStat, retEx *VirtualMemoryExStat) uint64 { + var watermarkLow uint64 + + fn := common.HostProc("zoneinfo") + lines, err := common.ReadLines(fn) + + if err != nil { + return ret.Free + ret.Cached // fallback under kernel 2.6.13 + } + + pagesize := uint64(os.Getpagesize()) + watermarkLow = 0 + + for _, line := range lines { + fields := strings.Fields(line) + + if strings.HasPrefix(fields[0], "low") { + lowValue, err := strconv.ParseUint(fields[1], 10, 64) + + if err != nil { + lowValue = 0 + } + watermarkLow += lowValue + } + } + + watermarkLow *= pagesize + + availMemory := ret.Free - watermarkLow + pageCache := retEx.ActiveFile + retEx.InactiveFile + pageCache -= uint64(math.Min(float64(pageCache/2), float64(watermarkLow))) + availMemory += pageCache + availMemory += ret.SReclaimable - uint64(math.Min(float64(ret.SReclaimable/2.0), float64(watermarkLow))) + + if availMemory < 0 { + availMemory = 0 + } + + return availMemory +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_openbsd.go b/vendor/github.com/shirou/gopsutil/mem/mem_openbsd.go new file mode 100644 index 00000000..35472a32 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_openbsd.go @@ -0,0 +1,124 @@ +// +build openbsd + +package mem + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "os/exec" + + "github.com/shirou/gopsutil/internal/common" +) + +func GetPageSize() (uint64, error) { + return GetPageSizeWithContext(context.Background()) +} + +func GetPageSizeWithContext(ctx context.Context) (uint64, error) { + mib := []int32{CTLVm, VmUvmexp} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return 0, err + } + if length < sizeOfUvmexp { + return 0, fmt.Errorf("short syscall ret %d bytes", length) + } + var uvmexp Uvmexp + br := bytes.NewReader(buf) + err = common.Read(br, binary.LittleEndian, &uvmexp) + if err != nil { + return 0, err + } + return uint64(uvmexp.Pagesize), nil +} + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + mib := []int32{CTLVm, VmUvmexp} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } + if length < sizeOfUvmexp { + return nil, fmt.Errorf("short syscall ret %d bytes", length) + } + var uvmexp Uvmexp + br := bytes.NewReader(buf) + err = common.Read(br, binary.LittleEndian, &uvmexp) + 
if err != nil { + return nil, err + } + p := uint64(uvmexp.Pagesize) + + ret := &VirtualMemoryStat{ + Total: uint64(uvmexp.Npages) * p, + Free: uint64(uvmexp.Free) * p, + Active: uint64(uvmexp.Active) * p, + Inactive: uint64(uvmexp.Inactive) * p, + Cached: 0, // not available + Wired: uint64(uvmexp.Wired) * p, + } + + ret.Available = ret.Inactive + ret.Cached + ret.Free + ret.Used = ret.Total - ret.Available + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 + + mib = []int32{CTLVfs, VfsGeneric, VfsBcacheStat} + buf, length, err = common.CallSyscall(mib) + if err != nil { + return nil, err + } + if length < sizeOfBcachestats { + return nil, fmt.Errorf("short syscall ret %d bytes", length) + } + var bcs Bcachestats + br = bytes.NewReader(buf) + err = common.Read(br, binary.LittleEndian, &bcs) + if err != nil { + return nil, err + } + ret.Buffers = uint64(bcs.Numbufpages) * p + + return ret, nil +} + +// Return swapctl summary info +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + swapctl, err := exec.LookPath("swapctl") + if err != nil { + return nil, err + } + + out, err := invoke.CommandWithContext(ctx, swapctl, "-sk") + if err != nil { + return &SwapMemoryStat{}, nil + } + + line := string(out) + var total, used, free uint64 + + _, err = fmt.Sscanf(line, + "total: %d 1K-blocks allocated, %d used, %d available", + &total, &used, &free) + if err != nil { + return nil, errors.New("failed to parse swapctl output") + } + + percent := float64(used) / float64(total) * 100 + return &SwapMemoryStat{ + Total: total * 1024, + Used: used * 1024, + Free: free * 1024, + UsedPercent: percent, + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/mem/mem_openbsd_amd64.go new file mode 100644 index 00000000..e09b908e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_openbsd_amd64.go @@ -0,0 +1,122 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package mem + +const ( + CTLVm = 2 + CTLVfs = 10 + VmUvmexp = 4 + VfsGeneric = 0 + VfsBcacheStat = 3 +) + +const ( + sizeOfUvmexp = 0x154 + sizeOfBcachestats = 0x78 +) + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Anonpages int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Nanonneeded int32 + Nfreeanon int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Obsolete_swapins int32 + Obsolete_swapouts int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Zeroaborts int32 + Fltnoram int32 + Fltnoanon int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 
+ Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Pdreanon int32 + Pdrevnode int32 + Pdrevtext int32 + Fpswtch int32 + Kmapent int32 +} +type Bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Avail int64 +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_solaris.go b/vendor/github.com/shirou/gopsutil/mem/mem_solaris.go new file mode 100644 index 00000000..08512733 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_solaris.go @@ -0,0 +1,121 @@ +package mem + +import ( + "context" + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" +) + +// VirtualMemory for Solaris is a minimal implementation which only returns +// what Nomad needs. It does take into account global vs zone, however. +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + result := &VirtualMemoryStat{} + + zoneName, err := zoneName() + if err != nil { + return nil, err + } + + if zoneName == "global" { + cap, err := globalZoneMemoryCapacity() + if err != nil { + return nil, err + } + result.Total = cap + } else { + cap, err := nonGlobalZoneMemoryCapacity() + if err != nil { + return nil, err + } + result.Total = cap + } + + return result, nil +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + return nil, common.ErrNotImplementedError +} + +func zoneName() (string, error) { + zonename, err := exec.LookPath("zonename") + if err != nil { + return "", err + } + + ctx := context.Background() + out, err := invoke.CommandWithContext(ctx, zonename) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(out)), nil +} + +var globalZoneMemoryCapacityMatch = regexp.MustCompile(`memory size: ([\d]+) Megabytes`) + +func globalZoneMemoryCapacity() (uint64, error) { + prtconf, err := exec.LookPath("prtconf") + if err != nil { + return 0, err + } + + ctx := context.Background() + out, err := invoke.CommandWithContext(ctx, prtconf) + if err != nil { + return 0, err + } + + match := globalZoneMemoryCapacityMatch.FindAllStringSubmatch(string(out), -1) + if len(match) != 1 { + return 0, errors.New("memory size not contained in output of /usr/sbin/prtconf") + } + + totalMB, err := strconv.ParseUint(match[0][1], 10, 64) + if err != nil { + return 0, err + } + + return totalMB * 1024 * 1024, nil +} + +var kstatMatch = regexp.MustCompile(`([^\s]+)[\s]+([^\s]*)`) + +func nonGlobalZoneMemoryCapacity() (uint64, error) { + kstat, err := exec.LookPath("kstat") + if err != nil { + return 0, err + } + + ctx := context.Background() + out, err := invoke.CommandWithContext(ctx, kstat, "-p", "-c", "zone_memory_cap", "memory_cap:*:*:physcap") + if err != nil { + return 0, err + } + + kstats := kstatMatch.FindAllStringSubmatch(string(out), -1) + if len(kstats) != 1 { + return 0, fmt.Errorf("expected 1 kstat, found %d", len(kstats)) + } + + memSizeBytes, err := strconv.ParseUint(kstats[0][2], 10, 64) + if err != nil { + return 0, err + } + + return 
memSizeBytes, nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_windows.go b/vendor/github.com/shirou/gopsutil/mem/mem_windows.go
new file mode 100644
index 00000000..cfdf8bd1
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/mem/mem_windows.go
@@ -0,0 +1,97 @@
+// +build windows
+
+package mem
+
+import (
+	"context"
+	"unsafe"
+
+	"github.com/shirou/gopsutil/internal/common"
+	"golang.org/x/sys/windows"
+)
+
+var (
+	procGlobalMemoryStatusEx = common.Modkernel32.NewProc("GlobalMemoryStatusEx")
+	procGetPerformanceInfo   = common.ModPsapi.NewProc("GetPerformanceInfo")
+)
+
+type memoryStatusEx struct {
+	cbSize                  uint32
+	dwMemoryLoad            uint32
+	ullTotalPhys            uint64 // in bytes
+	ullAvailPhys            uint64
+	ullTotalPageFile        uint64
+	ullAvailPageFile        uint64
+	ullTotalVirtual         uint64
+	ullAvailVirtual         uint64
+	ullAvailExtendedVirtual uint64
+}
+
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	var memInfo memoryStatusEx
+	memInfo.cbSize = uint32(unsafe.Sizeof(memInfo))
+	mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo)))
+	if mem == 0 {
+		return nil, windows.GetLastError()
+	}
+
+	ret := &VirtualMemoryStat{
+		Total:       memInfo.ullTotalPhys,
+		Available:   memInfo.ullAvailPhys,
+		UsedPercent: float64(memInfo.dwMemoryLoad),
+	}
+
+	ret.Used = ret.Total - ret.Available
+	return ret, nil
+}
+
+type performanceInformation struct {
+	cb                uint32
+	commitTotal       uint64
+	commitLimit       uint64
+	commitPeak        uint64
+	physicalTotal     uint64
+	physicalAvailable uint64
+	systemCache       uint64
+	kernelTotal       uint64
+	kernelPaged       uint64
+	kernelNonpaged    uint64
+	pageSize          uint64
+	handleCount       uint32
+	processCount      uint32
+	threadCount       uint32
+}
+
+func SwapMemory() (*SwapMemoryStat, error) {
+	return SwapMemoryWithContext(context.Background())
+}
+
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+	var perfInfo performanceInformation
+	perfInfo.cb = uint32(unsafe.Sizeof(perfInfo))
+	mem, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb))
+	if mem == 0 {
+		return nil, windows.GetLastError()
+	}
+	tot := perfInfo.commitLimit * perfInfo.pageSize
+	used := perfInfo.commitTotal * perfInfo.pageSize
+	free := tot - used
+	var usedPercent float64
+	if tot == 0 {
+		usedPercent = 0
+	} else {
+		// express as a percentage (0-100), consistent with the other platforms
+		usedPercent = float64(used) / float64(tot) * 100
+	}
+	ret := &SwapMemoryStat{
+		Total:       tot,
+		Used:        used,
+		Free:        free,
+		UsedPercent: usedPercent,
+	}
+
+	return ret, nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/net/net.go b/vendor/github.com/shirou/gopsutil/net/net.go
new file mode 100644
index 00000000..c9a4bafe
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/net/net.go
@@ -0,0 +1,356 @@
+package net
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/shirou/gopsutil/internal/common"
+)
+
+var invoke common.Invoker = common.Invoke{}
+
+type IOCountersStat struct {
+	Name        string `json:"name"`        // interface name
+	BytesSent   uint64 `json:"bytesSent"`   // number of bytes sent
+	BytesRecv   uint64 `json:"bytesRecv"`   // number of bytes received
+	PacketsSent uint64 `json:"packetsSent"` // number of packets sent
+	PacketsRecv uint64 `json:"packetsRecv"` // number of packets received
+	Errin       uint64 `json:"errin"`       // total number of errors while receiving
+	Errout      uint64 `json:"errout"`      // total number of errors while sending
+	Dropin      uint64
`json:"dropin"` // total number of incoming packets which were dropped + Dropout uint64 `json:"dropout"` // total number of outgoing packets which were dropped (always 0 on OSX and BSD) + Fifoin uint64 `json:"fifoin"` // total number of FIFO buffers errors while receiving + Fifoout uint64 `json:"fifoout"` // total number of FIFO buffers errors while sending + +} + +// Addr is implemented compatibility to psutil +type Addr struct { + IP string `json:"ip"` + Port uint32 `json:"port"` +} + +type ConnectionStat struct { + Fd uint32 `json:"fd"` + Family uint32 `json:"family"` + Type uint32 `json:"type"` + Laddr Addr `json:"localaddr"` + Raddr Addr `json:"remoteaddr"` + Status string `json:"status"` + Uids []int32 `json:"uids"` + Pid int32 `json:"pid"` +} + +// System wide stats about different network protocols +type ProtoCountersStat struct { + Protocol string `json:"protocol"` + Stats map[string]int64 `json:"stats"` +} + +// NetInterfaceAddr is designed for represent interface addresses +type InterfaceAddr struct { + Addr string `json:"addr"` +} + +type InterfaceStat struct { + Index int `json:"index"` + MTU int `json:"mtu"` // maximum transmission unit + Name string `json:"name"` // e.g., "en0", "lo0", "eth0.100" + HardwareAddr string `json:"hardwareaddr"` // IEEE MAC-48, EUI-48 and EUI-64 form + Flags []string `json:"flags"` // e.g., FlagUp, FlagLoopback, FlagMulticast + Addrs []InterfaceAddr `json:"addrs"` +} + +type FilterStat struct { + ConnTrackCount int64 `json:"conntrackCount"` + ConnTrackMax int64 `json:"conntrackMax"` +} + +// ConntrackStat has conntrack summary info +type ConntrackStat struct { + Entries uint32 `json:"entries"` // Number of entries in the conntrack table + Searched uint32 `json:"searched"` // Number of conntrack table lookups performed + Found uint32 `json:"found"` // Number of searched entries which were successful + New uint32 `json:"new"` // Number of entries added which were not expected before + Invalid uint32 `json:"invalid"` // Number of packets seen which can not be tracked + Ignore uint32 `json:"ignore"` // Packets seen which are already connected to an entry + Delete uint32 `json:"delete"` // Number of entries which were removed + DeleteList uint32 `json:"delete_list"` // Number of entries which were put to dying list + Insert uint32 `json:"insert"` // Number of entries inserted into the list + InsertFailed uint32 `json:"insert_failed"` // # insertion attempted but failed (same entry exists) + Drop uint32 `json:"drop"` // Number of packets dropped due to conntrack failure. + EarlyDrop uint32 `json:"early_drop"` // Dropped entries to make room for new ones, if maxsize reached + IcmpError uint32 `json:"icmp_error"` // Subset of invalid. 
Packets that can't be tracked d/t error + ExpectNew uint32 `json:"expect_new"` // Entries added after an expectation was already present + ExpectCreate uint32 `json:"expect_create"` // Expectations added + ExpectDelete uint32 `json:"expect_delete"` // Expectations deleted + SearchRestart uint32 `json:"search_restart"` // Conntrack table lookups restarted due to hashtable resizes +} + +func NewConntrackStat(e uint32, s uint32, f uint32, n uint32, inv uint32, ign uint32, del uint32, dlst uint32, ins uint32, insfail uint32, drop uint32, edrop uint32, ie uint32, en uint32, ec uint32, ed uint32, sr uint32) *ConntrackStat { + return &ConntrackStat{ + Entries: e, + Searched: s, + Found: f, + New: n, + Invalid: inv, + Ignore: ign, + Delete: del, + DeleteList: dlst, + Insert: ins, + InsertFailed: insfail, + Drop: drop, + EarlyDrop: edrop, + IcmpError: ie, + ExpectNew: en, + ExpectCreate: ec, + ExpectDelete: ed, + SearchRestart: sr, + } +} + +type ConntrackStatList struct { + items []*ConntrackStat +} + +func NewConntrackStatList() *ConntrackStatList { + return &ConntrackStatList{ + items: []*ConntrackStat{}, + } +} + +func (l *ConntrackStatList) Append(c *ConntrackStat) { + l.items = append(l.items, c) +} + +func (l *ConntrackStatList) Items() []ConntrackStat { + items := make([]ConntrackStat, len(l.items), len(l.items)) + for i, el := range l.items { + items[i] = *el + } + return items +} + +// Summary returns a single-element list with totals from all list items. +func (l *ConntrackStatList) Summary() []ConntrackStat { + summary := NewConntrackStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + for _, cs := range l.items { + summary.Entries += cs.Entries + summary.Searched += cs.Searched + summary.Found += cs.Found + summary.New += cs.New + summary.Invalid += cs.Invalid + summary.Ignore += cs.Ignore + summary.Delete += cs.Delete + summary.DeleteList += cs.DeleteList + summary.Insert += cs.Insert + summary.InsertFailed += cs.InsertFailed + summary.Drop += cs.Drop + summary.EarlyDrop += cs.EarlyDrop + summary.IcmpError += cs.IcmpError + summary.ExpectNew += cs.ExpectNew + summary.ExpectCreate += cs.ExpectCreate + summary.ExpectDelete += cs.ExpectDelete + summary.SearchRestart += cs.SearchRestart + } + return []ConntrackStat{*summary} +} + +var constMap = map[string]int{ + "unix": syscall.AF_UNIX, + "TCP": syscall.SOCK_STREAM, + "UDP": syscall.SOCK_DGRAM, + "IPv4": syscall.AF_INET, + "IPv6": syscall.AF_INET6, +} + +func (n IOCountersStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ConnectionStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ProtoCountersStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (a Addr) String() string { + s, _ := json.Marshal(a) + return string(s) +} + +func (n InterfaceStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n InterfaceAddr) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ConntrackStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func Interfaces() ([]InterfaceStat, error) { + return InterfacesWithContext(context.Background()) +} + +func InterfacesWithContext(ctx context.Context) ([]InterfaceStat, error) { + is, err := net.Interfaces() + if err != nil { + return nil, err + } + ret := make([]InterfaceStat, 0, len(is)) + for _, ifi := range is { + + var flags []string + if ifi.Flags&net.FlagUp != 0 { + flags = append(flags, "up") + } + if ifi.Flags&net.FlagBroadcast != 0 { + flags = 
append(flags, "broadcast")
+		}
+		if ifi.Flags&net.FlagLoopback != 0 {
+			flags = append(flags, "loopback")
+		}
+		if ifi.Flags&net.FlagPointToPoint != 0 {
+			flags = append(flags, "pointtopoint")
+		}
+		if ifi.Flags&net.FlagMulticast != 0 {
+			flags = append(flags, "multicast")
+		}
+
+		r := InterfaceStat{
+			Index:        ifi.Index,
+			Name:         ifi.Name,
+			MTU:          ifi.MTU,
+			HardwareAddr: ifi.HardwareAddr.String(),
+			Flags:        flags,
+		}
+		addrs, err := ifi.Addrs()
+		if err == nil {
+			r.Addrs = make([]InterfaceAddr, 0, len(addrs))
+			for _, addr := range addrs {
+				r.Addrs = append(r.Addrs, InterfaceAddr{
+					Addr: addr.String(),
+				})
+			}
+		}
+		ret = append(ret, r)
+	}
+
+	return ret, nil
+}
+
+func getIOCountersAll(n []IOCountersStat) ([]IOCountersStat, error) {
+	r := IOCountersStat{
+		Name: "all",
+	}
+	for _, nic := range n {
+		r.BytesRecv += nic.BytesRecv
+		r.PacketsRecv += nic.PacketsRecv
+		r.Errin += nic.Errin
+		r.Dropin += nic.Dropin
+		r.BytesSent += nic.BytesSent
+		r.PacketsSent += nic.PacketsSent
+		r.Errout += nic.Errout
+		r.Dropout += nic.Dropout
+	}
+
+	return []IOCountersStat{r}, nil
+}
+
+func parseNetLine(line string) (ConnectionStat, error) {
+	f := strings.Fields(line)
+	if len(f) < 8 {
+		return ConnectionStat{}, fmt.Errorf("wrong line, %s", line)
+	}
+
+	if len(f) == 8 {
+		f = append(f, f[7])
+		f[7] = "unix"
+	}
+
+	pid, err := strconv.Atoi(f[1])
+	if err != nil {
+		return ConnectionStat{}, err
+	}
+	fd, err := strconv.Atoi(strings.Trim(f[3], "u"))
+	if err != nil {
+		return ConnectionStat{}, fmt.Errorf("unknown fd, %s", f[3])
+	}
+	netFamily, ok := constMap[f[4]]
+	if !ok {
+		return ConnectionStat{}, fmt.Errorf("unknown family, %s", f[4])
+	}
+	netType, ok := constMap[f[7]]
+	if !ok {
+		return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[7])
+	}
+
+	var laddr, raddr Addr
+	if f[7] == "unix" {
+		laddr.IP = f[8]
+	} else {
+		laddr, raddr, err = parseNetAddr(f[8])
+		if err != nil {
+			return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s", f[8])
+		}
+	}
+
+	n := ConnectionStat{
+		Fd:     uint32(fd),
+		Family: uint32(netFamily),
+		Type:   uint32(netType),
+		Laddr:  laddr,
+		Raddr:  raddr,
+		Pid:    int32(pid),
+	}
+	if len(f) == 10 {
+		n.Status = strings.Trim(f[9], "()")
+	}
+
+	return n, nil
+}
+
+func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) {
+	parse := func(l string) (Addr, error) {
+		host, port, err := net.SplitHostPort(l)
+		if err != nil {
+			return Addr{}, fmt.Errorf("wrong addr, %s", l)
+		}
+		lport, err := strconv.Atoi(port)
+		if err != nil {
+			return Addr{}, err
+		}
+		return Addr{IP: host, Port: uint32(lport)}, nil
+	}
+
+	addrs := strings.Split(line, "->")
+	if len(addrs) == 0 {
+		return laddr, raddr, fmt.Errorf("wrong netaddr, %s", line)
+	}
+	laddr, err = parse(addrs[0])
+	if len(addrs) == 2 { // remote addr exists
+		raddr, err = parse(addrs[1])
+		if err != nil {
+			return laddr, raddr, err
+		}
+	}
+
+	return laddr, raddr, err
+}
diff --git a/vendor/github.com/shirou/gopsutil/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/net/net_darwin.go
new file mode 100644
index 00000000..1daed869
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/net/net_darwin.go
@@ -0,0 +1,293 @@
+// +build darwin
+
+package net
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os/exec"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/shirou/gopsutil/internal/common"
+)
+
+var (
+	errNetstatHeader  = errors.New("Can't parse header of netstat output")
+	netstatLinkRegexp = regexp.MustCompile(`^<Link#(\d+)>$`)
+)
+
+const endOfLine = "\n"
+
+func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err error) {
+	var (
+		numericValue uint64
+		columns      = strings.Fields(line)
+	)
+
+	if columns[0] == "Name" {
+		err = errNetstatHeader
+		return
+	}
+
+	// try to extract the numeric link ID from a "<Link#123>" column
+	if subMatch := netstatLinkRegexp.FindStringSubmatch(columns[2]); len(subMatch) == 2 {
+		numericValue, err = strconv.ParseUint(subMatch[1], 10, 64)
+		if err != nil {
+			return
+		}
+		linkIDUint := uint(numericValue)
+		linkID = &linkIDUint
+	}
+
+	base := 1
+	numberColumns := len(columns)
+	// sometimes Address is omitted
+	if numberColumns < 12 {
+		base = 0
+	}
+	if numberColumns < 11 || numberColumns > 13 {
+		err = fmt.Errorf("line %q has an invalid number of columns %d", line, numberColumns)
+		return
+	}
+
+	parsed := make([]uint64, 0, 7)
+	vv := []string{
+		columns[base+3], // Ipkts == PacketsRecv
+		columns[base+4], // Ierrs == Errin
+		columns[base+5], // Ibytes == BytesRecv
+		columns[base+6], // Opkts == PacketsSent
+		columns[base+7], // Oerrs == Errout
+		columns[base+8], // Obytes == BytesSent
+	}
+	if len(columns) == 12 {
+		vv = append(vv, columns[base+10])
+	}
+
+	for _, target := range vv {
+		if target == "-" {
+			parsed = append(parsed, 0)
+			continue
+		}
+
+		if numericValue, err = strconv.ParseUint(target, 10, 64); err != nil {
+			return
+		}
+		parsed = append(parsed, numericValue)
+	}
+
+	stat = &IOCountersStat{
+		Name:        strings.Trim(columns[0], "*"), // remove the trailing "*" netstat appends to some interface names
+		PacketsRecv: parsed[0],
+		Errin:       parsed[1],
+		BytesRecv:   parsed[2],
+		PacketsSent: parsed[3],
+		Errout:      parsed[4],
+		BytesSent:   parsed[5],
+	}
+	if len(parsed) == 7 {
+		stat.Dropout = parsed[6]
+	}
+	return
+}
+
+type netstatInterface struct {
+	linkID *uint
+	stat   *IOCountersStat
+}
+
+func parseNetstatOutput(output string) ([]netstatInterface, error) {
+	var (
+		err   error
+		lines = strings.Split(strings.Trim(output, endOfLine), endOfLine)
+	)
+
+	// number of interfaces is number of lines less one for the header
+	numberInterfaces := len(lines) - 1
+
+	interfaces := make([]netstatInterface, numberInterfaces)
+	// no output besides the header
+	if numberInterfaces == 0 {
+		return interfaces, nil
+	}
+
+	for index := 0; index < numberInterfaces; index++ {
+		nsIface := netstatInterface{}
+		if nsIface.stat, nsIface.linkID, err = parseNetstatLine(lines[index+1]); err != nil {
+			return nil, err
+		}
+		interfaces[index] = nsIface
+	}
+	return interfaces, nil
+}
+
+// mapInterfaceNameUsage maps a network interface name to the number of times it was seen
+type mapInterfaceNameUsage map[string]uint
+
+func newMapInterfaceNameUsage(ifaces []netstatInterface) mapInterfaceNameUsage {
+	output := make(mapInterfaceNameUsage)
+	for index := range ifaces {
+		if ifaces[index].linkID != nil {
+			ifaceName := ifaces[index].stat.Name
+			usage, ok := output[ifaceName]
+			if ok {
+				output[ifaceName] = usage + 1
+			} else {
+				output[ifaceName] = 1
+			}
+		}
+	}
+	return output
+}
+
+func (min mapInterfaceNameUsage) isTruncated() bool {
+	for _, usage := range min {
+		if usage > 1 {
+			return true
+		}
+	}
+	return false
+}
+
+func (min mapInterfaceNameUsage) notTruncated() []string {
+	output := make([]string, 0)
+	for ifaceName, usage := range min {
+		if usage == 1 {
+			output = append(output, ifaceName)
+		}
+	}
+	return output
+}
+
+// example of `netstat -ibdnW` output on yosemite
+// Name  Mtu   Network     Address     Ipkts  Ierrs     Ibytes   Opkts Oerrs     Obytes  Coll Drop
+// lo0   16384 <Link#1>                869107     0  169411755  869107     0  169411755     0    0
+// lo0   16384 ::1/128     ::1         869107     -  169411755  869107     -  169411755     -    -
+// lo0   16384 127         127.0.0.1   869107     -  169411755
869107 - 169411755 - - +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + var ( + ret []IOCountersStat + retIndex int + ) + + netstat, err := exec.LookPath("netstat") + if err != nil { + return nil, err + } + + // try to get all interface metrics, and hope there won't be any truncated + out, err := invoke.CommandWithContext(ctx, netstat, "-ibdnW") + if err != nil { + return nil, err + } + + nsInterfaces, err := parseNetstatOutput(string(out)) + if err != nil { + return nil, err + } + + ifaceUsage := newMapInterfaceNameUsage(nsInterfaces) + notTruncated := ifaceUsage.notTruncated() + ret = make([]IOCountersStat, len(notTruncated)) + + if !ifaceUsage.isTruncated() { + // no truncated interface name, return stats of all interface with + for index := range nsInterfaces { + if nsInterfaces[index].linkID != nil { + ret[retIndex] = *nsInterfaces[index].stat + retIndex++ + } + } + } else { + // duplicated interface, list all interfaces + ifconfig, err := exec.LookPath("ifconfig") + if err != nil { + return nil, err + } + if out, err = invoke.CommandWithContext(ctx, ifconfig, "-l"); err != nil { + return nil, err + } + interfaceNames := strings.Fields(strings.TrimRight(string(out), endOfLine)) + + // for each of the interface name, run netstat if we don't have any stats yet + for _, interfaceName := range interfaceNames { + truncated := true + for index := range nsInterfaces { + if nsInterfaces[index].linkID != nil && nsInterfaces[index].stat.Name == interfaceName { + // handle the non truncated name to avoid execute netstat for them again + ret[retIndex] = *nsInterfaces[index].stat + retIndex++ + truncated = false + break + } + } + if truncated { + // run netstat with -I$ifacename + if out, err = invoke.CommandWithContext(ctx, netstat, "-ibdnWI"+interfaceName); err != nil { + return nil, err + } + parsedIfaces, err := parseNetstatOutput(string(out)) + if err != nil { + return nil, err + } + if len(parsedIfaces) == 0 { + // interface had been removed since `ifconfig -l` had been executed + continue + } + for index := range parsedIfaces { + if parsedIfaces[index].linkID != nil { + ret = append(ret, *parsedIfaces[index].stat) + break + } + } + } + } + } + + if pernic == false { + return getIOCountersAll(ret) + } + return ret, nil +} + +// NetIOCountersByFile is an method which is added just a compatibility for linux. +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, errors.New("NetFilterCounters not implemented for darwin") +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +// NetProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. 
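As a caller-side illustration of the pernic switch implemented above: true yields one IOCountersStat per interface, false yields a single summed row named "all" via getIOCountersAll. A minimal sketch, assuming the vendored import path shown in the diff headers (the interface names printed depend on the host):

package main

import (
	"fmt"
	"log"

	"github.com/shirou/gopsutil/net"
)

func main() {
	// pernic=true: one IOCountersStat per interface.
	perNic, err := net.IOCounters(true)
	if err != nil {
		log.Fatal(err)
	}
	for _, nic := range perNic {
		fmt.Printf("%-8s rx=%d tx=%d\n", nic.Name, nic.BytesRecv, nic.BytesSent)
	}

	// pernic=false: a single aggregate entry with Name == "all".
	total, err := net.IOCounters(false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("all: rx=%d tx=%d\n", total[0].BytesRecv, total[0].BytesSent)
}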
+// Not Implemented for Darwin +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, errors.New("NetProtoCounters not implemented for darwin") +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/net/net_fallback.go new file mode 100644 index 00000000..0991347d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_fallback.go @@ -0,0 +1,57 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!windows + +package net + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + return []IOCountersStat{}, common.ErrNotImplementedError +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return []FilterStat{}, common.ErrNotImplementedError +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return []ProtoCountersStat{}, common.ErrNotImplementedError +} + +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/net/net_freebsd.go new file mode 100644 index 00000000..2284d982 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_freebsd.go @@ -0,0 +1,133 @@ +// +build freebsd + +package net + +import ( + "context" + "errors" + "os/exec" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" +) + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + netstat, err := exec.LookPath("netstat") + if err != nil { + return nil, err + } + out, err := invoke.CommandWithContext(ctx, netstat, "-ibdnW") + if err != nil { + return nil, err + } + + lines := strings.Split(string(out), "\n") + ret := make([]IOCountersStat, 0, len(lines)-1) + exists := make([]string, 0, len(ret)) + + for _, line := range lines { + values := strings.Fields(line) + if len(values) < 1 || values[0] == "Name" { + continue + } + if 
common.StringsHas(exists, values[0]) { + // skip if already get + continue + } + exists = append(exists, values[0]) + + if len(values) < 12 { + continue + } + base := 1 + // sometimes Address is omitted + if len(values) < 13 { + base = 0 + } + + parsed := make([]uint64, 0, 8) + vv := []string{ + values[base+3], // PacketsRecv + values[base+4], // Errin + values[base+5], // Dropin + values[base+6], // BytesRecvn + values[base+7], // PacketSent + values[base+8], // Errout + values[base+9], // BytesSent + values[base+11], // Dropout + } + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + t, err := strconv.ParseUint(target, 10, 64) + if err != nil { + return nil, err + } + parsed = append(parsed, t) + } + + n := IOCountersStat{ + Name: values[0], + PacketsRecv: parsed[0], + Errin: parsed[1], + Dropin: parsed[2], + BytesRecv: parsed[3], + PacketsSent: parsed[4], + Errout: parsed[5], + BytesSent: parsed[6], + Dropout: parsed[7], + } + ret = append(ret, n) + } + + if pernic == false { + return getIOCountersAll(ret) + } + + return ret, nil +} + +// NetIOCountersByFile is an method which is added just a compatibility for linux. +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, errors.New("NetFilterCounters not implemented for freebsd") +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, errors.New("ConntrackStats not implemented for freebsd") +} + +// NetProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. +// Not Implemented for FreeBSD +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, errors.New("NetProtoCounters not implemented for freebsd") +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_linux.go b/vendor/github.com/shirou/gopsutil/net/net_linux.go new file mode 100644 index 00000000..c8d70ed6 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_linux.go @@ -0,0 +1,866 @@ +// +build linux + +package net + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "syscall" + + "github.com/shirou/gopsutil/internal/common" +) + +const ( // Conntrack Column numbers + CT_ENTRIES = iota + CT_SEARCHED + CT_FOUND + CT_NEW + CT_INVALID + CT_IGNORE + CT_DELETE + CT_DELETE_LIST + CT_INSERT + CT_INSERT_FAILED + CT_DROP + CT_EARLY_DROP + CT_ICMP_ERROR + CT_EXPECT_NEW + CT_EXPECT_CREATE + CT_EXPECT_DELETE + CT_SEARCH_RESTART +) + +// NetIOCounters returnes network I/O statistics for every network +// interface installed on the system. 
If pernic argument is false, +// return only sum of all information (which name is 'all'). If true, +// every network interface installed on the system is returned +// separately. +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + filename := common.HostProc("net/dev") + return IOCountersByFile(pernic, filename) +} + +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + + parts := make([]string, 2) + + statlen := len(lines) - 1 + + ret := make([]IOCountersStat, 0, statlen) + + for _, line := range lines[2:] { + separatorPos := strings.LastIndex(line, ":") + if separatorPos == -1 { + continue + } + parts[0] = line[0:separatorPos] + parts[1] = line[separatorPos+1:] + + interfaceName := strings.TrimSpace(parts[0]) + if interfaceName == "" { + continue + } + + fields := strings.Fields(strings.TrimSpace(parts[1])) + bytesRecv, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return ret, err + } + packetsRecv, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return ret, err + } + errIn, err := strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return ret, err + } + dropIn, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return ret, err + } + fifoIn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return ret, err + } + bytesSent, err := strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return ret, err + } + packetsSent, err := strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return ret, err + } + errOut, err := strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return ret, err + } + dropOut, err := strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return ret, err + } + fifoOut, err := strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return ret, err + } + + nic := IOCountersStat{ + Name: interfaceName, + BytesRecv: bytesRecv, + PacketsRecv: packetsRecv, + Errin: errIn, + Dropin: dropIn, + Fifoin: fifoIn, + BytesSent: bytesSent, + PacketsSent: packetsSent, + Errout: errOut, + Dropout: dropOut, + Fifoout: fifoOut, + } + ret = append(ret, nic) + } + + if pernic == false { + return getIOCountersAll(ret) + } + + return ret, nil +} + +var netProtocols = []string{ + "ip", + "icmp", + "icmpmsg", + "tcp", + "udp", + "udplite", +} + +// NetProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. 
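For reference on the field indices used by the /proc/net/dev parser above: after the interface name and colon come eight receive columns (bytes, packets, errs, drop, fifo, frame, compressed, multicast) followed by eight transmit columns, which is why indices 8 through 12 map to the send-side counters. A standalone sketch with a fabricated sample line (not taken from a real host):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A fabricated /proc/net/dev data line.
	line := "  eth0: 9876543 12345 1 2 0 0 0 0 5432100 9876 3 4 0 0 0 0"

	sep := strings.LastIndex(line, ":")
	name := strings.TrimSpace(line[:sep])
	fields := strings.Fields(line[sep+1:])

	// Receive side: fields[0]=bytes ... fields[3]=drop, fields[4]=fifo.
	// Transmit side: fields[8]=bytes ... fields[11]=drop, fields[12]=fifo.
	fmt.Println(name, "rx bytes:", fields[0], "tx bytes:", fields[8])
	fmt.Println(name, "rx drops:", fields[3], "tx drops:", fields[11])
}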
+// Available protocols: +// ip,icmp,icmpmsg,tcp,udp,udplite +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + if len(protocols) == 0 { + protocols = netProtocols + } + + stats := make([]ProtoCountersStat, 0, len(protocols)) + protos := make(map[string]bool, len(protocols)) + for _, p := range protocols { + protos[p] = true + } + + filename := common.HostProc("net/snmp") + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + + linecount := len(lines) + for i := 0; i < linecount; i++ { + line := lines[i] + r := strings.IndexRune(line, ':') + if r == -1 { + return nil, errors.New(filename + " is not fomatted correctly, expected ':'.") + } + proto := strings.ToLower(line[:r]) + if !protos[proto] { + // skip protocol and data line + i++ + continue + } + + // Read header line + statNames := strings.Split(line[r+2:], " ") + + // Read data line + i++ + statValues := strings.Split(lines[i][r+2:], " ") + if len(statNames) != len(statValues) { + return nil, errors.New(filename + " is not fomatted correctly, expected same number of columns.") + } + stat := ProtoCountersStat{ + Protocol: proto, + Stats: make(map[string]int64, len(statNames)), + } + for j := range statNames { + value, err := strconv.ParseInt(statValues[j], 10, 64) + if err != nil { + return nil, err + } + stat.Stats[statNames[j]] = value + } + stats = append(stats, stat) + } + return stats, nil +} + +// NetFilterCounters returns iptables conntrack statistics +// the currently in use conntrack count and the max. +// If the file does not exist or is invalid it will return nil. 
+func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + countfile := common.HostProc("sys/net/netfilter/nf_conntrack_count") + maxfile := common.HostProc("sys/net/netfilter/nf_conntrack_max") + + count, err := common.ReadInts(countfile) + + if err != nil { + return nil, err + } + stats := make([]FilterStat, 0, 1) + + max, err := common.ReadInts(maxfile) + if err != nil { + return nil, err + } + + payload := FilterStat{ + ConnTrackCount: count[0], + ConnTrackMax: max[0], + } + + stats = append(stats, payload) + return stats, nil +} + +// ConntrackStats returns more detailed info about the conntrack table +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +// ConntrackStatsWithContext returns more detailed info about the conntrack table +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return conntrackStatsFromFile(common.HostProc("net/stat/nf_conntrack"), percpu) +} + +// conntrackStatsFromFile returns more detailed info about the conntrack table +// from `filename` +// If 'percpu' is false, the result will contain exactly one item with totals/summary +func conntrackStatsFromFile(filename string, percpu bool) ([]ConntrackStat, error) { + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + + statlist := NewConntrackStatList() + + for _, line := range lines { + fields := strings.Fields(line) + if len(fields) == 17 && fields[0] != "entries" { + statlist.Append(NewConntrackStat( + common.HexToUint32(fields[CT_ENTRIES]), + common.HexToUint32(fields[CT_SEARCHED]), + common.HexToUint32(fields[CT_FOUND]), + common.HexToUint32(fields[CT_NEW]), + common.HexToUint32(fields[CT_INVALID]), + common.HexToUint32(fields[CT_IGNORE]), + common.HexToUint32(fields[CT_DELETE]), + common.HexToUint32(fields[CT_DELETE_LIST]), + common.HexToUint32(fields[CT_INSERT]), + common.HexToUint32(fields[CT_INSERT_FAILED]), + common.HexToUint32(fields[CT_DROP]), + common.HexToUint32(fields[CT_EARLY_DROP]), + common.HexToUint32(fields[CT_ICMP_ERROR]), + common.HexToUint32(fields[CT_EXPECT_NEW]), + common.HexToUint32(fields[CT_EXPECT_CREATE]), + common.HexToUint32(fields[CT_EXPECT_DELETE]), + common.HexToUint32(fields[CT_SEARCH_RESTART]), + )) + } + } + + if percpu { + return statlist.Items(), nil + } + return statlist.Summary(), nil +} + +// http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h +var TCPStatuses = map[string]string{ + "01": "ESTABLISHED", + "02": "SYN_SENT", + "03": "SYN_RECV", + "04": "FIN_WAIT1", + "05": "FIN_WAIT2", + "06": "TIME_WAIT", + "07": "CLOSE", + "08": "CLOSE_WAIT", + "09": "LAST_ACK", + "0A": "LISTEN", + "0B": "CLOSING", +} + +type netConnectionKindType struct { + family uint32 + sockType uint32 + filename string +} + +var kindTCP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_STREAM, + filename: "tcp", +} +var kindTCP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_STREAM, + filename: "tcp6", +} +var kindUDP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_DGRAM, + filename: "udp", +} +var kindUDP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_DGRAM, + filename: "udp6", +} +var kindUNIX = netConnectionKindType{ + family: syscall.AF_UNIX, + filename: "unix", +} + +var 
netConnectionKindMap = map[string][]netConnectionKindType{ + "all": {kindTCP4, kindTCP6, kindUDP4, kindUDP6, kindUNIX}, + "tcp": {kindTCP4, kindTCP6}, + "tcp4": {kindTCP4}, + "tcp6": {kindTCP6}, + "udp": {kindUDP4, kindUDP6}, + "udp4": {kindUDP4}, + "udp6": {kindUDP6}, + "unix": {kindUNIX}, + "inet": {kindTCP4, kindTCP6, kindUDP4, kindUDP6}, + "inet4": {kindTCP4, kindUDP4}, + "inet6": {kindTCP6, kindUDP6}, +} + +type inodeMap struct { + pid int32 + fd uint32 +} + +type connTmp struct { + fd uint32 + family uint32 + sockType uint32 + laddr Addr + raddr Addr + status string + pid int32 + boundPid int32 + path string +} + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsPid(kind, 0) +} + +// Return a list of network connections opened returning at most `max` +// connections for each running process. +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return ConnectionsPidMax(kind, 0, max) +} + +// Return a list of network connections opened by a process. +func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + tmap, ok := netConnectionKindMap[kind] + if !ok { + return nil, fmt.Errorf("invalid kind, %s", kind) + } + root := common.HostProc() + var err error + var inodes map[string][]inodeMap + if pid == 0 { + inodes, err = getProcInodesAll(root, 0) + } else { + inodes, err = getProcInodes(root, pid, 0) + if len(inodes) == 0 { + // no connection for the pid + return []ConnectionStat{}, nil + } + } + if err != nil { + return nil, fmt.Errorf("cound not get pid(s), %d: %s", pid, err) + } + return statsFromInodes(root, pid, tmap, inodes) +} + +// Return up to `max` network connections opened by a process. 
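A caller-side sketch of the kind strings accepted by netConnectionKindMap above; illustrative usage only, on platforms where Connections is implemented (output depends on the host):

package main

import (
	"fmt"
	"log"

	"github.com/shirou/gopsutil/net"
)

func main() {
	// "tcp" expands to the TCP4 and TCP6 kinds via netConnectionKindMap.
	conns, err := net.Connections("tcp")
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range conns {
		if c.Status != "LISTEN" {
			continue
		}
		fmt.Printf("pid=%d listening on %s:%d\n", c.Pid, c.Laddr.IP, c.Laddr.Port)
	}
}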
+func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + tmap, ok := netConnectionKindMap[kind] + if !ok { + return nil, fmt.Errorf("invalid kind, %s", kind) + } + root := common.HostProc() + var err error + var inodes map[string][]inodeMap + if pid == 0 { + inodes, err = getProcInodesAll(root, max) + } else { + inodes, err = getProcInodes(root, pid, max) + if len(inodes) == 0 { + // no connection for the pid + return []ConnectionStat{}, nil + } + } + if err != nil { + return nil, fmt.Errorf("cound not get pid(s), %d", pid) + } + return statsFromInodes(root, pid, tmap, inodes) +} + +func statsFromInodes(root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap) ([]ConnectionStat, error) { + dupCheckMap := make(map[string]struct{}) + var ret []ConnectionStat + + var err error + for _, t := range tmap { + var path string + var connKey string + var ls []connTmp + if pid == 0 { + path = fmt.Sprintf("%s/net/%s", root, t.filename) + } else { + path = fmt.Sprintf("%s/%d/net/%s", root, pid, t.filename) + } + switch t.family { + case syscall.AF_INET, syscall.AF_INET6: + ls, err = processInet(path, t, inodes, pid) + case syscall.AF_UNIX: + ls, err = processUnix(path, t, inodes, pid) + } + if err != nil { + return nil, err + } + for _, c := range ls { + // Build TCP key to id the connection uniquely + // socket type, src ip, src port, dst ip, dst port and state should be enough + // to prevent duplications. + connKey = fmt.Sprintf("%d-%s:%d-%s:%d-%s", c.sockType, c.laddr.IP, c.laddr.Port, c.raddr.IP, c.raddr.Port, c.status) + if _, ok := dupCheckMap[connKey]; ok { + continue + } + + conn := ConnectionStat{ + Fd: c.fd, + Family: c.family, + Type: c.sockType, + Laddr: c.laddr, + Raddr: c.raddr, + Status: c.status, + Pid: c.pid, + } + if c.pid == 0 { + conn.Pid = c.boundPid + } else { + conn.Pid = c.pid + } + + // fetch process owner Real, effective, saved set, and filesystem UIDs + proc := process{Pid: conn.Pid} + conn.Uids, _ = proc.getUids() + + ret = append(ret, conn) + dupCheckMap[connKey] = struct{}{} + } + + } + + return ret, nil +} + +// getProcInodes returnes fd of the pid. +func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, error) { + ret := make(map[string][]inodeMap) + + dir := fmt.Sprintf("%s/%d/fd", root, pid) + f, err := os.Open(dir) + if err != nil { + return ret, err + } + defer f.Close() + files, err := f.Readdir(max) + if err != nil { + return ret, err + } + for _, fd := range files { + inodePath := fmt.Sprintf("%s/%d/fd/%s", root, pid, fd.Name()) + + inode, err := os.Readlink(inodePath) + if err != nil { + continue + } + if !strings.HasPrefix(inode, "socket:[") { + continue + } + // the process is using a socket + l := len(inode) + inode = inode[8 : l-1] + _, ok := ret[inode] + if !ok { + ret[inode] = make([]inodeMap, 0) + } + fd, err := strconv.Atoi(fd.Name()) + if err != nil { + continue + } + + i := inodeMap{ + pid: pid, + fd: uint32(fd), + } + ret[inode] = append(ret[inode], i) + } + return ret, nil +} + +// Pids retunres all pids. +// Note: this is a copy of process_linux.Pids() +// FIXME: Import process occures import cycle. +// move to common made other platform breaking. Need consider. 
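The inode extraction in getProcInodes above relies on the kernel rendering socket file descriptors as symlinks of the form socket:[12345]. A minimal standalone sketch of that one step, run against the current process (on hosts without procfs the open simply fails):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	dir := "/proc/self/fd"
	f, err := os.Open(dir)
	if err != nil {
		fmt.Println("no procfs here:", err)
		return
	}
	defer f.Close()

	names, _ := f.Readdirnames(-1)
	for _, name := range names {
		link, err := os.Readlink(dir + "/" + name)
		if err != nil || !strings.HasPrefix(link, "socket:[") {
			continue
		}
		inode := link[len("socket:[") : len(link)-1] // strip "socket:[" and "]"
		fmt.Printf("fd %s -> socket inode %s\n", name, inode)
	}
}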
+func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + + d, err := os.Open(common.HostProc()) + if err != nil { + return nil, err + } + defer d.Close() + + fnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, fname := range fnames { + pid, err := strconv.ParseInt(fname, 10, 32) + if err != nil { + // if not numeric name, just skip + continue + } + ret = append(ret, int32(pid)) + } + + return ret, nil +} + +// Note: the following is based off process_linux structs and methods +// we need these to fetch the owner of a process ID +// FIXME: Import process occures import cycle. +// see remarks on pids() +type process struct { + Pid int32 `json:"pid"` + uids []int32 +} + +// Uids returns user ids of the process as a slice of the int +func (p *process) getUids() ([]int32, error) { + err := p.fillFromStatus() + if err != nil { + return []int32{}, err + } + return p.uids, nil +} + +// Get status from /proc/(pid)/status +func (p *process) fillFromStatus() error { + pid := p.Pid + statPath := common.HostProc(strconv.Itoa(int(pid)), "status") + contents, err := ioutil.ReadFile(statPath) + if err != nil { + return err + } + lines := strings.Split(string(contents), "\n") + for _, line := range lines { + tabParts := strings.SplitN(line, "\t", 2) + if len(tabParts) < 2 { + continue + } + value := tabParts[1] + switch strings.TrimRight(tabParts[0], ":") { + case "Uid": + p.uids = make([]int32, 0, 4) + for _, i := range strings.Split(value, "\t") { + v, err := strconv.ParseInt(i, 10, 32) + if err != nil { + return err + } + p.uids = append(p.uids, int32(v)) + } + } + } + return nil +} + +func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { + pids, err := Pids() + if err != nil { + return nil, err + } + ret := make(map[string][]inodeMap) + + for _, pid := range pids { + t, err := getProcInodes(root, pid, max) + if err != nil { + // skip if permission error or no longer exists + if os.IsPermission(err) || os.IsNotExist(err) || err == io.EOF { + continue + } + return ret, err + } + if len(t) == 0 { + continue + } + // TODO: update ret. + ret = updateMap(ret, t) + } + return ret, nil +} + +// decodeAddress decode addresse represents addr in proc/net/* +// ex: +// "0500000A:0016" -> "10.0.0.5", 22 +// "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53 +func decodeAddress(family uint32, src string) (Addr, error) { + t := strings.Split(src, ":") + if len(t) != 2 { + return Addr{}, fmt.Errorf("does not contain port, %s", src) + } + addr := t[0] + port, err := strconv.ParseInt("0x"+t[1], 0, 64) + if err != nil { + return Addr{}, fmt.Errorf("invalid port, %s", src) + } + decoded, err := hex.DecodeString(addr) + if err != nil { + return Addr{}, fmt.Errorf("decode error, %s", err) + } + var ip net.IP + // Assumes this is little_endian + if family == syscall.AF_INET { + ip = net.IP(Reverse(decoded)) + } else { // IPv6 + ip, err = parseIPv6HexString(decoded) + if err != nil { + return Addr{}, err + } + } + return Addr{ + IP: ip.String(), + Port: uint32(port), + }, nil +} + +// Reverse reverses array of bytes. 
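To make the little-endian comment in decodeAddress concrete: the kernel prints an IPv4 address in /proc/net/tcp as one 32-bit hex word in host byte order, so "0500000A" decodes to the bytes 05 00 00 0A, which reversed give 10.0.0.5, and the port "0016" is hex for 22. A standalone sketch of the same steps:

package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
	"strings"
)

func main() {
	src := "0500000A:0016" // 10.0.0.5:22 in /proc/net/tcp notation

	parts := strings.Split(src, ":")
	raw, _ := hex.DecodeString(parts[0]) // [0x05 0x00 0x00 0x0A]

	// Reverse in place: the word is stored little-endian on x86.
	for i, j := 0, len(raw)-1; i < j; i, j = i+1, j-1 {
		raw[i], raw[j] = raw[j], raw[i]
	}
	port, _ := strconv.ParseUint(parts[1], 16, 32)

	fmt.Printf("%s:%d\n", net.IP(raw).String(), port) // prints 10.0.0.5:22
}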
+func Reverse(s []byte) []byte { + return ReverseWithContext(context.Background(), s) +} + +func ReverseWithContext(ctx context.Context, s []byte) []byte { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } + return s +} + +// parseIPv6HexString parse array of bytes to IPv6 string +func parseIPv6HexString(src []byte) (net.IP, error) { + if len(src) != 16 { + return nil, fmt.Errorf("invalid IPv6 string") + } + + buf := make([]byte, 0, 16) + for i := 0; i < len(src); i += 4 { + r := Reverse(src[i : i+4]) + buf = append(buf, r...) + } + return net.IP(buf), nil +} + +func processInet(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { + + if strings.HasSuffix(file, "6") && !common.PathExists(file) { + // IPv6 not supported, return empty. + return []connTmp{}, nil + } + + // Read the contents of the /proc file with a single read sys call. + // This minimizes duplicates in the returned connections + // For more info: + // https://github.com/shirou/gopsutil/pull/361 + contents, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + + lines := bytes.Split(contents, []byte("\n")) + + var ret []connTmp + // skip first line + for _, line := range lines[1:] { + l := strings.Fields(string(line)) + if len(l) < 10 { + continue + } + laddr := l[1] + raddr := l[2] + status := l[3] + inode := l[9] + pid := int32(0) + fd := uint32(0) + i, exists := inodes[inode] + if exists { + pid = i[0].pid + fd = i[0].fd + } + if filterPid > 0 && filterPid != pid { + continue + } + if kind.sockType == syscall.SOCK_STREAM { + status = TCPStatuses[status] + } else { + status = "NONE" + } + la, err := decodeAddress(kind.family, laddr) + if err != nil { + continue + } + ra, err := decodeAddress(kind.family, raddr) + if err != nil { + continue + } + + ret = append(ret, connTmp{ + fd: fd, + family: kind.family, + sockType: kind.sockType, + laddr: la, + raddr: ra, + status: status, + pid: pid, + }) + } + + return ret, nil +} + +func processUnix(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { + // Read the contents of the /proc file with a single read sys call. + // This minimizes duplicates in the returned connections + // For more info: + // https://github.com/shirou/gopsutil/pull/361 + contents, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + + lines := bytes.Split(contents, []byte("\n")) + + var ret []connTmp + // skip first line + for _, line := range lines[1:] { + tokens := strings.Fields(string(line)) + if len(tokens) < 6 { + continue + } + st, err := strconv.Atoi(tokens[4]) + if err != nil { + return nil, err + } + + inode := tokens[6] + + var pairs []inodeMap + pairs, exists := inodes[inode] + if !exists { + pairs = []inodeMap{ + {}, + } + } + for _, pair := range pairs { + if filterPid > 0 && filterPid != pair.pid { + continue + } + var path string + if len(tokens) == 8 { + path = tokens[len(tokens)-1] + } + ret = append(ret, connTmp{ + fd: pair.fd, + family: kind.family, + sockType: uint32(st), + laddr: Addr{ + IP: path, + }, + pid: pair.pid, + status: "NONE", + path: path, + }) + } + } + + return ret, nil +} + +func updateMap(src map[string][]inodeMap, add map[string][]inodeMap) map[string][]inodeMap { + for key, value := range add { + a, exists := src[key] + if !exists { + src[key] = value + continue + } + src[key] = append(a, value...) 
+ } + return src +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_openbsd.go b/vendor/github.com/shirou/gopsutil/net/net_openbsd.go new file mode 100644 index 00000000..3cf0a89d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_openbsd.go @@ -0,0 +1,320 @@ +// +build openbsd + +package net + +import ( + "context" + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + "syscall" + + "github.com/shirou/gopsutil/internal/common" +) + +var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) + +func ParseNetstat(output string, mode string, + iocs map[string]IOCountersStat) error { + lines := strings.Split(output, "\n") + + exists := make([]string, 0, len(lines)-1) + + columns := 6 + if mode == "ind" { + columns = 10 + } + for _, line := range lines { + values := strings.Fields(line) + if len(values) < 1 || values[0] == "Name" { + continue + } + if common.StringsHas(exists, values[0]) { + // skip if already get + continue + } + + if len(values) < columns { + continue + } + base := 1 + // sometimes Address is omitted + if len(values) < columns { + base = 0 + } + + parsed := make([]uint64, 0, 8) + var vv []string + if mode == "inb" { + vv = []string{ + values[base+3], // BytesRecv + values[base+4], // BytesSent + } + } else { + vv = []string{ + values[base+3], // Ipkts + values[base+4], // Ierrs + values[base+5], // Opkts + values[base+6], // Oerrs + values[base+8], // Drops + } + } + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + t, err := strconv.ParseUint(target, 10, 64) + if err != nil { + return err + } + parsed = append(parsed, t) + } + exists = append(exists, values[0]) + + n, present := iocs[values[0]] + if !present { + n = IOCountersStat{Name: values[0]} + } + if mode == "inb" { + n.BytesRecv = parsed[0] + n.BytesSent = parsed[1] + } else { + n.PacketsRecv = parsed[0] + n.Errin = parsed[1] + n.PacketsSent = parsed[2] + n.Errout = parsed[3] + n.Dropin = parsed[4] + n.Dropout = parsed[4] + } + + iocs[n.Name] = n + } + return nil +} + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + netstat, err := exec.LookPath("netstat") + if err != nil { + return nil, err + } + out, err := invoke.CommandWithContext(ctx, netstat, "-inb") + if err != nil { + return nil, err + } + out2, err := invoke.CommandWithContext(ctx, netstat, "-ind") + if err != nil { + return nil, err + } + iocs := make(map[string]IOCountersStat) + + lines := strings.Split(string(out), "\n") + ret := make([]IOCountersStat, 0, len(lines)-1) + + err = ParseNetstat(string(out), "inb", iocs) + if err != nil { + return nil, err + } + err = ParseNetstat(string(out2), "ind", iocs) + if err != nil { + return nil, err + } + + for _, ioc := range iocs { + ret = append(ret, ioc) + } + + if pernic == false { + return getIOCountersAll(ret) + } + + return ret, nil +} + +// NetIOCountersByFile is an method which is added just a compatibility for linux. 
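As a sanity check on the column math in ParseNetstat above, a small standalone sketch; the sample row is fabricated and assumes the usual `Name Mtu Network Address Ibytes Obytes` layout of OpenBSD `netstat -inb`, which can vary between releases:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical `netstat -inb` row: Name Mtu Network Address Ibytes Obytes
	line := "em0 1500 192.168.1/24 192.168.1.10 123456 654321"
	values := strings.Fields(line)
	base := 1 // column offset used when the Address column is present
	recv, _ := strconv.ParseUint(values[base+3], 10, 64)
	sent, _ := strconv.ParseUint(values[base+4], 10, 64)
	fmt.Printf("%s: recv=%d sent=%d\n", values[0], recv, sent)
}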
+func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, errors.New("NetFilterCounters not implemented for openbsd") +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +// NetProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. +// Not Implemented for OpenBSD +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, errors.New("NetProtoCounters not implemented for openbsd") +} + +func parseNetstatLine(line string) (ConnectionStat, error) { + f := strings.Fields(line) + if len(f) < 5 { + return ConnectionStat{}, fmt.Errorf("wrong line,%s", line) + } + + var netType, netFamily uint32 + switch f[0] { + case "tcp": + netType = syscall.SOCK_STREAM + netFamily = syscall.AF_INET + case "udp": + netType = syscall.SOCK_DGRAM + netFamily = syscall.AF_INET + case "tcp6": + netType = syscall.SOCK_STREAM + netFamily = syscall.AF_INET6 + case "udp6": + netType = syscall.SOCK_DGRAM + netFamily = syscall.AF_INET6 + default: + return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[0]) + } + + laddr, raddr, err := parseNetstatAddr(f[3], f[4], netFamily) + if err != nil { + return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s %s", f[3], f[4]) + } + + n := ConnectionStat{ + Fd: uint32(0), // not supported + Family: uint32(netFamily), + Type: uint32(netType), + Laddr: laddr, + Raddr: raddr, + Pid: int32(0), // not supported + } + if len(f) == 6 { + n.Status = f[5] + } + + return n, nil +} + +func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) { + parse := func(l string) (Addr, error) { + matches := portMatch.FindStringSubmatch(l) + if matches == nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + host := matches[1] + port := matches[2] + if host == "*" { + switch family { + case syscall.AF_INET: + host = "0.0.0.0" + case syscall.AF_INET6: + host = "::" + default: + return Addr{}, fmt.Errorf("unknown family, %d", family) + } + } + lport, err := strconv.Atoi(port) + if err != nil { + return Addr{}, err + } + return Addr{IP: host, Port: uint32(lport)}, nil + } + + laddr, err = parse(local) + if remote != "*.*" { // remote addr exists + raddr, err = parse(remote) + if err != nil { + return laddr, raddr, err + } + } + + return laddr, raddr, err +} + +// Return a list of network connections opened. 
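The `host.port` splitting done by parseNetstatAddr can be tried in isolation; a minimal sketch using the same portMatch regexp, with made-up sample strings:

package main

import (
	"fmt"
	"regexp"
)

var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`)

func main() {
	// OpenBSD netstat prints "host.port"; "*" stands for any address.
	for _, s := range []string{"10.0.0.5.22", "*.53"} {
		m := portMatch.FindStringSubmatch(s)
		host, port := m[1], m[2]
		if host == "*" {
			host = "0.0.0.0" // or "::" under AF_INET6
		}
		fmt.Printf("%s -> %s:%s\n", s, host, port)
	}
}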
+func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + var ret []ConnectionStat + + args := []string{"-na"} + switch strings.ToLower(kind) { + default: + fallthrough + case "": + fallthrough + case "all": + fallthrough + case "inet": + // nothing to add + case "inet4": + args = append(args, "-finet") + case "inet6": + args = append(args, "-finet6") + case "tcp": + args = append(args, "-ptcp") + case "tcp4": + args = append(args, "-ptcp", "-finet") + case "tcp6": + args = append(args, "-ptcp", "-finet6") + case "udp": + args = append(args, "-pudp") + case "udp4": + args = append(args, "-pudp", "-finet") + case "udp6": + args = append(args, "-pudp", "-finet6") + case "unix": + return ret, common.ErrNotImplementedError + } + + netstat, err := exec.LookPath("netstat") + if err != nil { + return nil, err + } + out, err := invoke.CommandWithContext(ctx, netstat, args...) + + if err != nil { + return nil, err + } + lines := strings.Split(string(out), "\n") + for _, line := range lines { + if !(strings.HasPrefix(line, "tcp") || strings.HasPrefix(line, "udp")) { + continue + } + n, err := parseNetstatLine(line) + if err != nil { + continue + } + + ret = append(ret, n) + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_unix.go b/vendor/github.com/shirou/gopsutil/net/net_unix.go new file mode 100644 index 00000000..4451b545 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_unix.go @@ -0,0 +1,96 @@ +// +build freebsd darwin + +package net + +import ( + "context" + "strings" + + "github.com/shirou/gopsutil/internal/common" +) + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsPid(kind, 0) +} + +// Return a list of network connections opened returning at most `max` +// connections for each running process. +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +// Return a list of network connections opened by a process. +func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + var ret []ConnectionStat + + args := []string{"-i"} + switch strings.ToLower(kind) { + default: + fallthrough + case "": + fallthrough + case "all": + fallthrough + case "inet": + args = append(args, "tcp", "-i", "udp") + case "inet4": + args = append(args, "4") + case "inet6": + args = append(args, "6") + case "tcp": + args = append(args, "tcp") + case "tcp4": + args = append(args, "4tcp") + case "tcp6": + args = append(args, "6tcp") + case "udp": + args = append(args, "udp") + case "udp4": + args = append(args, "6udp") + case "udp6": + args = append(args, "6udp") + case "unix": + args = []string{"-U"} + } + + r, err := common.CallLsofWithContext(ctx, invoke, pid, args...) 
+ if err != nil { + return nil, err + } + for _, rr := range r { + if strings.HasPrefix(rr, "COMMAND") { + continue + } + n, err := parseNetLine(rr) + if err != nil { + + continue + } + + ret = append(ret, n) + } + + return ret, nil +} + +// Return up to `max` network connections opened by a process. +func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_windows.go b/vendor/github.com/shirou/gopsutil/net/net_windows.go new file mode 100644 index 00000000..d40e4163 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_windows.go @@ -0,0 +1,739 @@ +// +build windows + +package net + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "syscall" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/windows" +) + +var ( + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + procGetExtendedTCPTable = modiphlpapi.NewProc("GetExtendedTcpTable") + procGetExtendedUDPTable = modiphlpapi.NewProc("GetExtendedUdpTable") + procGetIfEntry2 = modiphlpapi.NewProc("GetIfEntry2") +) + +const ( + TCPTableBasicListener = iota + TCPTableBasicConnections + TCPTableBasicAll + TCPTableOwnerPIDListener + TCPTableOwnerPIDConnections + TCPTableOwnerPIDAll + TCPTableOwnerModuleListener + TCPTableOwnerModuleConnections + TCPTableOwnerModuleAll +) + +type netConnectionKindType struct { + family uint32 + sockType uint32 + filename string +} + +var kindTCP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_STREAM, + filename: "tcp", +} +var kindTCP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_STREAM, + filename: "tcp6", +} +var kindUDP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_DGRAM, + filename: "udp", +} +var kindUDP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_DGRAM, + filename: "udp6", +} + +var netConnectionKindMap = map[string][]netConnectionKindType{ + "all": {kindTCP4, kindTCP6, kindUDP4, kindUDP6}, + "tcp": {kindTCP4, kindTCP6}, + "tcp4": {kindTCP4}, + "tcp6": {kindTCP6}, + "udp": {kindUDP4, kindUDP6}, + "udp4": {kindUDP4}, + "udp6": {kindUDP6}, + "inet": {kindTCP4, kindTCP6, kindUDP4, kindUDP6}, + "inet4": {kindTCP4, kindUDP4}, + "inet6": {kindTCP6, kindUDP6}, +} + +// https://github.com/microsoft/ethr/blob/aecdaf923970e5a9b4c461b4e2e3963d781ad2cc/plt_windows.go#L114-L170 +type guid struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +const ( + maxStringSize = 256 + maxPhysAddressLength = 32 + pad0for64_4for32 = 0 +) + +type mibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid guid + Alias [maxStringSize + 1]uint16 + Description [maxStringSize + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [maxPhysAddressLength]uint8 + PermanentPhysicalAddress [maxPhysAddressLength]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint32 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid guid + ConnectionType uint32 + padding1 [pad0for64_4for32]byte + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + 
InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + ifs, err := net.Interfaces() + if err != nil { + return nil, err + } + var counters []IOCountersStat + + err = procGetIfEntry2.Find() + if err == nil { // Vista+, uint64 values (issue#693) + for _, ifi := range ifs { + c := IOCountersStat{ + Name: ifi.Name, + } + + row := mibIfRow2{InterfaceIndex: uint32(ifi.Index)} + ret, _, err := procGetIfEntry2.Call(uintptr(unsafe.Pointer(&row))) + if ret != 0 { + return nil, os.NewSyscallError("GetIfEntry2", err) + } + c.BytesSent = uint64(row.OutOctets) + c.BytesRecv = uint64(row.InOctets) + c.PacketsSent = uint64(row.OutUcastPkts) + c.PacketsRecv = uint64(row.InUcastPkts) + c.Errin = uint64(row.InErrors) + c.Errout = uint64(row.OutErrors) + c.Dropin = uint64(row.InDiscards) + c.Dropout = uint64(row.OutDiscards) + + counters = append(counters, c) + } + } else { // WinXP fallback, uint32 values + for _, ifi := range ifs { + c := IOCountersStat{ + Name: ifi.Name, + } + + row := windows.MibIfRow{Index: uint32(ifi.Index)} + err = windows.GetIfEntry(&row) + if err != nil { + return nil, os.NewSyscallError("GetIfEntry", err) + } + c.BytesSent = uint64(row.OutOctets) + c.BytesRecv = uint64(row.InOctets) + c.PacketsSent = uint64(row.OutUcastPkts) + c.PacketsRecv = uint64(row.InUcastPkts) + c.Errin = uint64(row.InErrors) + c.Errout = uint64(row.OutErrors) + c.Dropin = uint64(row.InDiscards) + c.Dropout = uint64(row.OutDiscards) + + counters = append(counters, c) + } + } + + if !pernic { + return getIOCountersAll(counters) + } + return counters, nil +} + +// NetIOCountersByFile is an method which is added just a compatibility for linux. 
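The IOCounters implementation above feature-detects GetIfEntry2 before using it. A minimal sketch of that probe pattern, assuming only that iphlpapi.dll exports the symbol on Vista and later:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Probe for the API at runtime before calling it, as IOCounters does.
	proc := windows.NewLazySystemDLL("iphlpapi.dll").NewProc("GetIfEntry2")
	if err := proc.Find(); err != nil {
		fmt.Println("GetIfEntry2 unavailable, fall back to GetIfEntry:", err)
		return
	}
	fmt.Println("GetIfEntry2 available; 64-bit interface counters can be used")
}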
+func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +// Return a list of network connections +// Available kind: +// reference to netConnectionKindMap +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(ctx, kind, 0) +} + +// ConnectionsPid Return a list of network connections opened by a process +func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + tmap, ok := netConnectionKindMap[kind] + if !ok { + return nil, fmt.Errorf("invalid kind, %s", kind) + } + return getProcInet(tmap, pid) +} + +func getProcInet(kinds []netConnectionKindType, pid int32) ([]ConnectionStat, error) { + stats := make([]ConnectionStat, 0) + + for _, kind := range kinds { + s, err := getNetStatWithKind(kind) + if err != nil { + continue + } + + if pid == 0 { + stats = append(stats, s...) + } else { + for _, ns := range s { + if ns.Pid != pid { + continue + } + stats = append(stats, ns) + } + } + } + + return stats, nil +} + +func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error) { + if kindType.filename == "" { + return nil, fmt.Errorf("kind filename must be required") + } + + switch kindType.filename { + case kindTCP4.filename: + return getTCPConnections(kindTCP4.family) + case kindTCP6.filename: + return getTCPConnections(kindTCP6.family) + case kindUDP4.filename: + return getUDPConnections(kindUDP4.family) + case kindUDP6.filename: + return getUDPConnections(kindUDP6.family) + } + + return nil, fmt.Errorf("invalid kind filename, %s", kindType.filename) +} + +// Return a list of network connections opened returning at most `max` +// connections for each running process. +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, errors.New("NetFilterCounters not implemented for windows") +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + + +// NetProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. 
+// Not Implemented for Windows +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, errors.New("NetProtoCounters not implemented for windows") +} + +func getTableUintptr(family uint32, buf []byte) uintptr { + var ( + pmibTCPTable pmibTCPTableOwnerPidAll + pmibTCP6Table pmibTCP6TableOwnerPidAll + + p uintptr + ) + switch family { + case kindTCP4.family: + if len(buf) > 0 { + pmibTCPTable = (*mibTCPTableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } else { + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } + case kindTCP6.family: + if len(buf) > 0 { + pmibTCP6Table = (*mibTCP6TableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } else { + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } + } + return p +} + +func getTableInfo(filename string, table interface{}) (index, step, length int) { + switch filename { + case kindTCP4.filename: + index = int(unsafe.Sizeof(table.(pmibTCPTableOwnerPidAll).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibTCPTableOwnerPidAll).Table)) + length = int(table.(pmibTCPTableOwnerPidAll).DwNumEntries) + case kindTCP6.filename: + index = int(unsafe.Sizeof(table.(pmibTCP6TableOwnerPidAll).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibTCP6TableOwnerPidAll).Table)) + length = int(table.(pmibTCP6TableOwnerPidAll).DwNumEntries) + case kindUDP4.filename: + index = int(unsafe.Sizeof(table.(pmibUDPTableOwnerPid).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibUDPTableOwnerPid).Table)) + length = int(table.(pmibUDPTableOwnerPid).DwNumEntries) + case kindUDP6.filename: + index = int(unsafe.Sizeof(table.(pmibUDP6TableOwnerPid).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibUDP6TableOwnerPid).Table)) + length = int(table.(pmibUDP6TableOwnerPid).DwNumEntries) + } + + return +} + +func getTCPConnections(family uint32) ([]ConnectionStat, error) { + var ( + p uintptr + buf []byte + size uint32 + + pmibTCPTable pmibTCPTableOwnerPidAll + pmibTCP6Table pmibTCP6TableOwnerPidAll + ) + + if family == 0 { + return nil, fmt.Errorf("faimly must be required") + } + + for { + switch family { + case kindTCP4.family: + if len(buf) > 0 { + pmibTCPTable = (*mibTCPTableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } else { + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } + case kindTCP6.family: + if len(buf) > 0 { + pmibTCP6Table = (*mibTCP6TableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } else { + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } + } + + err := getExtendedTcpTable(p, + &size, + true, + family, + tcpTableOwnerPidAll, + 0) + if err == nil { + break + } + if err != windows.ERROR_INSUFFICIENT_BUFFER { + return nil, err + } + buf = make([]byte, size) + } + + var ( + index, step int + length int + ) + + stats := make([]ConnectionStat, 0) + switch family { + case kindTCP4.family: + index, step, length = getTableInfo(kindTCP4.filename, pmibTCPTable) + case kindTCP6.family: + index, step, length = getTableInfo(kindTCP6.filename, pmibTCP6Table) + } + + if length == 0 { + return nil, nil + } + + for i := 0; i < length; i++ { + switch family { + case kindTCP4.family: + mibs := (*mibTCPRowOwnerPid)(unsafe.Pointer(&buf[index])) + ns := mibs.convertToConnectionStat() + stats = append(stats, ns) + case kindTCP6.family: + mibs := 
(*mibTCP6RowOwnerPid)(unsafe.Pointer(&buf[index]))
+			ns := mibs.convertToConnectionStat()
+			stats = append(stats, ns)
+		}
+
+		index += step
+	}
+	return stats, nil
+}
+
+func getUDPConnections(family uint32) ([]ConnectionStat, error) {
+	var (
+		p    uintptr
+		buf  []byte
+		size uint32
+
+		pmibUDPTable  pmibUDPTableOwnerPid
+		pmibUDP6Table pmibUDP6TableOwnerPid
+	)
+
+	if family == 0 {
+		return nil, fmt.Errorf("family must be specified")
+	}
+
+	for {
+		switch family {
+		case kindUDP4.family:
+			if len(buf) > 0 {
+				pmibUDPTable = (*mibUDPTableOwnerPid)(unsafe.Pointer(&buf[0]))
+				p = uintptr(unsafe.Pointer(pmibUDPTable))
+			} else {
+				p = uintptr(unsafe.Pointer(pmibUDPTable))
+			}
+		case kindUDP6.family:
+			if len(buf) > 0 {
+				pmibUDP6Table = (*mibUDP6TableOwnerPid)(unsafe.Pointer(&buf[0]))
+				p = uintptr(unsafe.Pointer(pmibUDP6Table))
+			} else {
+				p = uintptr(unsafe.Pointer(pmibUDP6Table))
+			}
+		}
+
+		err := getExtendedUdpTable(
+			p,
+			&size,
+			true,
+			family,
+			udpTableOwnerPid,
+			0,
+		)
+		if err == nil {
+			break
+		}
+		if err != windows.ERROR_INSUFFICIENT_BUFFER {
+			return nil, err
+		}
+		buf = make([]byte, size)
+	}
+
+	var (
+		index, step, length int
+	)
+
+	stats := make([]ConnectionStat, 0)
+	switch family {
+	case kindUDP4.family:
+		index, step, length = getTableInfo(kindUDP4.filename, pmibUDPTable)
+	case kindUDP6.family:
+		index, step, length = getTableInfo(kindUDP6.filename, pmibUDP6Table)
+	}
+
+	if length == 0 {
+		return nil, nil
+	}
+
+	for i := 0; i < length; i++ {
+		switch family {
+		case kindUDP4.family:
+			mibs := (*mibUDPRowOwnerPid)(unsafe.Pointer(&buf[index]))
+			ns := mibs.convertToConnectionStat()
+			stats = append(stats, ns)
+		case kindUDP6.family: // was a duplicated kindUDP4 case; IPv6 rows were never decoded
+			mibs := (*mibUDP6RowOwnerPid)(unsafe.Pointer(&buf[index]))
+			ns := mibs.convertToConnectionStat()
+			stats = append(stats, ns)
+		}
+
+		index += step
+	}
+	return stats, nil
+}
+
+// tcpStatuses https://msdn.microsoft.com/en-us/library/windows/desktop/bb485761(v=vs.85).aspx
+var tcpStatuses = map[mibTCPState]string{
+	1:  "CLOSED",
+	2:  "LISTEN",
+	3:  "SYN_SENT",
+	4:  "SYN_RECEIVED",
+	5:  "ESTABLISHED",
+	6:  "FIN_WAIT_1",
+	7:  "FIN_WAIT_2",
+	8:  "CLOSE_WAIT",
+	9:  "CLOSING",
+	10: "LAST_ACK",
+	11: "TIME_WAIT",
+	12: "DELETE",
+}
+
+func getExtendedTcpTable(pTcpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass tcpTableClass, reserved uint32) (errcode error) {
+	r1, _, _ := syscall.Syscall6(procGetExtendedTCPTable.Addr(), 6, pTcpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved))
+	if r1 != 0 {
+		errcode = syscall.Errno(r1)
+	}
+	return
+}
+
+func getExtendedUdpTable(pUdpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass udpTableClass, reserved uint32) (errcode error) {
+	r1, _, _ := syscall.Syscall6(procGetExtendedUDPTable.Addr(), 6, pUdpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved))
+	if r1 != 0 {
+		errcode = syscall.Errno(r1)
+	}
+	return
+}
+
+func getUintptrFromBool(b bool) uintptr {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+const anySize = 1
+
+// type MIB_TCP_STATE int32
+type mibTCPState int32
+
+type tcpTableClass int32
+
+const (
+	tcpTableBasicListener tcpTableClass = iota
+	tcpTableBasicConnections
+	tcpTableBasicAll
+	tcpTableOwnerPidListener
+	tcpTableOwnerPidConnections
+	tcpTableOwnerPidAll
+	tcpTableOwnerModuleListener
+	tcpTableOwnerModuleConnections
+	tcpTableOwnerModuleAll
+)
+
+type udpTableClass int32
+
+const (
+	udpTableBasic 
udpTableClass = iota + udpTableOwnerPid + udpTableOwnerModule +) + +// TCP + +type mibTCPRowOwnerPid struct { + DwState uint32 + DwLocalAddr uint32 + DwLocalPort uint32 + DwRemoteAddr uint32 + DwRemotePort uint32 + DwOwningPid uint32 +} + +func (m *mibTCPRowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindTCP4.family, + Type: kindTCP4.sockType, + Laddr: Addr{ + IP: parseIPv4HexString(m.DwLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Raddr: Addr{ + IP: parseIPv4HexString(m.DwRemoteAddr), + Port: uint32(decodePort(m.DwRemotePort)), + }, + Pid: int32(m.DwOwningPid), + Status: tcpStatuses[mibTCPState(m.DwState)], + } + + return ns +} + +type mibTCPTableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibTCPRowOwnerPid +} + +type mibTCP6RowOwnerPid struct { + UcLocalAddr [16]byte + DwLocalScopeId uint32 + DwLocalPort uint32 + UcRemoteAddr [16]byte + DwRemoteScopeId uint32 + DwRemotePort uint32 + DwState uint32 + DwOwningPid uint32 +} + +func (m *mibTCP6RowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindTCP6.family, + Type: kindTCP6.sockType, + Laddr: Addr{ + IP: parseIPv6HexString(m.UcLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Raddr: Addr{ + IP: parseIPv6HexString(m.UcRemoteAddr), + Port: uint32(decodePort(m.DwRemotePort)), + }, + Pid: int32(m.DwOwningPid), + Status: tcpStatuses[mibTCPState(m.DwState)], + } + + return ns +} + +type mibTCP6TableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibTCP6RowOwnerPid +} + +type pmibTCPTableOwnerPidAll *mibTCPTableOwnerPid +type pmibTCP6TableOwnerPidAll *mibTCP6TableOwnerPid + +// UDP + +type mibUDPRowOwnerPid struct { + DwLocalAddr uint32 + DwLocalPort uint32 + DwOwningPid uint32 +} + +func (m *mibUDPRowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindUDP4.family, + Type: kindUDP4.sockType, + Laddr: Addr{ + IP: parseIPv4HexString(m.DwLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Pid: int32(m.DwOwningPid), + } + + return ns +} + +type mibUDPTableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibUDPRowOwnerPid +} + +type mibUDP6RowOwnerPid struct { + UcLocalAddr [16]byte + DwLocalScopeId uint32 + DwLocalPort uint32 + DwOwningPid uint32 +} + +func (m *mibUDP6RowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindUDP6.family, + Type: kindUDP6.sockType, + Laddr: Addr{ + IP: parseIPv6HexString(m.UcLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Pid: int32(m.DwOwningPid), + } + + return ns +} + +type mibUDP6TableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibUDP6RowOwnerPid +} + +type pmibUDPTableOwnerPid *mibUDPTableOwnerPid +type pmibUDP6TableOwnerPid *mibUDP6TableOwnerPid + +func decodePort(port uint32) uint16 { + return syscall.Ntohs(uint16(port)) +} + +func parseIPv4HexString(addr uint32) string { + return fmt.Sprintf("%d.%d.%d.%d", addr&255, addr>>8&255, addr>>16&255, addr>>24&255) +} + +func parseIPv6HexString(addr [16]byte) string { + var ret [16]byte + for i := 0; i < 16; i++ { + ret[i] = uint8(addr[i]) + } + + // convert []byte to net.IP + ip := net.IP(ret[:]) + return ip.String() +} diff --git a/vendor/github.com/shirou/gopsutil/process/process.go b/vendor/github.com/shirou/gopsutil/process/process.go new file mode 100644 index 00000000..87b647be --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process.go @@ -0,0 +1,315 @@ +package process + +import ( + "context" + 
"encoding/json" + "errors" + "math" + "runtime" + "sort" + "time" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/mem" +) + +var ( + invoke common.Invoker = common.Invoke{} + ErrorNoChildren = errors.New("process does not have children") + ErrorProcessNotRunning = errors.New("process does not exist") +) + +type Process struct { + Pid int32 `json:"pid"` + name string + status string + parent int32 + numCtxSwitches *NumCtxSwitchesStat + uids []int32 + gids []int32 + numThreads int32 + memInfo *MemoryInfoStat + sigInfo *SignalInfoStat + createTime int64 + + lastCPUTimes *cpu.TimesStat + lastCPUTime time.Time + + tgid int32 +} + +type OpenFilesStat struct { + Path string `json:"path"` + Fd uint64 `json:"fd"` +} + +type MemoryInfoStat struct { + RSS uint64 `json:"rss"` // bytes + VMS uint64 `json:"vms"` // bytes + HWM uint64 `json:"hwm"` // bytes + Data uint64 `json:"data"` // bytes + Stack uint64 `json:"stack"` // bytes + Locked uint64 `json:"locked"` // bytes + Swap uint64 `json:"swap"` // bytes +} + +type SignalInfoStat struct { + PendingProcess uint64 `json:"pending_process"` + PendingThread uint64 `json:"pending_thread"` + Blocked uint64 `json:"blocked"` + Ignored uint64 `json:"ignored"` + Caught uint64 `json:"caught"` +} + +type RlimitStat struct { + Resource int32 `json:"resource"` + Soft int32 `json:"soft"` //TODO too small. needs to be uint64 + Hard int32 `json:"hard"` //TODO too small. needs to be uint64 + Used uint64 `json:"used"` +} + +type IOCountersStat struct { + ReadCount uint64 `json:"readCount"` + WriteCount uint64 `json:"writeCount"` + ReadBytes uint64 `json:"readBytes"` + WriteBytes uint64 `json:"writeBytes"` +} + +type NumCtxSwitchesStat struct { + Voluntary int64 `json:"voluntary"` + Involuntary int64 `json:"involuntary"` +} + +type PageFaultsStat struct { + MinorFaults uint64 `json:"minorFaults"` + MajorFaults uint64 `json:"majorFaults"` + ChildMinorFaults uint64 `json:"childMinorFaults"` + ChildMajorFaults uint64 `json:"childMajorFaults"` +} + +// Resource limit constants are from /usr/include/x86_64-linux-gnu/bits/resource.h +// from libc6-dev package in Ubuntu 16.10 +const ( + RLIMIT_CPU int32 = 0 + RLIMIT_FSIZE int32 = 1 + RLIMIT_DATA int32 = 2 + RLIMIT_STACK int32 = 3 + RLIMIT_CORE int32 = 4 + RLIMIT_RSS int32 = 5 + RLIMIT_NPROC int32 = 6 + RLIMIT_NOFILE int32 = 7 + RLIMIT_MEMLOCK int32 = 8 + RLIMIT_AS int32 = 9 + RLIMIT_LOCKS int32 = 10 + RLIMIT_SIGPENDING int32 = 11 + RLIMIT_MSGQUEUE int32 = 12 + RLIMIT_NICE int32 = 13 + RLIMIT_RTPRIO int32 = 14 + RLIMIT_RTTIME int32 = 15 +) + +func (p Process) String() string { + s, _ := json.Marshal(p) + return string(s) +} + +func (o OpenFilesStat) String() string { + s, _ := json.Marshal(o) + return string(s) +} + +func (m MemoryInfoStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +func (r RlimitStat) String() string { + s, _ := json.Marshal(r) + return string(s) +} + +func (i IOCountersStat) String() string { + s, _ := json.Marshal(i) + return string(s) +} + +func (p NumCtxSwitchesStat) String() string { + s, _ := json.Marshal(p) + return string(s) +} + +// Pids returns a slice of process ID list which are running now. 
+func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + pids, err := pidsWithContext(ctx) + sort.Slice(pids, func(i, j int) bool { return pids[i] < pids[j] }) + return pids, err +} + +// NewProcess creates a new Process instance, it only stores the pid and +// checks that the process exists. Other method on Process can be used +// to get more information about the process. An error will be returned +// if the process does not exist. +func NewProcess(pid int32) (*Process, error) { + p := &Process{Pid: pid} + + exists, err := PidExists(pid) + if err != nil { + return p, err + } + if !exists { + return p, ErrorProcessNotRunning + } + go p.CreateTime() + return p, nil +} + +func PidExists(pid int32) (bool, error) { + return PidExistsWithContext(context.Background(), pid) +} + +// Background returns true if the process is in background, false otherwise. +func (p *Process) Background() (bool, error) { + return p.BackgroundWithContext(context.Background()) +} + +func (p *Process) BackgroundWithContext(ctx context.Context) (bool, error) { + fg, err := p.ForegroundWithContext(ctx) + if err != nil { + return false, err + } + return !fg, err +} + +// If interval is 0, return difference from last call(non-blocking). +// If interval > 0, wait interval sec and return diffrence between start and end. +func (p *Process) Percent(interval time.Duration) (float64, error) { + return p.PercentWithContext(context.Background(), interval) +} + +func (p *Process) PercentWithContext(ctx context.Context, interval time.Duration) (float64, error) { + cpuTimes, err := p.Times() + if err != nil { + return 0, err + } + now := time.Now() + + if interval > 0 { + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + time.Sleep(interval) + cpuTimes, err = p.Times() + now = time.Now() + if err != nil { + return 0, err + } + } else { + if p.lastCPUTimes == nil { + // invoked first time + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + return 0, nil + } + } + + numcpu := runtime.NumCPU() + delta := (now.Sub(p.lastCPUTime).Seconds()) * float64(numcpu) + ret := calculatePercent(p.lastCPUTimes, cpuTimes, delta, numcpu) + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + return ret, nil +} + +// IsRunning returns whether the process is still running or not. +func (p *Process) IsRunning() (bool, error) { + return p.IsRunningWithContext(context.Background()) +} + +func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) { + createTime, err := p.CreateTimeWithContext(ctx) + if err != nil { + return false, err + } + p2, err := NewProcess(p.Pid) + if err == ErrorProcessNotRunning { + return false, nil + } + createTime2, err := p2.CreateTimeWithContext(ctx) + if err != nil { + return false, err + } + return createTime == createTime2, nil +} + +// CreateTime returns created time of the process in milliseconds since the epoch, in UTC. 
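A short usage sketch of the two Percent modes described above (blocking over an interval, non-blocking with 0); it assumes the vendored gopsutil import path and a platform the library supports:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/shirou/gopsutil/process"
)

func main() {
	p, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		panic(err)
	}
	// interval > 0: sleep for the window and measure CPU use across it.
	pct, err := p.Percent(time.Second)
	if err != nil {
		panic(err)
	}
	fmt.Printf("cpu over 1s window: %.1f%%\n", pct)
	// interval == 0: non-blocking, difference since the previous call.
	pct, _ = p.Percent(0)
	fmt.Printf("cpu since last call: %.1f%%\n", pct)
}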
+func (p *Process) CreateTime() (int64, error) { + return p.CreateTimeWithContext(context.Background()) +} + +func (p *Process) CreateTimeWithContext(ctx context.Context) (int64, error) { + if p.createTime != 0 { + return p.createTime, nil + } + createTime, err := p.createTimeWithContext(ctx) + p.createTime = createTime + return p.createTime, err +} + +func calculatePercent(t1, t2 *cpu.TimesStat, delta float64, numcpu int) float64 { + if delta == 0 { + return 0 + } + delta_proc := t2.Total() - t1.Total() + overall_percent := ((delta_proc / delta) * 100) * float64(numcpu) + return math.Min(100, math.Max(0, overall_percent)) +} + +// MemoryPercent returns how many percent of the total RAM this process uses +func (p *Process) MemoryPercent() (float32, error) { + return p.MemoryPercentWithContext(context.Background()) +} + +func (p *Process) MemoryPercentWithContext(ctx context.Context) (float32, error) { + machineMemory, err := mem.VirtualMemory() + if err != nil { + return 0, err + } + total := machineMemory.Total + + processMemory, err := p.MemoryInfo() + if err != nil { + return 0, err + } + used := processMemory.RSS + + return float32(math.Min(100, math.Max(0, (100*float64(used)/float64(total))))), nil +} + +// CPU_Percent returns how many percent of the CPU time this process uses +func (p *Process) CPUPercent() (float64, error) { + return p.CPUPercentWithContext(context.Background()) +} + +func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { + crt_time, err := p.CreateTime() + if err != nil { + return 0, err + } + + cput, err := p.Times() + if err != nil { + return 0, err + } + + created := time.Unix(0, crt_time*int64(time.Millisecond)) + totalTime := time.Since(created).Seconds() + if totalTime <= 0 { + return 0, nil + } + + return math.Min(100, math.Max(0, 100*cput.Total()/totalTime)), nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/process/process_darwin.go new file mode 100644 index 00000000..198cce2c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_darwin.go @@ -0,0 +1,640 @@ +// +build darwin + +package process + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + "unsafe" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/net" + "golang.org/x/sys/unix" +) + +// copied from sys/sysctl.h +const ( + CTLKern = 1 // "high kernel": proc, limits + KernProc = 14 // struct: process entries + KernProcPID = 1 // by process id + KernProcProc = 8 // only return procs + KernProcAll = 0 // everything + KernProcPathname = 12 // path to executable +) + +const ( + ClockTicks = 100 // C.sysconf(C._SC_CLK_TCK) +) + +type _Ctype_struct___0 struct { + Pad uint64 +} + +// MemoryInfoExStat is different between OSes +type MemoryInfoExStat struct { +} + +type MemoryMapsStat struct { +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + + pids, err := callPsWithContext(ctx, "pid", 0, false) + if err != nil { + return ret, err + } + + for _, pid := range pids { + v, err := strconv.Atoi(pid[0]) + if err != nil { + return ret, err + } + ret = append(ret, int32(v)) + } + + return ret, nil +} + +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + r, err := callPsWithContext(ctx, "ppid", p.Pid, false) + if err != nil { + return 0, err 
+ } + + v, err := strconv.Atoi(r[0][0]) + if err != nil { + return 0, err + } + + return int32(v), err +} +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + return common.IntToString(k.Proc.P_comm[:]), nil +} +func (p *Process) Tgid() (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +// Cmdline returns the command line arguments of the process as a string with +// each argument separated by 0x20 ascii character. +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + r, err := callPsWithContext(ctx, "command", p.Pid, false) + if err != nil { + return "", err + } + return strings.Join(r[0], " "), err +} + +// CmdlineSlice returns the command line arguments of the process as a slice with each +// element being an argument. Because of current deficiencies in the way that the command +// line arguments are found, single arguments that have spaces in the will actually be +// reported as two separate items. In order to do something better CGO would be needed +// to use the native darwin functions. +func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + r, err := callPsWithContext(ctx, "command", p.Pid, false) + if err != nil { + return nil, err + } + return r[0], err +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + r, err := callPsWithContext(ctx, "etime", p.Pid, false) + if err != nil { + return 0, err + } + + elapsedSegments := strings.Split(strings.Replace(r[0][0], "-", ":", 1), ":") + var elapsedDurations []time.Duration + for i := len(elapsedSegments) - 1; i >= 0; i-- { + p, err := strconv.ParseInt(elapsedSegments[i], 10, 0) + if err != nil { + return 0, err + } + elapsedDurations = append(elapsedDurations, time.Duration(p)) + } + + var elapsed = time.Duration(elapsedDurations[0]) * time.Second + if len(elapsedDurations) > 1 { + elapsed += time.Duration(elapsedDurations[1]) * time.Minute + } + if len(elapsedDurations) > 2 { + elapsed += time.Duration(elapsedDurations[2]) * time.Hour + } + if len(elapsedDurations) > 3 { + elapsed += time.Duration(elapsedDurations[3]) * time.Hour * 24 + } + + start := time.Now().Add(-elapsed) + return start.Unix() * 1000, nil +} +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + rr, err := common.CallLsofWithContext(ctx, invoke, p.Pid, "-FR") + if err != nil { + return nil, err + } + for _, r := range rr { + if strings.HasPrefix(r, "p") { // skip if process + continue + } + l := string(r) + v, err := strconv.Atoi(strings.Replace(l, "R", "", 1)) + if err != nil { + return nil, err + } + return NewProcess(int32(v)) + } + return nil, fmt.Errorf("could not find parent line") +} +func (p *Process) Status() (string, error) { + 
return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + r, err := callPsWithContext(ctx, "state", p.Pid, false) + if err != nil { + return "", err + } + + return r[0][0], err +} + +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + ps, err := exec.LookPath("ps") + if err != nil { + return false, err + } + out, err := invoke.CommandWithContext(ctx, ps, "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html + userEffectiveUID := int32(k.Eproc.Ucred.UID) + + return []int32{userEffectiveUID}, nil +} +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]int32, 0, 3) + gids = append(gids, int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Ucred.Ngroups), int32(k.Eproc.Pcred.P_svgid)) + + return gids, nil +} +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError + /* + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Eproc.Tdev) + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil + */ +} +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Proc.P_nice), nil +} +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) 
+} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + r, err := callPsWithContext(ctx, "utime,stime", p.Pid, true) + if err != nil { + return 0, err + } + return int32(len(r)), nil +} +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + ret := make(map[int32]*cpu.TimesStat) + return ret, common.ErrNotImplementedError +} + +func convertCPUTimes(s string) (ret float64, err error) { + var t int + var _tmp string + if strings.Contains(s, ":") { + _t := strings.Split(s, ":") + switch len(_t) { + case 3: + hour, err := strconv.Atoi(_t[0]) + if err != nil { + return ret, err + } + t += hour * 60 * 60 * ClockTicks + + mins, err := strconv.Atoi(_t[1]) + if err != nil { + return ret, err + } + t += mins * 60 * ClockTicks + _tmp = _t[2] + case 2: + mins, err := strconv.Atoi(_t[0]) + if err != nil { + return ret, err + } + t += mins * 60 * ClockTicks + _tmp = _t[1] + case 1, 0: + _tmp = s + default: + return ret, fmt.Errorf("wrong cpu time string") + } + } else { + _tmp = s + } + + _t := strings.Split(_tmp, ".") + if err != nil { + return ret, err + } + h, err := strconv.Atoi(_t[0]) + t += h * ClockTicks + h, err = strconv.Atoi(_t[1]) + t += h + return float64(t) / ClockTicks, nil +} +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + r, err := callPsWithContext(ctx, "utime,stime", p.Pid, false) + + if err != nil { + return nil, err + } + + utime, err := convertCPUTimes(r[0][0]) + if err != nil { + return nil, err + } + stime, err := convertCPUTimes(r[0][1]) + if err != nil { + return nil, err + } + + ret := &cpu.TimesStat{ + CPU: "cpu", + User: utime, + System: stime, + } + return ret, nil +} +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + r, err := callPsWithContext(ctx, "rss,vsize,pagein", p.Pid, false) + if err != nil { + return nil, err + } + rss, err := strconv.Atoi(r[0][0]) + if err != nil { + return nil, err + } + vms, err := strconv.Atoi(r[0][1]) + if err != nil { + return nil, err + } + pagein, err := strconv.Atoi(r[0][2]) + if err != nil { + return nil, err + } + + ret := &MemoryInfoStat{ + RSS: uint64(rss) * 1024, + VMS: uint64(vms) * 1024, + Swap: uint64(pagein), + } + + return ret, nil +} +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, 
error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + return nil, err + } + ret := make([]*Process, 0, len(pids)) + for _, pid := range pids { + np, err := NewProcess(pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPid("all", p.Pid) +} + +// Connections returns a slice of net.ConnectionStat used by the process at most `max` +func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), max) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMax("all", p.Pid, max) +} + +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + var ret []MemoryMapsStat + return &ret, common.ErrNotImplementedError +} + +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, err + } + + for _, pid := range pids { + p, err := NewProcess(pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +func parseKinfoProc(buf []byte) (KinfoProc, error) { + var k KinfoProc + br := bytes.NewReader(buf) + + err := common.Read(br, binary.LittleEndian, &k) + if err != nil { + return k, err + } + + return k, nil +} + +// Returns a proc as defined here: +// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html +func (p *Process) getKProc() (*KinfoProc, error) { + return p.getKProcWithContext(context.Background()) +} + +func (p *Process) getKProcWithContext(ctx context.Context) (*KinfoProc, error) { + mib := []int32{CTLKern, KernProc, KernProcPID, p.Pid} + procK := KinfoProc{} + length := uint64(unsafe.Sizeof(procK)) + buf := make([]byte, length) + _, _, syserr := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + 
uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if syserr != 0 { + return nil, syserr + } + k, err := parseKinfoProc(buf) + if err != nil { + return nil, err + } + + return &k, nil +} + +// call ps command. +// Return value deletes Header line(you must not input wrong arg). +// And splited by Space. Caller have responsibility to manage. +// If passed arg pid is 0, get information from all process. +func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool) ([][]string, error) { + bin, err := exec.LookPath("ps") + if err != nil { + return [][]string{}, err + } + + var cmd []string + if pid == 0 { // will get from all processes. + cmd = []string{"-ax", "-o", arg} + } else if threadOption { + cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))} + } else { + cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))} + } + out, err := invoke.CommandWithContext(ctx, bin, cmd...) + if err != nil { + return [][]string{}, err + } + lines := strings.Split(string(out), "\n") + + var ret [][]string + for _, l := range lines[1:] { + var lr []string + for _, r := range strings.Split(l, " ") { + if r == "" { + continue + } + lr = append(lr, strings.TrimSpace(r)) + } + if len(lr) != 0 { + ret = append(ret, lr) + } + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_darwin_386.go b/vendor/github.com/shirou/gopsutil/process/process_darwin_386.go new file mode 100644 index 00000000..f8e92238 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_darwin_386.go @@ -0,0 +1,234 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package process + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type UGid_t uint32 + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Eproc struct { + Paddr *uint64 + Sess *Session + Pcred Upcred + Ucred Uucred + Pad_cgo_0 [4]byte + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Pad_cgo_1 [2]byte + Tdev int32 + Tpgid int32 + Pad_cgo_2 [4]byte + Tsess *Session + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Pad_cgo_3 [2]byte + Flag int32 + Login [12]int8 + Spare [4]int32 + Pad_cgo_4 [4]byte +} + +type Proc struct{} + +type Session struct{} + +type ucred struct { + Link _Ctype_struct___0 + Ref uint64 + Posix Posix_cred + Label *Label + Audit Au_session +} + +type Uucred struct { + Ref int32 + UID uint32 + Ngroups int16 + Pad_cgo_0 [2]byte + Groups [16]uint32 +} + +type Upcred struct { + Pc_lock [72]int8 + Pc_ucred *ucred + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + Pad_cgo_0 [4]byte +} + +type Vmspace struct { + Dummy int32 + Pad_cgo_0 [4]byte + Dummy2 *int8 + Dummy3 [5]int32 + Pad_cgo_1 [4]byte + Dummy4 [3]*int8 +} + +type Sigacts struct{} + +type ExternProc struct { + P_un [16]byte + P_vmspace uint64 + P_sigacts uint64 + Pad_cgo_0 [3]byte + P_flag int32 
+ P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + Pad_cgo_1 [4]byte + User_stack uint64 + Exit_thread uint64 + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + Pad_cgo_2 [4]byte + P_wchan uint64 + P_wmesg uint64 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + Pad_cgo_3 [4]byte + P_tracep uint64 + P_siglist int32 + Pad_cgo_4 [4]byte + P_textvp uint64 + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + Pad_cgo_5 [4]byte + P_pgrp uint64 + P_addr uint64 + P_xstat uint16 + P_acflag uint16 + Pad_cgo_6 [4]byte + P_ru uint64 +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type Vnode struct{} + +type Pgrp struct{} + +type UserStruct struct{} + +type Au_session struct { + Aia_p *AuditinfoAddr + Mask AuMask +} + +type Posix_cred struct { + UID uint32 + Ruid uint32 + Svuid uint32 + Ngroups int16 + Pad_cgo_0 [2]byte + Groups [16]uint32 + Rgid uint32 + Svgid uint32 + Gmuid uint32 + Flags int32 +} + +type Label struct{} + +type AuditinfoAddr struct { + Auid uint32 + Mask AuMask + Termid AuTidAddr + Asid int32 + Flags uint64 +} +type AuMask struct { + Success uint32 + Failure uint32 +} +type AuTidAddr struct { + Port int32 + Type uint32 + Addr [4]uint32 +} + +type UcredQueue struct { + Next *ucred + Prev **ucred +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/process/process_darwin_amd64.go new file mode 100644 index 00000000..f8e92238 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_darwin_amd64.go @@ -0,0 +1,234 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package process + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type UGid_t uint32 + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Eproc struct { + Paddr *uint64 + Sess *Session + Pcred Upcred + Ucred Uucred + Pad_cgo_0 [4]byte + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Pad_cgo_1 [2]byte + Tdev int32 + Tpgid int32 + Pad_cgo_2 [4]byte + Tsess *Session + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Pad_cgo_3 [2]byte + Flag int32 + Login [12]int8 + Spare [4]int32 + Pad_cgo_4 [4]byte +} + +type Proc struct{} + +type Session struct{} + +type ucred struct { + Link _Ctype_struct___0 + Ref uint64 + Posix Posix_cred + Label *Label + Audit Au_session +} + +type Uucred struct { + Ref int32 + UID uint32 + Ngroups int16 + Pad_cgo_0 [2]byte + Groups [16]uint32 +} + +type Upcred struct { + Pc_lock [72]int8 + Pc_ucred *ucred + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + Pad_cgo_0 [4]byte +} + +type Vmspace struct { + Dummy int32 + Pad_cgo_0 [4]byte + Dummy2 *int8 + Dummy3 [5]int32 + 
Pad_cgo_1 [4]byte + Dummy4 [3]*int8 +} + +type Sigacts struct{} + +type ExternProc struct { + P_un [16]byte + P_vmspace uint64 + P_sigacts uint64 + Pad_cgo_0 [3]byte + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + Pad_cgo_1 [4]byte + User_stack uint64 + Exit_thread uint64 + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + Pad_cgo_2 [4]byte + P_wchan uint64 + P_wmesg uint64 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + Pad_cgo_3 [4]byte + P_tracep uint64 + P_siglist int32 + Pad_cgo_4 [4]byte + P_textvp uint64 + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + Pad_cgo_5 [4]byte + P_pgrp uint64 + P_addr uint64 + P_xstat uint16 + P_acflag uint16 + Pad_cgo_6 [4]byte + P_ru uint64 +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type Vnode struct{} + +type Pgrp struct{} + +type UserStruct struct{} + +type Au_session struct { + Aia_p *AuditinfoAddr + Mask AuMask +} + +type Posix_cred struct { + UID uint32 + Ruid uint32 + Svuid uint32 + Ngroups int16 + Pad_cgo_0 [2]byte + Groups [16]uint32 + Rgid uint32 + Svgid uint32 + Gmuid uint32 + Flags int32 +} + +type Label struct{} + +type AuditinfoAddr struct { + Auid uint32 + Mask AuMask + Termid AuTidAddr + Asid int32 + Flags uint64 +} +type AuMask struct { + Success uint32 + Failure uint32 +} +type AuTidAddr struct { + Port int32 + Type uint32 + Addr [4]uint32 +} + +type UcredQueue struct { + Next *ucred + Prev **ucred +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/process/process_darwin_cgo.go new file mode 100644 index 00000000..a8081775 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_darwin_cgo.go @@ -0,0 +1,30 @@ +// +build darwin +// +build cgo + +package process + +// #include +// #include +import "C" +import ( + "context" + "fmt" + "unsafe" +) + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + var c C.char // need a var for unsafe.Sizeof need a var + const bufsize = C.PROC_PIDPATHINFO_MAXSIZE * unsafe.Sizeof(c) + buffer := (*C.char)(C.malloc(C.size_t(bufsize))) + defer C.free(unsafe.Pointer(buffer)) + + ret, err := C.proc_pidpath(C.int(p.Pid), unsafe.Pointer(buffer), C.uint32_t(bufsize)) + if err != nil { + return "", err + } + if ret <= 0 { + return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret) + } + + return C.GoString(buffer), nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/process/process_darwin_nocgo.go new file mode 100644 index 00000000..86466fde --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_darwin_nocgo.go @@ -0,0 +1,34 @@ +// +build darwin +// +build !cgo + +package process + +import ( + "context" + "fmt" + "os/exec" + "strconv" + "strings" +) + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + lsof_bin, err := exec.LookPath("lsof") + if err != nil { + return "", err + } + out, err := invoke.CommandWithContext(ctx, lsof_bin, "-p", strconv.Itoa(int(p.Pid)), "-Fpfn") + if err != nil { + return "", fmt.Errorf("bad call to lsof: %s", err) + } + txtFound := 0 + lines := strings.Split(string(out), "\n") + for i := 1; i < len(lines); i += 2 { + if lines[i] == "ftxt" { + txtFound++ + if txtFound == 2 { 
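+				// lsof -F prints one field per line, tagged by its first character
+				// ("p" pid, "f" fd, "n" name). The "n" line preceding the second
+				// "txt" descriptor holds the path of the first txt entry, i.e. the
+				// executable; strip the one-character tag before returning it.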
+ return lines[i-1][1:], nil + } + } + } + return "", fmt.Errorf("missing txt data returned by lsof") +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/process/process_fallback.go new file mode 100644 index 00000000..1cb55c8b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_fallback.go @@ -0,0 +1,332 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!windows + +package process + +import ( + "context" + "syscall" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/net" +) + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + SharedDirty uint64 `json:"sharedDirty"` + PrivateClean uint64 `json:"privateClean"` + PrivateDirty uint64 `json:"privateDirty"` + Referenced uint64 `json:"referenced"` + Anonymous uint64 `json:"anonymous"` + Swap uint64 `json:"swap"` +} + +type MemoryInfoExStat struct { +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + return []int32{}, common.ErrNotImplementedError +} + +func Processes() ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { + pids, err := PidsWithContext(ctx) + if err != nil { + return false, err + } + + for _, i := range pids { + if i == pid { + return true, err + } + } + + return false, err +} + +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Tgid() (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + return []string{}, common.ErrNotImplementedError +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p 
*Process) StatusWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + return []int32{}, common.ErrNotImplementedError +} +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + return []int32{}, common.ErrNotImplementedError +} +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) CPUAffinity() ([]int32, error) { + return 
p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return []OpenFilesStat{}, common.ErrNotImplementedError +} +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return []net.ConnectionStat{}, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), max) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return []net.ConnectionStat{}, common.ErrNotImplementedError +} + +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + return []net.IOCountersStat{}, common.ErrNotImplementedError +} + +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) SendSignal(sig syscall.Signal) error { + return p.SendSignalWithContext(context.Background(), sig) +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { + return common.ErrNotImplementedError +} +func (p *Process) Suspend() error { + return p.SuspendWithContext(context.Background()) +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} +func (p *Process) Resume() error { + return p.ResumeWithContext(context.Background()) +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} +func (p *Process) Terminate() error { + return p.TerminateWithContext(context.Background()) +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} 
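+
+// isNotImplemented reports whether err is the sentinel that every accessor
+// in this fallback build returns. A hypothetical convenience helper (not part
+// of upstream gopsutil), sketched here only to show how callers can detect an
+// unsupported platform and degrade gracefully:
+//
+//	if _, err := p.Times(); isNotImplemented(err) {
+//		// skip CPU accounting on this platform
+//	}
+func isNotImplemented(err error) bool {
+	return err == common.ErrNotImplementedError
+}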
+func (p *Process) Kill() error { + return p.KillWithContext(context.Background()) +} + +func (p *Process) KillWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} +func (p *Process) Username() (string, error) { + return p.UsernameWithContext(context.Background()) +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/process/process_freebsd.go new file mode 100644 index 00000000..3da66b5d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_freebsd.go @@ -0,0 +1,498 @@ +// +build freebsd + +package process + +import ( + "bytes" + "context" + "encoding/binary" + "os/exec" + "strconv" + "strings" + + cpu "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + net "github.com/shirou/gopsutil/net" + "golang.org/x/sys/unix" +) + +// MemoryInfoExStat is different between OSes +type MemoryInfoExStat struct { +} + +type MemoryMapsStat struct { +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + procs, err := Processes() + if err != nil { + return ret, nil + } + + for _, p := range procs { + ret = append(ret, p.Pid) + } + + return ret, nil +} + +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Ppid, nil +} +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + return common.IntToString(k.Comm[:]), nil +} +func (p *Process) Tgid() (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + ret := strings.FieldsFunc(string(buf), func(r rune) bool { + if r == '\u0000' { + return true + } + return false + }) + + return strings.Join(ret, " "), nil +} + +func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } + if len(buf) == 0 { + return nil, nil + } + if buf[len(buf)-1] == 0 { + buf = buf[:len(buf)-1] + } + parts := bytes.Split(buf, []byte{0}) + var strParts []string + for _, p := range parts { + strParts = append(strParts, string(p)) + } + + return strParts, nil +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) 
(string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + return p, common.ErrNotImplementedError +} +func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + var s string + switch k.Stat { + case SIDL: + s = "I" + case SRUN: + s = "R" + case SSLEEP: + s = "S" + case SSTOP: + s = "T" + case SZOMB: + s = "Z" + case SWAIT: + s = "W" + case SLOCK: + s = "L" + } + + return s, nil +} + +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + ps, err := exec.LookPath("ps") + if err != nil { + return false, err + } + out, err := invoke.CommandWithContext(ctx, ps, "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + uids := make([]int32, 0, 3) + + uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + + return uids, nil +} +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]int32, 0, 3) + gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + + return gids, nil +} +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Tdev) + + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil +} +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Nice), nil +} +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) 
IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &IOCountersStat{ + ReadCount: uint64(k.Rusage.Inblock), + WriteCount: uint64(k.Rusage.Oublock), + }, nil +} +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Numthreads, nil +} +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + ret := make(map[int32]*cpu.TimesStat) + return ret, common.ErrNotImplementedError +} +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &cpu.TimesStat{ + CPU: "cpu", + User: float64(k.Rusage.Utime.Sec) + float64(k.Rusage.Utime.Usec)/1000000, + System: float64(k.Rusage.Stime.Sec) + float64(k.Rusage.Stime.Usec)/1000000, + }, nil +} +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + v, err := unix.Sysctl("vm.stats.vm.v_page_size") + if err != nil { + return nil, err + } + pageSize := common.LittleEndian.Uint16([]byte(v)) + + return &MemoryInfoStat{ + RSS: uint64(k.Rssize) * uint64(pageSize), + VMS: uint64(k.Size), + }, nil +} +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + return nil, err + } + ret := make([]*Process, 0, len(pids)) + for _, pid := 
range pids { + np, err := NewProcess(pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +// Connections returns a slice of net.ConnectionStat used by the process at most `max` +func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), max) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return []net.ConnectionStat{}, common.ErrNotImplementedError +} + +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + var ret []MemoryMapsStat + return &ret, common.ErrNotImplementedError +} + +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + results := []*Process{} + + mib := []int32{CTLKern, KernProc, KernProcProc, 0} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return results, err + } + + // get kinfo_proc size + count := int(length / uint64(sizeOfKinfoProc)) + + // parse buf to procs + for i := 0; i < count; i++ { + b := buf[i*sizeOfKinfoProc : (i+1)*sizeOfKinfoProc] + k, err := parseKinfoProc(b) + if err != nil { + continue + } + p, err := NewProcess(int32(k.Pid)) + if err != nil { + continue + } + + results = append(results, p) + } + + return results, nil +} + +func parseKinfoProc(buf []byte) (KinfoProc, error) { + var k KinfoProc + br := bytes.NewReader(buf) + err := common.Read(br, binary.LittleEndian, &k) + return k, err +} + +func (p *Process) getKProc() (*KinfoProc, error) { + return p.getKProcWithContext(context.Background()) +} + +func (p *Process) getKProcWithContext(ctx context.Context) (*KinfoProc, error) { + mib := []int32{CTLKern, KernProc, KernProcPID, p.Pid} + + buf, length, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } + if length != sizeOfKinfoProc { + return nil, err + } + + k, err := parseKinfoProc(buf) + if err != nil { + return nil, err + } + return &k, nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_freebsd_386.go b/vendor/github.com/shirou/gopsutil/process/process_freebsd_386.go new file mode 100644 index 00000000..08ab333b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_freebsd_386.go @@ -0,0 +1,192 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + 
KernProcPathname = 12 + KernProcArgs = 7 +) + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x300 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int32 /* pargs */ + Paddr int32 /* proc */ + Addr int32 /* user */ + Tracep int32 /* vnode */ + Textvp int32 /* vnode */ + Fd int32 /* filedesc */ + Vmspace int32 /* vmspace */ + Wchan int32 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint32 + Rssize int32 + Swrss int32 + Tsize int32 + Dsize int32 + Ssize int32 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int32 + Kiflag int32 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu uint8 + Lastcpu uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Sparestrings [50]int8 + Spareints [7]int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int32 /* pcb */ + Kstack int32 + Udata int32 + Tdaddr int32 /* thread */ + Spareptrs [6]int32 + Sparelongs [12]int32 + Sflag int32 + Tdflags int32 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev uint32 + Vn_mode uint16 + Status uint16 + X_kve_ispare [12]int32 + Path [1024]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/process/process_freebsd_amd64.go new file mode 100644 index 00000000..560e627d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_freebsd_amd64.go @@ -0,0 +1,192 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x440 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + 
SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int64 /* pargs */ + Paddr int64 /* proc */ + Addr int64 /* user */ + Tracep int64 /* vnode */ + Textvp int64 /* vnode */ + Fd int64 /* filedesc */ + Vmspace int64 /* vmspace */ + Wchan int64 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint64 + Rssize int64 + Swrss int64 + Tsize int64 + Dsize int64 + Ssize int64 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int64 + Kiflag int64 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu uint8 + Lastcpu uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Sparestrings [50]int8 + Spareints [7]int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int64 /* pcb */ + Kstack int64 + Udata int64 + Tdaddr int64 /* thread */ + Spareptrs [6]int64 + Sparelongs [12]int64 + Sflag int64 + Tdflags int64 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev uint32 + Vn_mode uint16 + Status uint16 + X_kve_ispare [12]int32 + Path [1024]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/process/process_freebsd_arm.go new file mode 100644 index 00000000..81ae0b9a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_freebsd_arm.go @@ -0,0 +1,192 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 +) + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x440 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + 
Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int32 + Max int32 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int32 /* pargs */ + Paddr int32 /* proc */ + Addr int32 /* user */ + Tracep int32 /* vnode */ + Textvp int32 /* vnode */ + Fd int32 /* filedesc */ + Vmspace int32 /* vmspace */ + Wchan int32 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint32 + Rssize int32 + Swrss int32 + Tsize int32 + Dsize int32 + Ssize int32 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int32 + Kiflag int32 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu uint8 + Lastcpu uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Sparestrings [50]int8 + Spareints [4]int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int32 /* pcb */ + Kstack int32 + Udata int32 + Tdaddr int32 /* thread */ + Spareptrs [6]int64 + Sparelongs [12]int64 + Sflag int64 + Tdflags int64 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev uint32 + Vn_mode uint16 + Status uint16 + X_kve_ispare [12]int32 + Path [1024]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_linux.go b/vendor/github.com/shirou/gopsutil/process/process_linux.go new file mode 100644 index 00000000..fab7b558 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_linux.go @@ -0,0 +1,1272 @@ +// +build linux + +package process + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "math" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/net" + "golang.org/x/sys/unix" +) + +var PageSize = uint64(os.Getpagesize()) + +const ( + PrioProcess = 0 // linux/resource.h + ClockTicks = 100 // C.sysconf(C._SC_CLK_TCK) +) + +// MemoryInfoExStat is different between OSes +type MemoryInfoExStat struct { + RSS uint64 `json:"rss"` // bytes + VMS uint64 `json:"vms"` // bytes + Shared uint64 `json:"shared"` // bytes + Text uint64 `json:"text"` // bytes + Lib uint64 `json:"lib"` // bytes + Data uint64 `json:"data"` // bytes + Dirty uint64 `json:"dirty"` // bytes +} + +func (m MemoryInfoExStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + 
SharedDirty uint64 `json:"sharedDirty"`
+	PrivateClean uint64 `json:"privateClean"`
+	PrivateDirty uint64 `json:"privateDirty"`
+	Referenced   uint64 `json:"referenced"`
+	Anonymous    uint64 `json:"anonymous"`
+	Swap         uint64 `json:"swap"`
+}
+
+// String returns a JSON representation of the stat.
+func (m MemoryMapsStat) String() string {
+	s, _ := json.Marshal(m)
+	return string(s)
+}
+
+// Ppid returns the parent process ID of the process.
+func (p *Process) Ppid() (int32, error) {
+	return p.PpidWithContext(context.Background())
+}
+
+func (p *Process) PpidWithContext(ctx context.Context) (int32, error) {
+	_, ppid, _, _, _, _, _, err := p.fillFromStatWithContext(ctx)
+	if err != nil {
+		return -1, err
+	}
+	return ppid, nil
+}
+
+// Name returns the name of the process.
+func (p *Process) Name() (string, error) {
+	return p.NameWithContext(context.Background())
+}
+
+func (p *Process) NameWithContext(ctx context.Context) (string, error) {
+	if p.name == "" {
+		if err := p.fillFromStatusWithContext(ctx); err != nil {
+			return "", err
+		}
+	}
+	return p.name, nil
+}
+
+// Tgid returns the tgid, a Linux synonym for the user-space Pid.
+func (p *Process) Tgid() (int32, error) {
+	if p.tgid == 0 {
+		if err := p.fillFromStatusWithContext(context.Background()); err != nil {
+			return 0, err
+		}
+	}
+	return p.tgid, nil
+}
+
+// Exe returns the executable path of the process.
+func (p *Process) Exe() (string, error) {
+	return p.ExeWithContext(context.Background())
+}
+
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+	return p.fillFromExeWithContext(ctx)
+}
+
+// Cmdline returns the command line arguments of the process as a single
+// string, with each argument separated by an ASCII space (0x20).
+func (p *Process) Cmdline() (string, error) {
+	return p.CmdlineWithContext(context.Background())
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+	return p.fillFromCmdlineWithContext(ctx)
+}
+
+// CmdlineSlice returns the command line arguments of the process as a slice,
+// with each element being one argument.
+func (p *Process) CmdlineSlice() ([]string, error) {
+	return p.CmdlineSliceWithContext(context.Background())
+}
+
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+	return p.fillSliceFromCmdlineWithContext(ctx)
+}
+
+func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) {
+	_, _, _, createTime, _, _, _, err := p.fillFromStatWithContext(ctx)
+	if err != nil {
+		return 0, err
+	}
+	return createTime, nil
+}
+
+// Cwd returns the current working directory of the process.
+func (p *Process) Cwd() (string, error) {
+	return p.CwdWithContext(context.Background())
+}
+
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
+	return p.fillFromCwdWithContext(ctx)
+}
+
+// Parent returns the parent Process of the process.
+func (p *Process) Parent() (*Process, error) {
+	return p.ParentWithContext(context.Background())
+}
+
+func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) {
+	err := p.fillFromStatusWithContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if p.parent == 0 {
+		return nil, fmt.Errorf("wrong number of parents")
+	}
+	return NewProcess(p.parent)
+}
+
+// Status returns the process status.
+// The return value is one of:
+//   R: Running   S: Sleep   T: Stop   I: Idle
+//   Z: Zombie    W: Wait    L: Lock
+// The status character is the same across all supported platforms.
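+// A short usage sketch (error handling elided):
+//
+//	p, _ := process.NewProcess(pid)
+//	if s, _ := p.Status(); s == "Z" {
+//		// process is a zombie
+//	}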
+func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + err := p.fillFromStatusWithContext(ctx) + if err != nil { + return "", err + } + return p.status, nil +} + +// Foreground returns true if the process is in foreground, false otherwise. +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + statPath := common.HostProc(strconv.Itoa(int(pid)), "stat") + contents, err := ioutil.ReadFile(statPath) + if err != nil { + return false, err + } + fields := strings.Fields(string(contents)) + if len(fields) < 8 { + return false, fmt.Errorf("insufficient data in %s", statPath) + } + pgid := fields[4] + tpgid := fields[7] + return pgid == tpgid, nil +} + +// Uids returns user ids of the process as a slice of the int +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + err := p.fillFromStatusWithContext(ctx) + if err != nil { + return []int32{}, err + } + return p.uids, nil +} + +// Gids returns group ids of the process as a slice of the int +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + err := p.fillFromStatusWithContext(ctx) + if err != nil { + return []int32{}, err + } + return p.gids, nil +} + +// Terminal returns a terminal which is associated with the process. +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + t, _, _, _, _, _, _, err := p.fillFromStatWithContext(ctx) + if err != nil { + return "", err + } + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + terminal := termmap[t] + return terminal, nil +} + +// Nice returns a nice value (priority). +// Notice: gopsutil can not set nice value. +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + _, _, _, _, _, nice, _, err := p.fillFromStatWithContext(ctx) + if err != nil { + return 0, err + } + return nice, nil +} + +// IOnice returns process I/O nice value (priority). +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +// Rlimit returns Resource Limits. +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + return p.RlimitUsage(false) +} + +// RlimitUsage returns Resource Limits. +// If gatherUsed is true, the currently used value will be gathered and added +// to the resulting RlimitStat. 
+func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + rlimits, err := p.fillFromLimitsWithContext(ctx) + if !gatherUsed || err != nil { + return rlimits, err + } + + _, _, _, _, rtprio, nice, _, err := p.fillFromStatWithContext(ctx) + if err != nil { + return nil, err + } + if err := p.fillFromStatusWithContext(ctx); err != nil { + return nil, err + } + + for i := range rlimits { + rs := &rlimits[i] + switch rs.Resource { + case RLIMIT_CPU: + times, err := p.Times() + if err != nil { + return nil, err + } + rs.Used = uint64(times.User + times.System) + case RLIMIT_DATA: + rs.Used = uint64(p.memInfo.Data) + case RLIMIT_STACK: + rs.Used = uint64(p.memInfo.Stack) + case RLIMIT_RSS: + rs.Used = uint64(p.memInfo.RSS) + case RLIMIT_NOFILE: + n, err := p.NumFDs() + if err != nil { + return nil, err + } + rs.Used = uint64(n) + case RLIMIT_MEMLOCK: + rs.Used = uint64(p.memInfo.Locked) + case RLIMIT_AS: + rs.Used = uint64(p.memInfo.VMS) + case RLIMIT_LOCKS: + //TODO we can get the used value from /proc/$pid/locks. But linux doesn't enforce it, so not a high priority. + case RLIMIT_SIGPENDING: + rs.Used = p.sigInfo.PendingProcess + case RLIMIT_NICE: + // The rlimit for nice is a little unusual, in that 0 means the niceness cannot be decreased beyond the current value, but it can be increased. + // So effectively: if rs.Soft == 0 { rs.Soft = rs.Used } + rs.Used = uint64(nice) + case RLIMIT_RTPRIO: + rs.Used = uint64(rtprio) + } + } + + return rlimits, err +} + +// IOCounters returns IO Counters. +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return p.fillFromIOWithContext(ctx) +} + +// NumCtxSwitches returns the number of the context switches of the process. +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + err := p.fillFromStatusWithContext(ctx) + if err != nil { + return nil, err + } + return p.numCtxSwitches, nil +} + +// NumFDs returns the number of File Descriptors used by the process. +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + _, fnames, err := p.fillFromfdListWithContext(ctx) + return int32(len(fnames)), err +} + +// NumThreads returns the number of threads used by the process. 
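+// On Linux the value comes from the "Threads:" row of /proc/<pid>/status
+// (see fillFromStatusWithContext).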
+func (p *Process) NumThreads() (int32, error) {
+	return p.NumThreadsWithContext(context.Background())
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+	err := p.fillFromStatusWithContext(ctx)
+	if err != nil {
+		return 0, err
+	}
+	return p.numThreads, nil
+}
+
+func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) {
+	return p.ThreadsWithContext(context.Background())
+}
+
+func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) {
+	ret := make(map[int32]*cpu.TimesStat)
+	taskPath := common.HostProc(strconv.Itoa(int(p.Pid)), "task")
+
+	tids, err := readPidsFromDir(taskPath)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, tid := range tids {
+		_, _, cpuTimes, _, _, _, _, err := p.fillFromTIDStatWithContext(ctx, tid)
+		if err != nil {
+			return nil, err
+		}
+		ret[tid] = cpuTimes
+	}
+
+	return ret, nil
+}
+
+// Times returns CPU times of the process.
+func (p *Process) Times() (*cpu.TimesStat, error) {
+	return p.TimesWithContext(context.Background())
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+	_, _, cpuTimes, _, _, _, _, err := p.fillFromStatWithContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return cpuTimes, nil
+}
+
+// CPUAffinity returns the CPU affinity of the process.
+//
+// Notice: Not implemented yet.
+func (p *Process) CPUAffinity() ([]int32, error) {
+	return p.CPUAffinityWithContext(context.Background())
+}
+
+func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// MemoryInfo returns platform-independent memory information, such as RSS, VMS and Swap.
+func (p *Process) MemoryInfo() (*MemoryInfoStat, error) {
+	return p.MemoryInfoWithContext(context.Background())
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+	meminfo, _, err := p.fillFromStatmWithContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return meminfo, nil
+}
+
+// MemoryInfoEx returns platform-dependent memory information.
+func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) {
+	return p.MemoryInfoExWithContext(context.Background())
+}
+
+func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) {
+	_, memInfoEx, err := p.fillFromStatmWithContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return memInfoEx, nil
+}
+
+// PageFaults returns the process's page fault counters.
+func (p *Process) PageFaults() (*PageFaultsStat, error) {
+	return p.PageFaultsWithContext(context.Background())
+}
+
+func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) {
+	_, _, _, _, _, _, pageFaults, err := p.fillFromStatWithContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return pageFaults, nil
+}
+
+// Children returns a slice of the process's child Processes.
+func (p *Process) Children() ([]*Process, error) {
+	return p.ChildrenWithContext(context.Background())
+}
+
+func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
+	pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid)
+	if err != nil {
+		if pids == nil || len(pids) == 0 {
+			return nil, ErrorNoChildren
+		}
+		return nil, err
+	}
+	ret := make([]*Process, 0, len(pids))
+	for _, pid := range pids {
+		np, err := NewProcess(pid)
+		if err != nil {
+			return nil, err
+		}
+		ret = append(ret, np)
+	}
+	return ret, nil
+}
+
+// OpenFiles returns a slice of OpenFilesStat opened by the process.
+// OpenFilesStat includes a file path and file descriptor. +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + _, ofs, err := p.fillFromfdWithContext(ctx) + if err != nil { + return nil, err + } + ret := make([]OpenFilesStat, len(ofs)) + for i, o := range ofs { + ret[i] = *o + } + + return ret, nil +} + +// Connections returns a slice of net.ConnectionStat used by the process. +// This returns all kind of the connection. This measn TCP, UDP or UNIX. +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPid("all", p.Pid) +} + +// Connections returns a slice of net.ConnectionStat used by the process at most `max` +func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), max) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMax("all", p.Pid, max) +} + +// NetIOCounters returns NetIOCounters of the process. +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + filename := common.HostProc(strconv.Itoa(int(p.Pid)), "net/dev") + return net.IOCountersByFile(pernic, filename) +} + +// MemoryMaps get memory maps from /proc/(pid)/smaps +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + pid := p.Pid + var ret []MemoryMapsStat + if grouped { + ret = make([]MemoryMapsStat, 1) + } + smapsPath := common.HostProc(strconv.Itoa(int(pid)), "smaps") + contents, err := ioutil.ReadFile(smapsPath) + if err != nil { + return nil, err + } + lines := strings.Split(string(contents), "\n") + + // function of parsing a block + getBlock := func(first_line []string, block []string) (MemoryMapsStat, error) { + m := MemoryMapsStat{} + m.Path = first_line[len(first_line)-1] + + for _, line := range block { + if strings.Contains(line, "VmFlags") { + continue + } + field := strings.Split(line, ":") + if len(field) < 2 { + continue + } + v := strings.Trim(field[1], " kB") // remove last "kB" + t, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return m, err + } + + switch field[0] { + case "Size": + m.Size = t + case "Rss": + m.Rss = t + case "Pss": + m.Pss = t + case "Shared_Clean": + m.SharedClean = t + case "Shared_Dirty": + m.SharedDirty = t + case "Private_Clean": + m.PrivateClean = t + case "Private_Dirty": + m.PrivateDirty = t + case "Referenced": + m.Referenced = t + case "Anonymous": + m.Anonymous = t + case "Swap": + m.Swap = t + } + } + return m, nil + } + + blocks := make([]string, 16) + for _, line := range lines { + field := strings.Split(line, " ") + if strings.HasSuffix(field[0], ":") == false { + // new block section + if len(blocks) > 0 { + g, err := getBlock(field, blocks) + if err != nil { + return &ret, err + } + if grouped { + ret[0].Size += g.Size + ret[0].Rss += g.Rss + ret[0].Pss += g.Pss + 
ret[0].SharedClean += g.SharedClean + ret[0].SharedDirty += g.SharedDirty + ret[0].PrivateClean += g.PrivateClean + ret[0].PrivateDirty += g.PrivateDirty + ret[0].Referenced += g.Referenced + ret[0].Anonymous += g.Anonymous + ret[0].Swap += g.Swap + } else { + ret = append(ret, g) + } + } + // starts new block + blocks = make([]string, 16) + } else { + blocks = append(blocks, line) + } + } + + return &ret, nil +} + +/** +** Internal functions +**/ + +func limitToInt(val string) (int32, error) { + if val == "unlimited" { + return math.MaxInt32, nil + } else { + res, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return 0, err + } + return int32(res), nil + } +} + +// Get num_fds from /proc/(pid)/limits +func (p *Process) fillFromLimitsWithContext(ctx context.Context) ([]RlimitStat, error) { + pid := p.Pid + limitsFile := common.HostProc(strconv.Itoa(int(pid)), "limits") + d, err := os.Open(limitsFile) + if err != nil { + return nil, err + } + defer d.Close() + + var limitStats []RlimitStat + + limitsScanner := bufio.NewScanner(d) + for limitsScanner.Scan() { + var statItem RlimitStat + + str := strings.Fields(limitsScanner.Text()) + + // Remove the header line + if strings.Contains(str[len(str)-1], "Units") { + continue + } + + // Assert that last item is a Hard limit + statItem.Hard, err = limitToInt(str[len(str)-1]) + if err != nil { + // On error remove last item an try once again since it can be unit or header line + str = str[:len(str)-1] + statItem.Hard, err = limitToInt(str[len(str)-1]) + if err != nil { + return nil, err + } + } + // Remove last item from string + str = str[:len(str)-1] + + //Now last item is a Soft limit + statItem.Soft, err = limitToInt(str[len(str)-1]) + if err != nil { + return nil, err + } + // Remove last item from string + str = str[:len(str)-1] + + //The rest is a stats name + resourceName := strings.Join(str, " ") + switch resourceName { + case "Max cpu time": + statItem.Resource = RLIMIT_CPU + case "Max file size": + statItem.Resource = RLIMIT_FSIZE + case "Max data size": + statItem.Resource = RLIMIT_DATA + case "Max stack size": + statItem.Resource = RLIMIT_STACK + case "Max core file size": + statItem.Resource = RLIMIT_CORE + case "Max resident set": + statItem.Resource = RLIMIT_RSS + case "Max processes": + statItem.Resource = RLIMIT_NPROC + case "Max open files": + statItem.Resource = RLIMIT_NOFILE + case "Max locked memory": + statItem.Resource = RLIMIT_MEMLOCK + case "Max address space": + statItem.Resource = RLIMIT_AS + case "Max file locks": + statItem.Resource = RLIMIT_LOCKS + case "Max pending signals": + statItem.Resource = RLIMIT_SIGPENDING + case "Max msgqueue size": + statItem.Resource = RLIMIT_MSGQUEUE + case "Max nice priority": + statItem.Resource = RLIMIT_NICE + case "Max realtime priority": + statItem.Resource = RLIMIT_RTPRIO + case "Max realtime timeout": + statItem.Resource = RLIMIT_RTTIME + default: + continue + } + + limitStats = append(limitStats, statItem) + } + + if err := limitsScanner.Err(); err != nil { + return nil, err + } + + return limitStats, nil +} + +// Get list of /proc/(pid)/fd files +func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []string, error) { + pid := p.Pid + statPath := common.HostProc(strconv.Itoa(int(pid)), "fd") + d, err := os.Open(statPath) + if err != nil { + return statPath, []string{}, err + } + defer d.Close() + fnames, err := d.Readdirnames(-1) + return statPath, fnames, err +} + +// Get num_fds from /proc/(pid)/fd +func (p *Process) fillFromfdWithContext(ctx 
context.Context) (int32, []*OpenFilesStat, error) { + statPath, fnames, err := p.fillFromfdListWithContext(ctx) + if err != nil { + return 0, nil, err + } + numFDs := int32(len(fnames)) + + var openfiles []*OpenFilesStat + for _, fd := range fnames { + fpath := filepath.Join(statPath, fd) + filepath, err := os.Readlink(fpath) + if err != nil { + continue + } + t, err := strconv.ParseUint(fd, 10, 64) + if err != nil { + return numFDs, openfiles, err + } + o := &OpenFilesStat{ + Path: filepath, + Fd: t, + } + openfiles = append(openfiles, o) + } + + return numFDs, openfiles, nil +} + +// Get cwd from /proc/(pid)/cwd +func (p *Process) fillFromCwdWithContext(ctx context.Context) (string, error) { + pid := p.Pid + cwdPath := common.HostProc(strconv.Itoa(int(pid)), "cwd") + cwd, err := os.Readlink(cwdPath) + if err != nil { + return "", err + } + return string(cwd), nil +} + +// Get exe from /proc/(pid)/exe +func (p *Process) fillFromExeWithContext(ctx context.Context) (string, error) { + pid := p.Pid + exePath := common.HostProc(strconv.Itoa(int(pid)), "exe") + exe, err := os.Readlink(exePath) + if err != nil { + return "", err + } + return string(exe), nil +} + +// Get cmdline from /proc/(pid)/cmdline +func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) { + pid := p.Pid + cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline") + cmdline, err := ioutil.ReadFile(cmdPath) + if err != nil { + return "", err + } + ret := strings.FieldsFunc(string(cmdline), func(r rune) bool { + if r == '\u0000' { + return true + } + return false + }) + + return strings.Join(ret, " "), nil +} + +func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) { + pid := p.Pid + cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline") + cmdline, err := ioutil.ReadFile(cmdPath) + if err != nil { + return nil, err + } + if len(cmdline) == 0 { + return nil, nil + } + if cmdline[len(cmdline)-1] == 0 { + cmdline = cmdline[:len(cmdline)-1] + } + parts := bytes.Split(cmdline, []byte{0}) + var strParts []string + for _, p := range parts { + strParts = append(strParts, string(p)) + } + + return strParts, nil +} + +// Get IO status from /proc/(pid)/io +func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, error) { + pid := p.Pid + ioPath := common.HostProc(strconv.Itoa(int(pid)), "io") + ioline, err := ioutil.ReadFile(ioPath) + if err != nil { + return nil, err + } + lines := strings.Split(string(ioline), "\n") + ret := &IOCountersStat{} + + for _, line := range lines { + field := strings.Fields(line) + if len(field) < 2 { + continue + } + t, err := strconv.ParseUint(field[1], 10, 64) + if err != nil { + return nil, err + } + param := field[0] + if strings.HasSuffix(param, ":") { + param = param[:len(param)-1] + } + switch param { + case "syscr": + ret.ReadCount = t + case "syscw": + ret.WriteCount = t + case "read_bytes": + ret.ReadBytes = t + case "write_bytes": + ret.WriteBytes = t + } + } + + return ret, nil +} + +// Get memory info from /proc/(pid)/statm +func (p *Process) fillFromStatmWithContext(ctx context.Context) (*MemoryInfoStat, *MemoryInfoExStat, error) { + pid := p.Pid + memPath := common.HostProc(strconv.Itoa(int(pid)), "statm") + contents, err := ioutil.ReadFile(memPath) + if err != nil { + return nil, nil, err + } + fields := strings.Split(string(contents), " ") + + vms, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, nil, err + } + rss, err := strconv.ParseUint(fields[1], 10, 64) + if err != 
nil { + return nil, nil, err + } + memInfo := &MemoryInfoStat{ + RSS: rss * PageSize, + VMS: vms * PageSize, + } + + shared, err := strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, nil, err + } + text, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, nil, err + } + lib, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, nil, err + } + dirty, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, nil, err + } + + memInfoEx := &MemoryInfoExStat{ + RSS: rss * PageSize, + VMS: vms * PageSize, + Shared: shared * PageSize, + Text: text * PageSize, + Lib: lib * PageSize, + Dirty: dirty * PageSize, + } + + return memInfo, memInfoEx, nil +} + +// Get various status from /proc/(pid)/status +func (p *Process) fillFromStatusWithContext(ctx context.Context) error { + pid := p.Pid + statPath := common.HostProc(strconv.Itoa(int(pid)), "status") + contents, err := ioutil.ReadFile(statPath) + if err != nil { + return err + } + lines := strings.Split(string(contents), "\n") + p.numCtxSwitches = &NumCtxSwitchesStat{} + p.memInfo = &MemoryInfoStat{} + p.sigInfo = &SignalInfoStat{} + for _, line := range lines { + tabParts := strings.SplitN(line, "\t", 2) + if len(tabParts) < 2 { + continue + } + value := tabParts[1] + switch strings.TrimRight(tabParts[0], ":") { + case "Name": + p.name = strings.Trim(value, " \t") + if len(p.name) >= 15 { + cmdlineSlice, err := p.CmdlineSlice() + if err != nil { + return err + } + if len(cmdlineSlice) > 0 { + extendedName := filepath.Base(cmdlineSlice[0]) + if strings.HasPrefix(extendedName, p.name) { + p.name = extendedName + } else { + p.name = cmdlineSlice[0] + } + } + } + case "State": + p.status = value[0:1] + case "PPid", "Ppid": + pval, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + p.parent = int32(pval) + case "Tgid": + pval, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + p.tgid = int32(pval) + case "Uid": + p.uids = make([]int32, 0, 4) + for _, i := range strings.Split(value, "\t") { + v, err := strconv.ParseInt(i, 10, 32) + if err != nil { + return err + } + p.uids = append(p.uids, int32(v)) + } + case "Gid": + p.gids = make([]int32, 0, 4) + for _, i := range strings.Split(value, "\t") { + v, err := strconv.ParseInt(i, 10, 32) + if err != nil { + return err + } + p.gids = append(p.gids, int32(v)) + } + case "Threads": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + p.numThreads = int32(v) + case "voluntary_ctxt_switches": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + p.numCtxSwitches.Voluntary = v + case "nonvoluntary_ctxt_switches": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + p.numCtxSwitches.Involuntary = v + case "VmRSS": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.RSS = v * 1024 + case "VmSize": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.VMS = v * 1024 + case "VmSwap": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Swap = v * 1024 + case "VmHWM": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.HWM = 
v * 1024 + case "VmData": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Data = v * 1024 + case "VmStk": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Stack = v * 1024 + case "VmLck": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Locked = v * 1024 + case "SigPnd": + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.PendingThread = v + case "ShdPnd": + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.PendingProcess = v + case "SigBlk": + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.Blocked = v + case "SigIgn": + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.Ignored = v + case "SigCgt": + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.Caught = v + } + + } + return nil +} + +func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { + pid := p.Pid + var statPath string + + if tid == -1 { + statPath = common.HostProc(strconv.Itoa(int(pid)), "stat") + } else { + statPath = common.HostProc(strconv.Itoa(int(pid)), "task", strconv.Itoa(int(tid)), "stat") + } + + contents, err := ioutil.ReadFile(statPath) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + fields := strings.Fields(string(contents)) + + i := 1 + for !strings.HasSuffix(fields[i], ")") { + i++ + } + + terminal, err := strconv.ParseUint(fields[i+5], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + ppid, err := strconv.ParseInt(fields[i+2], 10, 32) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + utime, err := strconv.ParseFloat(fields[i+12], 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + stime, err := strconv.ParseFloat(fields[i+13], 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + cpuTimes := &cpu.TimesStat{ + CPU: "cpu", + User: float64(utime / ClockTicks), + System: float64(stime / ClockTicks), + } + + bootTime, _ := common.BootTimeWithContext(ctx) + t, err := strconv.ParseUint(fields[i+20], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + ctime := (t / uint64(ClockTicks)) + uint64(bootTime) + createTime := int64(ctime * 1000) + + rtpriority, err := strconv.ParseInt(fields[i+16], 10, 32) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + if rtpriority < 0 { + rtpriority = rtpriority*-1 - 1 + } else { + rtpriority = 0 + } + + // p.Nice = mustParseInt32(fields[18]) + // use syscall instead of parse Stat file + snice, _ := unix.Getpriority(PrioProcess, int(pid)) + nice := int32(snice) // FIXME: is this true? 
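+ + // Per proc(5), fields i+8..i+11 of /proc/(pid)/stat are minflt, cminflt, + // majflt and cmajflt; i indexes the token that closes the comm field, so + // these offsets stay valid even when the command name contains spaces.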
+ + minFault, err := strconv.ParseUint(fields[i+8], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + cMinFault, err := strconv.ParseUint(fields[i+9], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + majFault, err := strconv.ParseUint(fields[i+10], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + cMajFault, err := strconv.ParseUint(fields[i+11], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + faults := &PageFaultsStat{ + MinorFaults: minFault, + MajorFaults: majFault, + ChildMinorFaults: cMinFault, + ChildMajorFaults: cMajFault, + } + + return terminal, int32(ppid), cpuTimes, createTime, uint32(rtpriority), nice, faults, nil +} + +func (p *Process) fillFromStatWithContext(ctx context.Context) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { + return p.fillFromTIDStatWithContext(ctx, -1) +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + return readPidsFromDir(common.HostProc()) +} + +// Processes returns a slice of pointers to Process structs for all +// currently running processes.
+func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, err + } + + for _, pid := range pids { + p, err := NewProcess(pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +func readPidsFromDir(path string) ([]int32, error) { + var ret []int32 + + d, err := os.Open(path) + if err != nil { + return nil, err + } + defer d.Close() + + fnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, fname := range fnames { + pid, err := strconv.ParseInt(fname, 10, 32) + if err != nil { + // skip non-numeric names + continue + } + ret = append(ret, int32(pid)) + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/process/process_openbsd.go new file mode 100644 index 00000000..8bac0989 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_openbsd.go @@ -0,0 +1,524 @@ +// +build openbsd + +package process + +import ( + "C" + "bytes" + "context" + "encoding/binary" + "os/exec" + "strconv" + "strings" + "unsafe" + + cpu "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + mem "github.com/shirou/gopsutil/mem" + net "github.com/shirou/gopsutil/net" + "golang.org/x/sys/unix" +) + +// MemoryInfoExStat is different between OSes +type MemoryInfoExStat struct { +} + +type MemoryMapsStat struct { +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + procs, err := Processes() + if err != nil { + return ret, err + } + + for _, p := range procs { + ret = append(ret, p.Pid) + } + + return ret, nil +} + +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Ppid, nil +} +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + return common.IntToString(k.Comm[:]), nil +} +func (p *Process) Tgid() (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + mib := []int32{CTLKern, KernProcArgs, p.Pid, KernProcArgv} + buf, _, err := common.CallSyscall(mib) + + if err != nil { + return nil, err + } + + argc := 0 + argvp := unsafe.Pointer(&buf[0]) + argv := *(**C.char)(unsafe.Pointer(argvp)) + size := unsafe.Sizeof(argv) + var strParts []string + + for argv != nil { + strParts = append(strParts, C.GoString(argv)) + + argc++ + argv = *(**C.char)(unsafe.Pointer(uintptr(argvp) + uintptr(argc)*size)) + } + return strParts, nil +} + +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + argv, err := p.CmdlineSlice() + if err != nil { + return "", err + } + return strings.Join(argv, " "), nil +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + var s string + switch k.Stat { + case SIDL: + s = "I" + case SRUN, SONPROC: + s = "R" + case SSLEEP: + s = "S" + case SSTOP: + s = "T" + case SDEAD: + s = "Z" + } + + return s, nil +} +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + ps, err := exec.LookPath("ps") + if err != nil { + return false, err + } + out, err := invoke.CommandWithContext(ctx, ps, "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + uids := make([]int32, 0, 3) + + uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + + return uids, nil +} +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]int32, 0, 3) + gids = append(gids, int32(k.Rgid), int32(k.Gid), int32(k.Svgid)) + + return gids, nil +} +func (p *Process) 
Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Tdev) + + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil +} +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Nice), nil +} +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &IOCountersStat{ + ReadCount: uint64(k.Uru_inblock), + WriteCount: uint64(k.Uru_oublock), + }, nil +} +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + /* not supported, just return 1 */ + return 1, nil +} +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + ret := make(map[int32]*cpu.TimesStat) + return ret, common.ErrNotImplementedError +} +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &cpu.TimesStat{ + CPU: "cpu", + User: float64(k.Uutime_sec) + float64(k.Uutime_usec)/1000000, + System: float64(k.Ustime_sec) + float64(k.Ustime_usec)/1000000, + }, nil +} +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} 
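+ +// Note: MemoryInfo below builds RSS and VMS from the kinfo_proc snapshot. +// FIXME: Vm_tsize, Vm_dsize and Vm_ssize appear to be page counts, so the +// VMS sum is likely missing the pageSize factor that is applied to RSS.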
+func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + pageSize, err := mem.GetPageSize() + if err != nil { + return nil, err + } + + return &MemoryInfoStat{ + RSS: uint64(k.Vm_rssize) * pageSize, + VMS: uint64(k.Vm_tsize) + uint64(k.Vm_dsize) + + uint64(k.Vm_ssize), + }, nil +} +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + return nil, err + } + ret := make([]*Process, 0, len(pids)) + for _, pid := range pids { + np, err := NewProcess(pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), max) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return []net.ConnectionStat{}, common.ErrNotImplementedError +} + +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + var ret []MemoryMapsStat + return &ret, common.ErrNotImplementedError +} + +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + results := []*Process{} + + buf, length, err := CallKernProcSyscall(KernProcAll, 0) + + if err != nil { + return results, err + } + + // get kinfo_proc size + count := int(length / uint64(sizeOfKinfoProc)) + + // parse buf to procs + for i := 0; i < count; i++ { + b := buf[i*sizeOfKinfoProc : (i+1)*sizeOfKinfoProc] + k, err := parseKinfoProc(b) + if err != nil { + continue + } + p, err := 
NewProcess(int32(k.Pid)) + if err != nil { + continue + } + + results = append(results, p) + } + + return results, nil +} + +func parseKinfoProc(buf []byte) (KinfoProc, error) { + var k KinfoProc + br := bytes.NewReader(buf) + err := common.Read(br, binary.LittleEndian, &k) + return k, err +} + +func (p *Process) getKProc() (*KinfoProc, error) { + return p.getKProcWithContext(context.Background()) +} + +func (p *Process) getKProcWithContext(ctx context.Context) (*KinfoProc, error) { + buf, length, err := CallKernProcSyscall(KernProcPID, p.Pid) + if err != nil { + return nil, err + } + if length != sizeOfKinfoProc { + return nil, err + } + + k, err := parseKinfoProc(buf) + if err != nil { + return nil, err + } + return &k, nil +} + +func CallKernProcSyscall(op int32, arg int32) ([]byte, uint64, error) { + return CallKernProcSyscallWithContext(context.Background(), op, arg) +} + +func CallKernProcSyscallWithContext(ctx context.Context, op int32, arg int32) ([]byte, uint64, error) { + mib := []int32{CTLKern, KernProc, op, arg, sizeOfKinfoProc, 0} + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return nil, length, err + } + + count := int32(length / uint64(sizeOfKinfoProc)) + mib = []int32{CTLKern, KernProc, op, arg, sizeOfKinfoProc, count} + mibptr = unsafe.Pointer(&mib[0]) + miblen = uint64(len(mib)) + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/process/process_openbsd_amd64.go new file mode 100644 index 00000000..8607422b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_openbsd_amd64.go @@ -0,0 +1,200 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 66 + KernProcAll = 0 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 55 + KernProcArgv = 1 + KernProcEnv = 3 +) + +const ( + ArgMax = 256 * 1024 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x50 + sizeOfKinfoProc = 0x268 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SDEAD = 6 + SONPROC = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type KinfoProc struct { + Forw uint64 + Back uint64 + Paddr uint64 + Addr uint64 + Fd uint64 + Stats uint64 + Limit uint64 + Vmspace uint64 + Sigacts uint64 + Sess uint64 + Tsess uint64 + Ru uint64 + Eflag int32 + Exitsig int32 + Flag int32 + Pid int32 + Ppid int32 + Sid int32 + X_pgid int32 + Tpgid int32 + Uid uint32 + Ruid uint32 
+ Gid uint32 + Rgid uint32 + Groups [16]uint32 + Ngroups int16 + Jobc int16 + Tdev uint32 + Estcpu uint32 + Rtime_sec uint32 + Rtime_usec uint32 + Cpticks int32 + Pctcpu uint32 + Swtime uint32 + Slptime uint32 + Schedflags int32 + Uticks uint64 + Sticks uint64 + Iticks uint64 + Tracep uint64 + Traceflag int32 + Holdcnt int32 + Siglist int32 + Sigmask uint32 + Sigignore uint32 + Sigcatch uint32 + Stat int8 + Priority uint8 + Usrpri uint8 + Nice uint8 + Xstat uint16 + Acflag uint16 + Comm [24]int8 + Wmesg [8]int8 + Wchan uint64 + Login [32]int8 + Vm_rssize int32 + Vm_tsize int32 + Vm_dsize int32 + Vm_ssize int32 + Uvalid int64 + Ustart_sec uint64 + Ustart_usec uint32 + Uutime_sec uint32 + Uutime_usec uint32 + Ustime_sec uint32 + Ustime_usec uint32 + Pad_cgo_0 [4]byte + Uru_maxrss uint64 + Uru_ixrss uint64 + Uru_idrss uint64 + Uru_isrss uint64 + Uru_minflt uint64 + Uru_majflt uint64 + Uru_nswap uint64 + Uru_inblock uint64 + Uru_oublock uint64 + Uru_msgsnd uint64 + Uru_msgrcv uint64 + Uru_nsignals uint64 + Uru_nvcsw uint64 + Uru_nivcsw uint64 + Uctime_sec uint32 + Uctime_usec uint32 + Psflags int32 + Spare int32 + Svuid uint32 + Svgid uint32 + Emul [8]int8 + Rlim_rss_cur uint64 + Cpuid uint64 + Vm_map_size uint64 + Tid int32 + Rtableid uint32 +} + +type Priority struct{} + +type KinfoVmentry struct { + Start uint64 + End uint64 + Guard uint64 + Fspace uint64 + Fspace_augment uint64 + Offset uint64 + Wired_count int32 + Etype int32 + Protection int32 + Max_protection int32 + Advice int32 + Inheritance int32 + Flags uint8 + Pad_cgo_0 [7]byte +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_posix.go b/vendor/github.com/shirou/gopsutil/process/process_posix.go new file mode 100644 index 00000000..13f0308c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_posix.go @@ -0,0 +1,175 @@ +// +build linux freebsd openbsd darwin + +package process + +import ( + "context" + "fmt" + "os" + "os/user" + "path/filepath" + "strconv" + "strings" + "syscall" + + "golang.org/x/sys/unix" +) + +// POSIX +func getTerminalMap() (map[uint64]string, error) { + ret := make(map[uint64]string) + var termfiles []string + + d, err := os.Open("/dev") + if err != nil { + return nil, err + } + defer d.Close() + + devnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, devname := range devnames { + if strings.HasPrefix(devname, "/dev/tty") { + termfiles = append(termfiles, "/dev/tty/"+devname) + } + } + + var ptsnames []string + ptsd, err := os.Open("/dev/pts") + if err != nil { + ptsnames, _ = filepath.Glob("/dev/ttyp*") + if ptsnames == nil { + return nil, err + } + } + defer ptsd.Close() + + if ptsnames == nil { + defer ptsd.Close() + ptsnames, err = ptsd.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, ptsname := range ptsnames { + termfiles = append(termfiles, "/dev/pts/"+ptsname) + } + } else { + termfiles = ptsnames + } + + for _, name := range termfiles { + stat := unix.Stat_t{} + if err = unix.Stat(name, &stat); err != nil { + return nil, err + } + rdev := uint64(stat.Rdev) + ret[rdev] = strings.Replace(name, "/dev", "", -1) + } + return ret, nil +} + +func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { + if pid <= 0 { + return false, fmt.Errorf("invalid pid %v", pid) + } + proc, err := os.FindProcess(int(pid)) + if err != nil { + return false, err + } + err = proc.Signal(syscall.Signal(0)) + if err == nil { + return true, nil + } + if err.Error() == "os: process already finished" { + return false, nil + } + errno, ok 
:= err.(syscall.Errno) + if !ok { + return false, err + } + switch errno { + case syscall.ESRCH: + return false, nil + case syscall.EPERM: + return true, nil + } + return false, err +} + +// SendSignal sends a unix.Signal to the process. +// Currently, SIGSTOP, SIGCONT, SIGTERM and SIGKILL are supported. +func (p *Process) SendSignal(sig syscall.Signal) error { + return p.SendSignalWithContext(context.Background(), sig) +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { + process, err := os.FindProcess(int(p.Pid)) + if err != nil { + return err + } + + err = process.Signal(sig) + if err != nil { + return err + } + + return nil +} + +// Suspend sends SIGSTOP to the process. +func (p *Process) Suspend() error { + return p.SuspendWithContext(context.Background()) +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + return p.SendSignal(unix.SIGSTOP) +} + +// Resume sends SIGCONT to the process. +func (p *Process) Resume() error { + return p.ResumeWithContext(context.Background()) +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + return p.SendSignal(unix.SIGCONT) +} + +// Terminate sends SIGTERM to the process. +func (p *Process) Terminate() error { + return p.TerminateWithContext(context.Background()) +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + return p.SendSignal(unix.SIGTERM) +} + +// Kill sends SIGKILL to the process. +func (p *Process) Kill() error { + return p.KillWithContext(context.Background()) +} + +func (p *Process) KillWithContext(ctx context.Context) error { + return p.SendSignal(unix.SIGKILL) +} + +// Username returns a username of the process. +func (p *Process) Username() (string, error) { + return p.UsernameWithContext(context.Background()) +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + uids, err := p.Uids() + if err != nil { + return "", err + } + if len(uids) > 0 { + u, err := user.LookupId(strconv.Itoa(int(uids[0]))) + if err != nil { + return "", err + } + return u.Username, nil + } + return "", nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_windows.go b/vendor/github.com/shirou/gopsutil/process/process_windows.go new file mode 100644 index 00000000..d42b34fa --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_windows.go @@ -0,0 +1,848 @@ +// +build windows + +package process + +import ( + "context" + "fmt" + "os" + "strings" + "syscall" + "time" + "unsafe" + + "github.com/StackExchange/wmi" + cpu "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + net "github.com/shirou/gopsutil/net" + "github.com/shirou/w32" + "golang.org/x/sys/windows" +) + +var ( + modpsapi = windows.NewLazySystemDLL("psapi.dll") + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessImageFileNameW = modpsapi.NewProc("GetProcessImageFileNameW") + + advapi32 = windows.NewLazySystemDLL("advapi32.dll") + procLookupPrivilegeValue = advapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = advapi32.NewProc("AdjustTokenPrivileges") + + procQueryFullProcessImageNameW = common.Modkernel32.NewProc("QueryFullProcessImageNameW") + procGetPriorityClass = common.Modkernel32.NewProc("GetPriorityClass") +) + +type SystemProcessInformation struct { + NextEntryOffset uint64 + NumberOfThreads uint64 + Reserved1 [48]byte + Reserved2 [3]byte + UniqueProcessID uintptr + Reserved3 uintptr + HandleCount uint64 + Reserved4 [4]byte + Reserved5 [11]byte + 
PeakPagefileUsage uint64 + PrivatePageCount uint64 + Reserved6 [6]uint64 +} + +// Memory_info_ex is different between OSes +type MemoryInfoExStat struct { +} + +type MemoryMapsStat struct { +} + +type Win32_Process struct { + Name string + ExecutablePath *string + CommandLine *string + Priority uint32 + CreationDate *time.Time + ProcessID uint32 + ThreadCount uint32 + Status *string + ReadOperationCount uint64 + ReadTransferCount uint64 + WriteOperationCount uint64 + WriteTransferCount uint64 + CSCreationClassName string + CSName string + Caption *string + CreationClassName string + Description *string + ExecutionState *uint16 + HandleCount uint32 + KernelModeTime uint64 + MaximumWorkingSetSize *uint32 + MinimumWorkingSetSize *uint32 + OSCreationClassName string + OSName string + OtherOperationCount uint64 + OtherTransferCount uint64 + PageFaults uint32 + PageFileUsage uint32 + ParentProcessID uint32 + PeakPageFileUsage uint32 + PeakVirtualSize uint64 + PeakWorkingSetSize uint32 + PrivatePageCount uint64 + TerminationDate *time.Time + UserModeTime uint64 + WorkingSetSize uint64 +} + +type winLUID struct { + LowPart winDWord + HighPart winLong +} + +// LUID_AND_ATTRIBUTES +type winLUIDAndAttributes struct { + Luid winLUID + Attributes winDWord +} + +// TOKEN_PRIVILEGES +type winTokenPriviledges struct { + PrivilegeCount winDWord + Privileges [1]winLUIDAndAttributes +} + +type winLong int32 +type winDWord uint32 + +func init() { + wmi.DefaultClient.AllowMissingFields = true + + // enable SeDebugPrivilege https://github.com/midstar/proci/blob/6ec79f57b90ba3d9efa2a7b16ef9c9369d4be875/proci_windows.go#L80-L119 + handle, err := syscall.GetCurrentProcess() + if err != nil { + return + } + + var token syscall.Token + err = syscall.OpenProcessToken(handle, 0x0028, &token) + if err != nil { + return + } + defer token.Close() + + tokenPriviledges := winTokenPriviledges{PrivilegeCount: 1} + lpName := syscall.StringToUTF16("SeDebugPrivilege") + ret, _, _ := procLookupPrivilegeValue.Call( + 0, + uintptr(unsafe.Pointer(&lpName[0])), + uintptr(unsafe.Pointer(&tokenPriviledges.Privileges[0].Luid))) + if ret == 0 { + return + } + + tokenPriviledges.Privileges[0].Attributes = 0x00000002 // SE_PRIVILEGE_ENABLED + + procAdjustTokenPrivileges.Call( + uintptr(token), + 0, + uintptr(unsafe.Pointer(&tokenPriviledges)), + uintptr(unsafe.Sizeof(tokenPriviledges)), + 0, + 0) +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + // inspired by https://gist.github.com/henkman/3083408 + // and https://github.com/giampaolo/psutil/blob/1c3a15f637521ba5c0031283da39c733fda53e4c/psutil/arch/windows/process_info.c#L315-L329 + var ret []int32 + var read uint32 = 0 + var psSize uint32 = 1024 + const dwordSize uint32 = 4 + + for { + ps := make([]uint32, psSize) + if !w32.EnumProcesses(ps, uint32(len(ps)), &read) { + return nil, fmt.Errorf("could not get w32.EnumProcesses") + } + if uint32(len(ps)) == read { // ps buffer was too small to host every results, retry with a bigger one + psSize += 1024 + continue + } + for _, pid := range ps[:read/dwordSize] { + ret = append(ret, int32(pid)) + } + return ret, nil + + } + +} + +func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { + if pid == 0 { // special case for pid 0 System Idle Process + return true, nil + } + if pid < 0 { + return false, fmt.Errorf("invalid pid %v", pid) + } + if pid%4 != 0 { + // OpenProcess will succeed even on non-existing pid here https://devblogs.microsoft.com/oldnewthing/20080606-00/?p=22043 + // so we list every pid just 
to be sure and be future-proof + pids, err := PidsWithContext(ctx) + if err != nil { + return false, err + } + for _, i := range pids { + if i == pid { + return true, err + } + } + return false, err + } + const STILL_ACTIVE = 259 // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess + h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid)) + if err == windows.ERROR_ACCESS_DENIED { + return true, nil + } + if err == windows.ERROR_INVALID_PARAMETER { + return false, nil + } + if err != nil { + return false, err + } + defer syscall.CloseHandle(syscall.Handle(h)) + var exitCode uint32 + err = windows.GetExitCodeProcess(h, &exitCode) + return exitCode == STILL_ACTIVE, err +} + +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + ppid, _, _, err := getFromSnapProcess(p.Pid) + if err != nil { + return 0, err + } + return ppid, nil +} + +func GetWin32Proc(pid int32) ([]Win32_Process, error) { + return GetWin32ProcWithContext(context.Background(), pid) +} + +func GetWin32ProcWithContext(ctx context.Context, pid int32) ([]Win32_Process, error) { + var dst []Win32_Process + query := fmt.Sprintf("WHERE ProcessId = %d", pid) + q := wmi.CreateQuery(&dst, query) + err := common.WMIQueryWithContext(ctx, q, &dst) + if err != nil { + return []Win32_Process{}, fmt.Errorf("could not get win32Proc: %s", err) + } + + if len(dst) == 0 { + return []Win32_Process{}, fmt.Errorf("could not get win32Proc: empty") + } + + return dst, nil +} + +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + _, _, name, err := getFromSnapProcess(p.Pid) + if err != nil { + return "", fmt.Errorf("could not get Name: %s", err) + } + return name, nil +} + +func (p *Process) Tgid() (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + c, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(p.Pid)) + if err != nil { + return "", err + } + defer windows.CloseHandle(c) + buf := make([]uint16, syscall.MAX_LONG_PATH) + size := uint32(syscall.MAX_LONG_PATH) + if err := procQueryFullProcessImageNameW.Find(); err == nil { // Vista+ + ret, _, err := procQueryFullProcessImageNameW.Call( + uintptr(c), + uintptr(0), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&size))) + if ret == 0 { + return "", err + } + return windows.UTF16ToString(buf[:]), nil + } + // XP fallback + ret, _, err := procGetProcessImageFileNameW.Call(uintptr(c), uintptr(unsafe.Pointer(&buf[0])), uintptr(size)) + if ret == 0 { + return "", err + } + return common.ConvertDOSPath(windows.UTF16ToString(buf[:])), nil +} + +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + dst, err := GetWin32ProcWithContext(ctx, p.Pid) + if err != nil { + return "", fmt.Errorf("could not get CommandLine: %s", err) + } + return *dst[0].CommandLine, nil +} + +// CmdlineSlice returns the command line arguments of the process as a slice with each +// element being an argument. 
This merely returns the CommandLine informations passed +// to the process split on the 0x20 ASCII character. +func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + cmdline, err := p.CmdlineWithContext(ctx) + if err != nil { + return nil, err + } + return strings.Split(cmdline, " "), nil +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + ru, err := getRusage(p.Pid) + if err != nil { + return 0, fmt.Errorf("could not get CreationDate: %s", err) + } + + return ru.CreationTime.Nanoseconds() / 1000000, nil +} + +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + ppid, err := p.PpidWithContext(ctx) + if err != nil { + return nil, fmt.Errorf("could not get ParentProcessID: %s", err) + } + + return NewProcess(ppid) +} +func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) Username() (string, error) { + return p.UsernameWithContext(context.Background()) +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + pid := p.Pid + c, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid)) + if err != nil { + return "", err + } + defer windows.CloseHandle(c) + + var token syscall.Token + err = syscall.OpenProcessToken(syscall.Handle(c), syscall.TOKEN_QUERY, &token) + if err != nil { + return "", err + } + defer token.Close() + tokenUser, err := token.GetTokenUser() + if err != nil { + return "", err + } + + user, domain, _, err := tokenUser.User.Sid.LookupAccount("") + return domain + "\\" + user, err +} + +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + var uids []int32 + + return uids, common.ErrNotImplementedError +} +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + var gids []int32 + return gids, common.ErrNotImplementedError +} +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +// priorityClasses maps a win32 priority class to its WMI equivalent Win32_Process.Priority +// https://docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getpriorityclass +// https://docs.microsoft.com/en-us/windows/desktop/cimwin32prov/win32-process +var priorityClasses = map[int]int32{ + 0x00008000: 10, // ABOVE_NORMAL_PRIORITY_CLASS + 0x00004000: 6, // 
BELOW_NORMAL_PRIORITY_CLASS + 0x00000080: 13, // HIGH_PRIORITY_CLASS + 0x00000040: 4, // IDLE_PRIORITY_CLASS + 0x00000020: 8, // NORMAL_PRIORITY_CLASS + 0x00000100: 24, // REALTIME_PRIORITY_CLASS +} + +// Nice returns priority in Windows +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + c, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(p.Pid)) + if err != nil { + return 0, err + } + defer windows.CloseHandle(c) + ret, _, err := procGetPriorityClass.Call(uintptr(c)) + if ret == 0 { + return 0, err + } + priority, ok := priorityClasses[int(ret)] + if !ok { + return 0, fmt.Errorf("unknown priority class %v", ret) + } + return priority, nil +} +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + var rlimit []RlimitStat + + return rlimit, common.ErrNotImplementedError +} +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + var rlimit []RlimitStat + + return rlimit, common.ErrNotImplementedError +} + +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + dst, err := GetWin32ProcWithContext(ctx, p.Pid) + if err != nil || len(dst) == 0 { + return nil, fmt.Errorf("could not get Win32Proc: %s", err) + } + ret := &IOCountersStat{ + ReadCount: uint64(dst[0].ReadOperationCount), + ReadBytes: uint64(dst[0].ReadTransferCount), + WriteCount: uint64(dst[0].WriteOperationCount), + WriteBytes: uint64(dst[0].WriteTransferCount), + } + + return ret, nil +} +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + _, ret, _, err := getFromSnapProcess(p.Pid) + if err != nil { + return 0, err + } + return ret, nil +} +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + ret := make(map[int32]*cpu.TimesStat) + return ret, common.ErrNotImplementedError +} +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + sysTimes, err := 
getProcessCPUTimes(p.Pid) + if err != nil { + return nil, err + } + + // User and kernel times are represented as a FILETIME structure + // which contains a 64-bit value representing the number of + // 100-nanosecond intervals since January 1, 1601 (UTC): + // http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx + // To convert it into a float representing the seconds that the + // process has executed in user/kernel mode I borrowed the code + // below from psutil's _psutil_windows.c, and in turn from Python's + // Modules/posixmodule.c + + user := float64(sysTimes.UserTime.HighDateTime)*429.4967296 + float64(sysTimes.UserTime.LowDateTime)*1e-7 + kernel := float64(sysTimes.KernelTime.HighDateTime)*429.4967296 + float64(sysTimes.KernelTime.LowDateTime)*1e-7 + + return &cpu.TimesStat{ + User: user, + System: kernel, + }, nil +} +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + mem, err := getMemoryInfo(p.Pid) + if err != nil { + return nil, err + } + + ret := &MemoryInfoStat{ + RSS: uint64(mem.WorkingSetSize), + VMS: uint64(mem.PagefileUsage), + } + + return ret, nil +} +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + snap := w32.CreateToolhelp32Snapshot(w32.TH32CS_SNAPPROCESS, uint32(0)) + if snap == 0 { + return out, windows.GetLastError() + } + defer w32.CloseHandle(snap) + var pe32 w32.PROCESSENTRY32 + pe32.DwSize = uint32(unsafe.Sizeof(pe32)) + if !w32.Process32First(snap, &pe32) { + return out, windows.GetLastError() + } + for { + if pe32.Th32ParentProcessID == uint32(p.Pid) { + p, err := NewProcess(int32(pe32.Th32ProcessID)) + if err == nil { + out = append(out, p) + } + } + if !w32.Process32Next(snap, &pe32) { + break + } + } + return out, nil +} + +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPidWithContext(ctx, "all", p.Pid) +} + +func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), max) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) 
([]net.ConnectionStat, error) { + return []net.ConnectionStat{}, common.ErrNotImplementedError +} + +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + var ret []MemoryMapsStat + return &ret, common.ErrNotImplementedError +} + +func (p *Process) SendSignal(sig windows.Signal) error { + return p.SendSignalWithContext(context.Background(), sig) +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig windows.Signal) error { + return common.ErrNotImplementedError +} + +func (p *Process) Suspend() error { + return p.SuspendWithContext(context.Background()) +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} +func (p *Process) Resume() error { + return p.ResumeWithContext(context.Background()) +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) Terminate() error { + return p.TerminateWithContext(context.Background()) +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + // PROCESS_TERMINATE = 0x0001 + proc := w32.OpenProcess(0x0001, false, uint32(p.Pid)) + ret := w32.TerminateProcess(proc, 0) + w32.CloseHandle(proc) + + if ret == false { + return windows.GetLastError() + } else { + return nil + } +} + +func (p *Process) Kill() error { + return p.KillWithContext(context.Background()) +} + +func (p *Process) KillWithContext(ctx context.Context) error { + process := os.Process{Pid: int(p.Pid)} + return process.Kill() +} + +func getFromSnapProcess(pid int32) (int32, int32, string, error) { + snap := w32.CreateToolhelp32Snapshot(w32.TH32CS_SNAPPROCESS, uint32(pid)) + if snap == 0 { + return 0, 0, "", windows.GetLastError() + } + defer w32.CloseHandle(snap) + var pe32 w32.PROCESSENTRY32 + pe32.DwSize = uint32(unsafe.Sizeof(pe32)) + if !w32.Process32First(snap, &pe32) { + return 0, 0, "", windows.GetLastError() + } + for { + if pe32.Th32ProcessID == uint32(pid) { + szexe := windows.UTF16ToString(pe32.SzExeFile[:]) + return int32(pe32.Th32ParentProcessID), int32(pe32.CntThreads), szexe, nil + } + if !w32.Process32Next(snap, &pe32) { + break + } + } + return 0, 0, "", fmt.Errorf("couldn't find pid: %d", pid) +} + +// Get processes +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, fmt.Errorf("could not get Processes %s", err) + } + + for _, pid := range pids { + p, err := NewProcess(pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +func getProcInfo(pid int32) (*SystemProcessInformation, error) { + initialBufferSize := uint64(0x4000) + bufferSize := initialBufferSize + buffer := make([]byte, bufferSize) + + var sysProcInfo SystemProcessInformation + ret, _, _ := common.ProcNtQuerySystemInformation.Call( + uintptr(unsafe.Pointer(&sysProcInfo)), + uintptr(unsafe.Pointer(&buffer[0])), + 
uintptr(unsafe.Pointer(&bufferSize)), + uintptr(unsafe.Pointer(&bufferSize))) + if ret != 0 { + return nil, windows.GetLastError() + } + + return &sysProcInfo, nil +} + +func getRusage(pid int32) (*windows.Rusage, error) { + var CPU windows.Rusage + + c, err := windows.OpenProcess(windows.PROCESS_QUERY_INFORMATION, false, uint32(pid)) + if err != nil { + return nil, err + } + defer windows.CloseHandle(c) + + if err := windows.GetProcessTimes(c, &CPU.CreationTime, &CPU.ExitTime, &CPU.KernelTime, &CPU.UserTime); err != nil { + return nil, err + } + + return &CPU, nil +} + +func getMemoryInfo(pid int32) (PROCESS_MEMORY_COUNTERS, error) { + var mem PROCESS_MEMORY_COUNTERS + c, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid)) + if err != nil { + return mem, err + } + defer windows.CloseHandle(c) + if err := getProcessMemoryInfo(c, &mem); err != nil { + return mem, err + } + + return mem, err +} + +func getProcessMemoryInfo(h windows.Handle, mem *PROCESS_MEMORY_COUNTERS) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(h), uintptr(unsafe.Pointer(mem)), uintptr(unsafe.Sizeof(*mem))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +type SYSTEM_TIMES struct { + CreateTime syscall.Filetime + ExitTime syscall.Filetime + KernelTime syscall.Filetime + UserTime syscall.Filetime +} + +func getProcessCPUTimes(pid int32) (SYSTEM_TIMES, error) { + var times SYSTEM_TIMES + + h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid)) + if err != nil { + return times, err + } + defer windows.CloseHandle(h) + + err = syscall.GetProcessTimes( + syscall.Handle(h), + &times.CreateTime, + &times.ExitTime, + &times.KernelTime, + &times.UserTime, + ) + + return times, err +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_windows_386.go b/vendor/github.com/shirou/gopsutil/process/process_windows_386.go new file mode 100644 index 00000000..68f3153d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_windows_386.go @@ -0,0 +1,16 @@ +// +build windows + +package process + +type PROCESS_MEMORY_COUNTERS struct { + CB uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint32 + WorkingSetSize uint32 + QuotaPeakPagedPoolUsage uint32 + QuotaPagedPoolUsage uint32 + QuotaPeakNonPagedPoolUsage uint32 + QuotaNonPagedPoolUsage uint32 + PagefileUsage uint32 + PeakPagefileUsage uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_windows_amd64.go b/vendor/github.com/shirou/gopsutil/process/process_windows_amd64.go new file mode 100644 index 00000000..df286dff --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_windows_amd64.go @@ -0,0 +1,16 @@ +// +build windows + +package process + +type PROCESS_MEMORY_COUNTERS struct { + CB uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint64 + WorkingSetSize uint64 + QuotaPeakPagedPoolUsage uint64 + QuotaPagedPoolUsage uint64 + QuotaPeakNonPagedPoolUsage uint64 + QuotaNonPagedPoolUsage uint64 + PagefileUsage uint64 + PeakPagefileUsage uint64 +} diff --git a/vendor/github.com/shirou/w32/AUTHORS b/vendor/github.com/shirou/w32/AUTHORS new file mode 100644 index 00000000..c0785e82 --- /dev/null +++ b/vendor/github.com/shirou/w32/AUTHORS @@ -0,0 +1,16 @@ +# This is the official list of 'w32' authors for copyright purposes. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted.
+ +# Contributors +# ============ + +Allen Dang +Benny Siegert +Bruno Bigras +Gerald Rosenberg +Michael Henke \ No newline at end of file diff --git a/vendor/github.com/shirou/w32/LICENSE b/vendor/github.com/shirou/w32/LICENSE new file mode 100644 index 00000000..9f36608c --- /dev/null +++ b/vendor/github.com/shirou/w32/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2010-2012 The w32 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The names of the authors may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/shirou/w32/README.md b/vendor/github.com/shirou/w32/README.md new file mode 100644 index 00000000..ed196e76 --- /dev/null +++ b/vendor/github.com/shirou/w32/README.md @@ -0,0 +1,33 @@ +About w32 +========== + +w32 is a wrapper of windows apis for the Go Programming Language. + +It wraps win32 apis to "Go style" to make them easier to use. + +Setup +===== + +1. Make sure you have a working Go installation and build environment, + see this go-nuts post for details: + http://groups.google.com/group/golang-nuts/msg/5c87630a84f4fd0c + + Updated versions of the Windows Go build are available here: + http://code.google.com/p/gomingw/downloads/list + +2. Create a "gopath" directory if you do not have one yet and set the + GOPATH variable accordingly. For example: + mkdir -p go-externals/src + export GOPATH=${PWD}/go-externals + +3. go get github.com/AllenDang/w32 + +4. go install github.com/AllenDang/w32... + +Contribute +========== + +Contributions in form of design, code, documentation, bug reporting or other +ways you see fit are very welcome. + +Thank You! diff --git a/vendor/github.com/shirou/w32/advapi32.go b/vendor/github.com/shirou/w32/advapi32.go new file mode 100644 index 00000000..35fd35a6 --- /dev/null +++ b/vendor/github.com/shirou/w32/advapi32.go @@ -0,0 +1,301 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package w32 + +import ( + "errors" + "fmt" + "syscall" + "unsafe" +) + +var ( + modadvapi32 = syscall.NewLazyDLL("advapi32.dll") + + procRegCreateKeyEx = modadvapi32.NewProc("RegCreateKeyExW") + procRegOpenKeyEx = modadvapi32.NewProc("RegOpenKeyExW") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegGetValue = modadvapi32.NewProc("RegGetValueW") + procRegEnumKeyEx = modadvapi32.NewProc("RegEnumKeyExW") + // procRegSetKeyValue = modadvapi32.NewProc("RegSetKeyValueW") + procRegSetValueEx = modadvapi32.NewProc("RegSetValueExW") + procOpenEventLog = modadvapi32.NewProc("OpenEventLogW") + procReadEventLog = modadvapi32.NewProc("ReadEventLogW") + procCloseEventLog = modadvapi32.NewProc("CloseEventLog") + procOpenSCManager = modadvapi32.NewProc("OpenSCManagerW") + procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procOpenService = modadvapi32.NewProc("OpenServiceW") + procStartService = modadvapi32.NewProc("StartServiceW") + procControlService = modadvapi32.NewProc("ControlService") +) + +func RegCreateKey(hKey HKEY, subKey string) HKEY { + var result HKEY + ret, _, _ := procRegCreateKeyEx.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(0), + uintptr(0), + uintptr(0), + uintptr(KEY_ALL_ACCESS), + uintptr(0), + uintptr(unsafe.Pointer(&result)), + uintptr(0)) + _ = ret + return result +} + +func RegOpenKeyEx(hKey HKEY, subKey string, samDesired uint32) HKEY { + var result HKEY + ret, _, _ := procRegOpenKeyEx.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(0), + uintptr(samDesired), + uintptr(unsafe.Pointer(&result))) + + if ret != ERROR_SUCCESS { + panic(fmt.Sprintf("RegOpenKeyEx(%d, %s, %d) failed", hKey, subKey, samDesired)) + } + return result +} + +func RegCloseKey(hKey HKEY) error { + var err error + ret, _, _ := procRegCloseKey.Call( + uintptr(hKey)) + + if ret != ERROR_SUCCESS { + err = errors.New("RegCloseKey failed") + } + return err +} + +func RegGetRaw(hKey HKEY, subKey string, value string) []byte { + var bufLen uint32 + var valptr unsafe.Pointer + if len(value) > 0 { + valptr = unsafe.Pointer(syscall.StringToUTF16Ptr(value)) + } + procRegGetValue.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(valptr), + uintptr(RRF_RT_ANY), + 0, + 0, + uintptr(unsafe.Pointer(&bufLen))) + + if bufLen == 0 { + return nil + } + + buf := make([]byte, bufLen) + ret, _, _ := procRegGetValue.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(valptr), + uintptr(RRF_RT_ANY), + 0, + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&bufLen))) + + if ret != ERROR_SUCCESS { + return nil + } + + return buf +} + +func RegSetBinary(hKey HKEY, subKey string, value []byte) (errno int) { + var lptr, vptr unsafe.Pointer + if len(subKey) > 0 { + lptr = unsafe.Pointer(syscall.StringToUTF16Ptr(subKey)) + } + if len(value) > 0 { + vptr = unsafe.Pointer(&value[0]) + } + ret, _, _ := procRegSetValueEx.Call( + uintptr(hKey), + uintptr(lptr), + uintptr(0), + uintptr(REG_BINARY), + uintptr(vptr), + uintptr(len(value))) + + return int(ret) +} + +func RegGetString(hKey HKEY, subKey string, value string) string { + var bufLen uint32 + procRegGetValue.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(value))), + uintptr(RRF_RT_REG_SZ), + 0, + 0, + uintptr(unsafe.Pointer(&bufLen))) + + if bufLen == 0 { + 
return "" + } + + buf := make([]uint16, bufLen) + ret, _, _ := procRegGetValue.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(value))), + uintptr(RRF_RT_REG_SZ), + 0, + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&bufLen))) + + if ret != ERROR_SUCCESS { + return "" + } + + return syscall.UTF16ToString(buf) +} + +/* +func RegSetKeyValue(hKey HKEY, subKey string, valueName string, dwType uint32, data uintptr, cbData uint16) (errno int) { + ret, _, _ := procRegSetKeyValue.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(valueName))), + uintptr(dwType), + data, + uintptr(cbData)) + + return int(ret) +} +*/ + +func RegEnumKeyEx(hKey HKEY, index uint32) string { + var bufLen uint32 = 255 + buf := make([]uint16, bufLen) + procRegEnumKeyEx.Call( + uintptr(hKey), + uintptr(index), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&bufLen)), + 0, + 0, + 0, + 0) + return syscall.UTF16ToString(buf) +} + +func OpenEventLog(servername string, sourcename string) HANDLE { + ret, _, _ := procOpenEventLog.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(servername))), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(sourcename)))) + + return HANDLE(ret) +} + +func ReadEventLog(eventlog HANDLE, readflags, recordoffset uint32, buffer []byte, numberofbytestoread uint32, bytesread, minnumberofbytesneeded *uint32) bool { + ret, _, _ := procReadEventLog.Call( + uintptr(eventlog), + uintptr(readflags), + uintptr(recordoffset), + uintptr(unsafe.Pointer(&buffer[0])), + uintptr(numberofbytestoread), + uintptr(unsafe.Pointer(bytesread)), + uintptr(unsafe.Pointer(minnumberofbytesneeded))) + + return ret != 0 +} + +func CloseEventLog(eventlog HANDLE) bool { + ret, _, _ := procCloseEventLog.Call( + uintptr(eventlog)) + + return ret != 0 +} + +func OpenSCManager(lpMachineName, lpDatabaseName string, dwDesiredAccess uint32) (HANDLE, error) { + var p1, p2 uintptr + if len(lpMachineName) > 0 { + p1 = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpMachineName))) + } + if len(lpDatabaseName) > 0 { + p2 = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpDatabaseName))) + } + ret, _, _ := procOpenSCManager.Call( + p1, + p2, + uintptr(dwDesiredAccess)) + + if ret == 0 { + return 0, syscall.GetLastError() + } + + return HANDLE(ret), nil +} + +func CloseServiceHandle(hSCObject HANDLE) error { + ret, _, _ := procCloseServiceHandle.Call(uintptr(hSCObject)) + if ret == 0 { + return syscall.GetLastError() + } + return nil +} + +func OpenService(hSCManager HANDLE, lpServiceName string, dwDesiredAccess uint32) (HANDLE, error) { + ret, _, _ := procOpenService.Call( + uintptr(hSCManager), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpServiceName))), + uintptr(dwDesiredAccess)) + + if ret == 0 { + return 0, syscall.GetLastError() + } + + return HANDLE(ret), nil +} + +func StartService(hService HANDLE, lpServiceArgVectors []string) error { + l := len(lpServiceArgVectors) + var ret uintptr + if l == 0 { + ret, _, _ = procStartService.Call( + uintptr(hService), + 0, + 0) + } else { + lpArgs := make([]uintptr, l) + for i := 0; i < l; i++ { + lpArgs[i] = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpServiceArgVectors[i]))) + } + + ret, _, _ = procStartService.Call( + uintptr(hService), + uintptr(l), + uintptr(unsafe.Pointer(&lpArgs[0]))) + } + + if ret == 0 { + return syscall.GetLastError() + } + + return nil +} + +func 
ControlService(hService HANDLE, dwControl uint32, lpServiceStatus *SERVICE_STATUS) bool { + if lpServiceStatus == nil { + panic("ControlService:lpServiceStatus cannot be nil") + } + + ret, _, _ := procControlService.Call( + uintptr(hService), + uintptr(dwControl), + uintptr(unsafe.Pointer(lpServiceStatus))) + + return ret != 0 +} diff --git a/vendor/github.com/shirou/w32/comctl32.go b/vendor/github.com/shirou/w32/comctl32.go new file mode 100644 index 00000000..51395580 --- /dev/null +++ b/vendor/github.com/shirou/w32/comctl32.go @@ -0,0 +1,111 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modcomctl32 = syscall.NewLazyDLL("comctl32.dll") + + procInitCommonControlsEx = modcomctl32.NewProc("InitCommonControlsEx") + procImageList_Create = modcomctl32.NewProc("ImageList_Create") + procImageList_Destroy = modcomctl32.NewProc("ImageList_Destroy") + procImageList_GetImageCount = modcomctl32.NewProc("ImageList_GetImageCount") + procImageList_SetImageCount = modcomctl32.NewProc("ImageList_SetImageCount") + procImageList_Add = modcomctl32.NewProc("ImageList_Add") + procImageList_ReplaceIcon = modcomctl32.NewProc("ImageList_ReplaceIcon") + procImageList_Remove = modcomctl32.NewProc("ImageList_Remove") + procTrackMouseEvent = modcomctl32.NewProc("_TrackMouseEvent") +) + +func InitCommonControlsEx(lpInitCtrls *INITCOMMONCONTROLSEX) bool { + ret, _, _ := procInitCommonControlsEx.Call( + uintptr(unsafe.Pointer(lpInitCtrls))) + + return ret != 0 +} + +func ImageList_Create(cx, cy int, flags uint, cInitial, cGrow int) HIMAGELIST { + ret, _, _ := procImageList_Create.Call( + uintptr(cx), + uintptr(cy), + uintptr(flags), + uintptr(cInitial), + uintptr(cGrow)) + + if ret == 0 { + panic("Create image list failed") + } + + return HIMAGELIST(ret) +} + +func ImageList_Destroy(himl HIMAGELIST) bool { + ret, _, _ := procImageList_Destroy.Call( + uintptr(himl)) + + return ret != 0 +} + +func ImageList_GetImageCount(himl HIMAGELIST) int { + ret, _, _ := procImageList_GetImageCount.Call( + uintptr(himl)) + + return int(ret) +} + +func ImageList_SetImageCount(himl HIMAGELIST, uNewCount uint) bool { + ret, _, _ := procImageList_SetImageCount.Call( + uintptr(himl), + uintptr(uNewCount)) + + return ret != 0 +} + +func ImageList_Add(himl HIMAGELIST, hbmImage, hbmMask HBITMAP) int { + ret, _, _ := procImageList_Add.Call( + uintptr(himl), + uintptr(hbmImage), + uintptr(hbmMask)) + + return int(ret) +} + +func ImageList_ReplaceIcon(himl HIMAGELIST, i int, hicon HICON) int { + ret, _, _ := procImageList_ReplaceIcon.Call( + uintptr(himl), + uintptr(i), + uintptr(hicon)) + + return int(ret) +} + +func ImageList_AddIcon(himl HIMAGELIST, hicon HICON) int { + return ImageList_ReplaceIcon(himl, -1, hicon) +} + +func ImageList_Remove(himl HIMAGELIST, i int) bool { + ret, _, _ := procImageList_Remove.Call( + uintptr(himl), + uintptr(i)) + + return ret != 0 +} + +func ImageList_RemoveAll(himl HIMAGELIST) bool { + return ImageList_Remove(himl, -1) +} + +func TrackMouseEvent(tme *TRACKMOUSEEVENT) bool { + ret, _, _ := procTrackMouseEvent.Call( + uintptr(unsafe.Pointer(tme))) + + return ret != 0 +} diff --git a/vendor/github.com/shirou/w32/comdlg32.go b/vendor/github.com/shirou/w32/comdlg32.go new file mode 100644 index 00000000..ad9f7762 --- /dev/null +++ b/vendor/github.com/shirou/w32/comdlg32.go @@ -0,0 +1,40 @@ +// Copyright 
2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modcomdlg32 = syscall.NewLazyDLL("comdlg32.dll") + + procGetSaveFileName = modcomdlg32.NewProc("GetSaveFileNameW") + procGetOpenFileName = modcomdlg32.NewProc("GetOpenFileNameW") + procCommDlgExtendedError = modcomdlg32.NewProc("CommDlgExtendedError") +) + +func GetOpenFileName(ofn *OPENFILENAME) bool { + ret, _, _ := procGetOpenFileName.Call( + uintptr(unsafe.Pointer(ofn))) + + return ret != 0 +} + +func GetSaveFileName(ofn *OPENFILENAME) bool { + ret, _, _ := procGetSaveFileName.Call( + uintptr(unsafe.Pointer(ofn))) + + return ret != 0 +} + +func CommDlgExtendedError() uint { + ret, _, _ := procCommDlgExtendedError.Call() + + return uint(ret) +} diff --git a/vendor/github.com/shirou/w32/constants.go b/vendor/github.com/shirou/w32/constants.go new file mode 100644 index 00000000..62d2d4b3 --- /dev/null +++ b/vendor/github.com/shirou/w32/constants.go @@ -0,0 +1,2661 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package w32 + +const ( + FALSE = 0 + TRUE = 1 +) + +const ( + NO_ERROR = 0 + ERROR_SUCCESS = 0 + ERROR_FILE_NOT_FOUND = 2 + ERROR_PATH_NOT_FOUND = 3 + ERROR_ACCESS_DENIED = 5 + ERROR_INVALID_HANDLE = 6 + ERROR_BAD_FORMAT = 11 + ERROR_INVALID_NAME = 123 + ERROR_MORE_DATA = 234 + ERROR_NO_MORE_ITEMS = 259 + ERROR_INVALID_SERVICE_CONTROL = 1052 + ERROR_SERVICE_REQUEST_TIMEOUT = 1053 + ERROR_SERVICE_NO_THREAD = 1054 + ERROR_SERVICE_DATABASE_LOCKED = 1055 + ERROR_SERVICE_ALREADY_RUNNING = 1056 + ERROR_SERVICE_DISABLED = 1058 + ERROR_SERVICE_DOES_NOT_EXIST = 1060 + ERROR_SERVICE_CANNOT_ACCEPT_CTRL = 1061 + ERROR_SERVICE_NOT_ACTIVE = 1062 + ERROR_DATABASE_DOES_NOT_EXIST = 1065 + ERROR_SERVICE_DEPENDENCY_FAIL = 1068 + ERROR_SERVICE_LOGON_FAILED = 1069 + ERROR_SERVICE_MARKED_FOR_DELETE = 1072 + ERROR_SERVICE_DEPENDENCY_DELETED = 1075 +) + +const ( + SE_ERR_FNF = 2 + SE_ERR_PNF = 3 + SE_ERR_ACCESSDENIED = 5 + SE_ERR_OOM = 8 + SE_ERR_DLLNOTFOUND = 32 + SE_ERR_SHARE = 26 + SE_ERR_ASSOCINCOMPLETE = 27 + SE_ERR_DDETIMEOUT = 28 + SE_ERR_DDEFAIL = 29 + SE_ERR_DDEBUSY = 30 + SE_ERR_NOASSOC = 31 +) + +const ( + CW_USEDEFAULT = ^0x7fffffff +) + +// ShowWindow constants +const ( + SW_HIDE = 0 + SW_NORMAL = 1 + SW_SHOWNORMAL = 1 + SW_SHOWMINIMIZED = 2 + SW_MAXIMIZE = 3 + SW_SHOWMAXIMIZED = 3 + SW_SHOWNOACTIVATE = 4 + SW_SHOW = 5 + SW_MINIMIZE = 6 + SW_SHOWMINNOACTIVE = 7 + SW_SHOWNA = 8 + SW_RESTORE = 9 + SW_SHOWDEFAULT = 10 + SW_FORCEMINIMIZE = 11 +) + +// Window class styles +const ( + CS_VREDRAW = 0x00000001 + CS_HREDRAW = 0x00000002 + CS_KEYCVTWINDOW = 0x00000004 + CS_DBLCLKS = 0x00000008 + CS_OWNDC = 0x00000020 + CS_CLASSDC = 0x00000040 + CS_PARENTDC = 0x00000080 + CS_NOKEYCVT = 0x00000100 + CS_NOCLOSE = 0x00000200 + CS_SAVEBITS = 0x00000800 + CS_BYTEALIGNCLIENT = 0x00001000 + CS_BYTEALIGNWINDOW = 0x00002000 + CS_GLOBALCLASS = 0x00004000 + CS_IME = 0x00010000 + CS_DROPSHADOW = 0x00020000 +) + +// Predefined cursor constants +const ( + IDC_ARROW = 32512 + IDC_IBEAM = 32513 + IDC_WAIT = 32514 + IDC_CROSS = 32515 + IDC_UPARROW = 32516 + IDC_SIZENWSE = 32642 + IDC_SIZENESW = 32643 + IDC_SIZEWE = 32644 + IDC_SIZENS = 32645 + IDC_SIZEALL = 32646 + IDC_NO = 32648 + IDC_HAND = 32649 + IDC_APPSTARTING = 32650 + IDC_HELP = 32651 + IDC_ICON = 32641 
+ IDC_SIZE = 32640 +) + +// Predefined icon constants +const ( + IDI_APPLICATION = 32512 + IDI_HAND = 32513 + IDI_QUESTION = 32514 + IDI_EXCLAMATION = 32515 + IDI_ASTERISK = 32516 + IDI_WINLOGO = 32517 + IDI_WARNING = IDI_EXCLAMATION + IDI_ERROR = IDI_HAND + IDI_INFORMATION = IDI_ASTERISK +) + +// Button style constants +const ( + BS_3STATE = 5 + BS_AUTO3STATE = 6 + BS_AUTOCHECKBOX = 3 + BS_AUTORADIOBUTTON = 9 + BS_BITMAP = 128 + BS_BOTTOM = 0X800 + BS_CENTER = 0X300 + BS_CHECKBOX = 2 + BS_DEFPUSHBUTTON = 1 + BS_GROUPBOX = 7 + BS_ICON = 64 + BS_LEFT = 256 + BS_LEFTTEXT = 32 + BS_MULTILINE = 0X2000 + BS_NOTIFY = 0X4000 + BS_OWNERDRAW = 0XB + BS_PUSHBUTTON = 0 + BS_PUSHLIKE = 4096 + BS_RADIOBUTTON = 4 + BS_RIGHT = 512 + BS_RIGHTBUTTON = 32 + BS_TEXT = 0 + BS_TOP = 0X400 + BS_USERBUTTON = 8 + BS_VCENTER = 0XC00 + BS_FLAT = 0X8000 +) + +// Button state constants +const ( + BST_CHECKED = 1 + BST_INDETERMINATE = 2 + BST_UNCHECKED = 0 + BST_FOCUS = 8 + BST_PUSHED = 4 +) + +// Predefined brushes constants +const ( + COLOR_3DDKSHADOW = 21 + COLOR_3DFACE = 15 + COLOR_3DHILIGHT = 20 + COLOR_3DHIGHLIGHT = 20 + COLOR_3DLIGHT = 22 + COLOR_BTNHILIGHT = 20 + COLOR_3DSHADOW = 16 + COLOR_ACTIVEBORDER = 10 + COLOR_ACTIVECAPTION = 2 + COLOR_APPWORKSPACE = 12 + COLOR_BACKGROUND = 1 + COLOR_DESKTOP = 1 + COLOR_BTNFACE = 15 + COLOR_BTNHIGHLIGHT = 20 + COLOR_BTNSHADOW = 16 + COLOR_BTNTEXT = 18 + COLOR_CAPTIONTEXT = 9 + COLOR_GRAYTEXT = 17 + COLOR_HIGHLIGHT = 13 + COLOR_HIGHLIGHTTEXT = 14 + COLOR_INACTIVEBORDER = 11 + COLOR_INACTIVECAPTION = 3 + COLOR_INACTIVECAPTIONTEXT = 19 + COLOR_INFOBK = 24 + COLOR_INFOTEXT = 23 + COLOR_MENU = 4 + COLOR_MENUTEXT = 7 + COLOR_SCROLLBAR = 0 + COLOR_WINDOW = 5 + COLOR_WINDOWFRAME = 6 + COLOR_WINDOWTEXT = 8 + COLOR_HOTLIGHT = 26 + COLOR_GRADIENTACTIVECAPTION = 27 + COLOR_GRADIENTINACTIVECAPTION = 28 +) + +// Button message constants +const ( + BM_CLICK = 245 + BM_GETCHECK = 240 + BM_GETIMAGE = 246 + BM_GETSTATE = 242 + BM_SETCHECK = 241 + BM_SETIMAGE = 247 + BM_SETSTATE = 243 + BM_SETSTYLE = 244 +) + +// Button notifications +const ( + BN_CLICKED = 0 + BN_PAINT = 1 + BN_HILITE = 2 + BN_PUSHED = BN_HILITE + BN_UNHILITE = 3 + BN_UNPUSHED = BN_UNHILITE + BN_DISABLE = 4 + BN_DOUBLECLICKED = 5 + BN_DBLCLK = BN_DOUBLECLICKED + BN_SETFOCUS = 6 + BN_KILLFOCUS = 7 +) + +// GetWindowLong and GetWindowLongPtr constants +const ( + GWL_EXSTYLE = -20 + GWL_STYLE = -16 + GWL_WNDPROC = -4 + GWLP_WNDPROC = -4 + GWL_HINSTANCE = -6 + GWLP_HINSTANCE = -6 + GWL_HWNDPARENT = -8 + GWLP_HWNDPARENT = -8 + GWL_ID = -12 + GWLP_ID = -12 + GWL_USERDATA = -21 + GWLP_USERDATA = -21 +) + +// Window style constants +const ( + WS_OVERLAPPED = 0X00000000 + WS_POPUP = 0X80000000 + WS_CHILD = 0X40000000 + WS_MINIMIZE = 0X20000000 + WS_VISIBLE = 0X10000000 + WS_DISABLED = 0X08000000 + WS_CLIPSIBLINGS = 0X04000000 + WS_CLIPCHILDREN = 0X02000000 + WS_MAXIMIZE = 0X01000000 + WS_CAPTION = 0X00C00000 + WS_BORDER = 0X00800000 + WS_DLGFRAME = 0X00400000 + WS_VSCROLL = 0X00200000 + WS_HSCROLL = 0X00100000 + WS_SYSMENU = 0X00080000 + WS_THICKFRAME = 0X00040000 + WS_GROUP = 0X00020000 + WS_TABSTOP = 0X00010000 + WS_MINIMIZEBOX = 0X00020000 + WS_MAXIMIZEBOX = 0X00010000 + WS_TILED = 0X00000000 + WS_ICONIC = 0X20000000 + WS_SIZEBOX = 0X00040000 + WS_OVERLAPPEDWINDOW = 0X00000000 | 0X00C00000 | 0X00080000 | 0X00040000 | 0X00020000 | 0X00010000 + WS_POPUPWINDOW = 0X80000000 | 0X00800000 | 0X00080000 + WS_CHILDWINDOW = 0X40000000 +) + +// Extended window style constants +const ( + WS_EX_DLGMODALFRAME = 0X00000001 + 
WS_EX_NOPARENTNOTIFY = 0X00000004 + WS_EX_TOPMOST = 0X00000008 + WS_EX_ACCEPTFILES = 0X00000010 + WS_EX_TRANSPARENT = 0X00000020 + WS_EX_MDICHILD = 0X00000040 + WS_EX_TOOLWINDOW = 0X00000080 + WS_EX_WINDOWEDGE = 0X00000100 + WS_EX_CLIENTEDGE = 0X00000200 + WS_EX_CONTEXTHELP = 0X00000400 + WS_EX_RIGHT = 0X00001000 + WS_EX_LEFT = 0X00000000 + WS_EX_RTLREADING = 0X00002000 + WS_EX_LTRREADING = 0X00000000 + WS_EX_LEFTSCROLLBAR = 0X00004000 + WS_EX_RIGHTSCROLLBAR = 0X00000000 + WS_EX_CONTROLPARENT = 0X00010000 + WS_EX_STATICEDGE = 0X00020000 + WS_EX_APPWINDOW = 0X00040000 + WS_EX_OVERLAPPEDWINDOW = 0X00000100 | 0X00000200 + WS_EX_PALETTEWINDOW = 0X00000100 | 0X00000080 | 0X00000008 + WS_EX_LAYERED = 0X00080000 + WS_EX_NOINHERITLAYOUT = 0X00100000 + WS_EX_LAYOUTRTL = 0X00400000 + WS_EX_NOACTIVATE = 0X08000000 +) + +// Window message constants +const ( + WM_APP = 32768 + WM_ACTIVATE = 6 + WM_ACTIVATEAPP = 28 + WM_AFXFIRST = 864 + WM_AFXLAST = 895 + WM_ASKCBFORMATNAME = 780 + WM_CANCELJOURNAL = 75 + WM_CANCELMODE = 31 + WM_CAPTURECHANGED = 533 + WM_CHANGECBCHAIN = 781 + WM_CHAR = 258 + WM_CHARTOITEM = 47 + WM_CHILDACTIVATE = 34 + WM_CLEAR = 771 + WM_CLOSE = 16 + WM_COMMAND = 273 + WM_COMMNOTIFY = 68 /* OBSOLETE */ + WM_COMPACTING = 65 + WM_COMPAREITEM = 57 + WM_CONTEXTMENU = 123 + WM_COPY = 769 + WM_COPYDATA = 74 + WM_CREATE = 1 + WM_CTLCOLORBTN = 309 + WM_CTLCOLORDLG = 310 + WM_CTLCOLOREDIT = 307 + WM_CTLCOLORLISTBOX = 308 + WM_CTLCOLORMSGBOX = 306 + WM_CTLCOLORSCROLLBAR = 311 + WM_CTLCOLORSTATIC = 312 + WM_CUT = 768 + WM_DEADCHAR = 259 + WM_DELETEITEM = 45 + WM_DESTROY = 2 + WM_DESTROYCLIPBOARD = 775 + WM_DEVICECHANGE = 537 + WM_DEVMODECHANGE = 27 + WM_DISPLAYCHANGE = 126 + WM_DRAWCLIPBOARD = 776 + WM_DRAWITEM = 43 + WM_DROPFILES = 563 + WM_ENABLE = 10 + WM_ENDSESSION = 22 + WM_ENTERIDLE = 289 + WM_ENTERMENULOOP = 529 + WM_ENTERSIZEMOVE = 561 + WM_ERASEBKGND = 20 + WM_EXITMENULOOP = 530 + WM_EXITSIZEMOVE = 562 + WM_FONTCHANGE = 29 + WM_GETDLGCODE = 135 + WM_GETFONT = 49 + WM_GETHOTKEY = 51 + WM_GETICON = 127 + WM_GETMINMAXINFO = 36 + WM_GETTEXT = 13 + WM_GETTEXTLENGTH = 14 + WM_HANDHELDFIRST = 856 + WM_HANDHELDLAST = 863 + WM_HELP = 83 + WM_HOTKEY = 786 + WM_HSCROLL = 276 + WM_HSCROLLCLIPBOARD = 782 + WM_ICONERASEBKGND = 39 + WM_INITDIALOG = 272 + WM_INITMENU = 278 + WM_INITMENUPOPUP = 279 + WM_INPUT = 0X00FF + WM_INPUTLANGCHANGE = 81 + WM_INPUTLANGCHANGEREQUEST = 80 + WM_KEYDOWN = 256 + WM_KEYUP = 257 + WM_KILLFOCUS = 8 + WM_MDIACTIVATE = 546 + WM_MDICASCADE = 551 + WM_MDICREATE = 544 + WM_MDIDESTROY = 545 + WM_MDIGETACTIVE = 553 + WM_MDIICONARRANGE = 552 + WM_MDIMAXIMIZE = 549 + WM_MDINEXT = 548 + WM_MDIREFRESHMENU = 564 + WM_MDIRESTORE = 547 + WM_MDISETMENU = 560 + WM_MDITILE = 550 + WM_MEASUREITEM = 44 + WM_GETOBJECT = 0X003D + WM_CHANGEUISTATE = 0X0127 + WM_UPDATEUISTATE = 0X0128 + WM_QUERYUISTATE = 0X0129 + WM_UNINITMENUPOPUP = 0X0125 + WM_MENURBUTTONUP = 290 + WM_MENUCOMMAND = 0X0126 + WM_MENUGETOBJECT = 0X0124 + WM_MENUDRAG = 0X0123 + WM_APPCOMMAND = 0X0319 + WM_MENUCHAR = 288 + WM_MENUSELECT = 287 + WM_MOVE = 3 + WM_MOVING = 534 + WM_NCACTIVATE = 134 + WM_NCCALCSIZE = 131 + WM_NCCREATE = 129 + WM_NCDESTROY = 130 + WM_NCHITTEST = 132 + WM_NCLBUTTONDBLCLK = 163 + WM_NCLBUTTONDOWN = 161 + WM_NCLBUTTONUP = 162 + WM_NCMBUTTONDBLCLK = 169 + WM_NCMBUTTONDOWN = 167 + WM_NCMBUTTONUP = 168 + WM_NCXBUTTONDOWN = 171 + WM_NCXBUTTONUP = 172 + WM_NCXBUTTONDBLCLK = 173 + WM_NCMOUSEHOVER = 0X02A0 + WM_NCMOUSELEAVE = 0X02A2 + WM_NCMOUSEMOVE = 160 + WM_NCPAINT = 133 + WM_NCRBUTTONDBLCLK = 166 + 
WM_NCRBUTTONDOWN = 164 + WM_NCRBUTTONUP = 165 + WM_NEXTDLGCTL = 40 + WM_NEXTMENU = 531 + WM_NOTIFY = 78 + WM_NOTIFYFORMAT = 85 + WM_NULL = 0 + WM_PAINT = 15 + WM_PAINTCLIPBOARD = 777 + WM_PAINTICON = 38 + WM_PALETTECHANGED = 785 + WM_PALETTEISCHANGING = 784 + WM_PARENTNOTIFY = 528 + WM_PASTE = 770 + WM_PENWINFIRST = 896 + WM_PENWINLAST = 911 + WM_POWER = 72 + WM_POWERBROADCAST = 536 + WM_PRINT = 791 + WM_PRINTCLIENT = 792 + WM_QUERYDRAGICON = 55 + WM_QUERYENDSESSION = 17 + WM_QUERYNEWPALETTE = 783 + WM_QUERYOPEN = 19 + WM_QUEUESYNC = 35 + WM_QUIT = 18 + WM_RENDERALLFORMATS = 774 + WM_RENDERFORMAT = 773 + WM_SETCURSOR = 32 + WM_SETFOCUS = 7 + WM_SETFONT = 48 + WM_SETHOTKEY = 50 + WM_SETICON = 128 + WM_SETREDRAW = 11 + WM_SETTEXT = 12 + WM_SETTINGCHANGE = 26 + WM_SHOWWINDOW = 24 + WM_SIZE = 5 + WM_SIZECLIPBOARD = 779 + WM_SIZING = 532 + WM_SPOOLERSTATUS = 42 + WM_STYLECHANGED = 125 + WM_STYLECHANGING = 124 + WM_SYSCHAR = 262 + WM_SYSCOLORCHANGE = 21 + WM_SYSCOMMAND = 274 + WM_SYSDEADCHAR = 263 + WM_SYSKEYDOWN = 260 + WM_SYSKEYUP = 261 + WM_TCARD = 82 + WM_THEMECHANGED = 794 + WM_TIMECHANGE = 30 + WM_TIMER = 275 + WM_UNDO = 772 + WM_USER = 1024 + WM_USERCHANGED = 84 + WM_VKEYTOITEM = 46 + WM_VSCROLL = 277 + WM_VSCROLLCLIPBOARD = 778 + WM_WINDOWPOSCHANGED = 71 + WM_WINDOWPOSCHANGING = 70 + WM_WININICHANGE = 26 + WM_KEYFIRST = 256 + WM_KEYLAST = 264 + WM_SYNCPAINT = 136 + WM_MOUSEACTIVATE = 33 + WM_MOUSEMOVE = 512 + WM_LBUTTONDOWN = 513 + WM_LBUTTONUP = 514 + WM_LBUTTONDBLCLK = 515 + WM_RBUTTONDOWN = 516 + WM_RBUTTONUP = 517 + WM_RBUTTONDBLCLK = 518 + WM_MBUTTONDOWN = 519 + WM_MBUTTONUP = 520 + WM_MBUTTONDBLCLK = 521 + WM_MOUSEWHEEL = 522 + WM_MOUSEFIRST = 512 + WM_XBUTTONDOWN = 523 + WM_XBUTTONUP = 524 + WM_XBUTTONDBLCLK = 525 + WM_MOUSELAST = 525 + WM_MOUSEHOVER = 0X2A1 + WM_MOUSELEAVE = 0X2A3 + WM_CLIPBOARDUPDATE = 0x031D +) + +// WM_ACTIVATE +const ( + WA_INACTIVE = 0 + WA_ACTIVE = 1 + WA_CLICKACTIVE = 2 +) + +const LF_FACESIZE = 32 + +// Font weight constants +const ( + FW_DONTCARE = 0 + FW_THIN = 100 + FW_EXTRALIGHT = 200 + FW_ULTRALIGHT = FW_EXTRALIGHT + FW_LIGHT = 300 + FW_NORMAL = 400 + FW_REGULAR = 400 + FW_MEDIUM = 500 + FW_SEMIBOLD = 600 + FW_DEMIBOLD = FW_SEMIBOLD + FW_BOLD = 700 + FW_EXTRABOLD = 800 + FW_ULTRABOLD = FW_EXTRABOLD + FW_HEAVY = 900 + FW_BLACK = FW_HEAVY +) + +// Charset constants +const ( + ANSI_CHARSET = 0 + DEFAULT_CHARSET = 1 + SYMBOL_CHARSET = 2 + SHIFTJIS_CHARSET = 128 + HANGEUL_CHARSET = 129 + HANGUL_CHARSET = 129 + GB2312_CHARSET = 134 + CHINESEBIG5_CHARSET = 136 + GREEK_CHARSET = 161 + TURKISH_CHARSET = 162 + HEBREW_CHARSET = 177 + ARABIC_CHARSET = 178 + BALTIC_CHARSET = 186 + RUSSIAN_CHARSET = 204 + THAI_CHARSET = 222 + EASTEUROPE_CHARSET = 238 + OEM_CHARSET = 255 + JOHAB_CHARSET = 130 + VIETNAMESE_CHARSET = 163 + MAC_CHARSET = 77 +) + +// Font output precision constants +const ( + OUT_DEFAULT_PRECIS = 0 + OUT_STRING_PRECIS = 1 + OUT_CHARACTER_PRECIS = 2 + OUT_STROKE_PRECIS = 3 + OUT_TT_PRECIS = 4 + OUT_DEVICE_PRECIS = 5 + OUT_RASTER_PRECIS = 6 + OUT_TT_ONLY_PRECIS = 7 + OUT_OUTLINE_PRECIS = 8 + OUT_PS_ONLY_PRECIS = 10 +) + +// Font clipping precision constants +const ( + CLIP_DEFAULT_PRECIS = 0 + CLIP_CHARACTER_PRECIS = 1 + CLIP_STROKE_PRECIS = 2 + CLIP_MASK = 15 + CLIP_LH_ANGLES = 16 + CLIP_TT_ALWAYS = 32 + CLIP_EMBEDDED = 128 +) + +// Font output quality constants +const ( + DEFAULT_QUALITY = 0 + DRAFT_QUALITY = 1 + PROOF_QUALITY = 2 + NONANTIALIASED_QUALITY = 3 + ANTIALIASED_QUALITY = 4 + CLEARTYPE_QUALITY = 5 +) + +// Font pitch constants +const ( + 
DEFAULT_PITCH = 0 + FIXED_PITCH = 1 + VARIABLE_PITCH = 2 +) + +// Font family constants +const ( + FF_DECORATIVE = 80 + FF_DONTCARE = 0 + FF_MODERN = 48 + FF_ROMAN = 16 + FF_SCRIPT = 64 + FF_SWISS = 32 +) + +// DeviceCapabilities capabilities +const ( + DC_FIELDS = 1 + DC_PAPERS = 2 + DC_PAPERSIZE = 3 + DC_MINEXTENT = 4 + DC_MAXEXTENT = 5 + DC_BINS = 6 + DC_DUPLEX = 7 + DC_SIZE = 8 + DC_EXTRA = 9 + DC_VERSION = 10 + DC_DRIVER = 11 + DC_BINNAMES = 12 + DC_ENUMRESOLUTIONS = 13 + DC_FILEDEPENDENCIES = 14 + DC_TRUETYPE = 15 + DC_PAPERNAMES = 16 + DC_ORIENTATION = 17 + DC_COPIES = 18 + DC_BINADJUST = 19 + DC_EMF_COMPLIANT = 20 + DC_DATATYPE_PRODUCED = 21 + DC_COLLATE = 22 + DC_MANUFACTURER = 23 + DC_MODEL = 24 + DC_PERSONALITY = 25 + DC_PRINTRATE = 26 + DC_PRINTRATEUNIT = 27 + DC_PRINTERMEM = 28 + DC_MEDIAREADY = 29 + DC_STAPLE = 30 + DC_PRINTRATEPPM = 31 + DC_COLORDEVICE = 32 + DC_NUP = 33 + DC_MEDIATYPENAMES = 34 + DC_MEDIATYPES = 35 +) + +// GetDeviceCaps index constants +const ( + DRIVERVERSION = 0 + TECHNOLOGY = 2 + HORZSIZE = 4 + VERTSIZE = 6 + HORZRES = 8 + VERTRES = 10 + LOGPIXELSX = 88 + LOGPIXELSY = 90 + BITSPIXEL = 12 + PLANES = 14 + NUMBRUSHES = 16 + NUMPENS = 18 + NUMFONTS = 22 + NUMCOLORS = 24 + NUMMARKERS = 20 + ASPECTX = 40 + ASPECTY = 42 + ASPECTXY = 44 + PDEVICESIZE = 26 + CLIPCAPS = 36 + SIZEPALETTE = 104 + NUMRESERVED = 106 + COLORRES = 108 + PHYSICALWIDTH = 110 + PHYSICALHEIGHT = 111 + PHYSICALOFFSETX = 112 + PHYSICALOFFSETY = 113 + SCALINGFACTORX = 114 + SCALINGFACTORY = 115 + VREFRESH = 116 + DESKTOPHORZRES = 118 + DESKTOPVERTRES = 117 + BLTALIGNMENT = 119 + SHADEBLENDCAPS = 120 + COLORMGMTCAPS = 121 + RASTERCAPS = 38 + CURVECAPS = 28 + LINECAPS = 30 + POLYGONALCAPS = 32 + TEXTCAPS = 34 +) + +// GetDeviceCaps TECHNOLOGY constants +const ( + DT_PLOTTER = 0 + DT_RASDISPLAY = 1 + DT_RASPRINTER = 2 + DT_RASCAMERA = 3 + DT_CHARSTREAM = 4 + DT_METAFILE = 5 + DT_DISPFILE = 6 +) + +// GetDeviceCaps SHADEBLENDCAPS constants +const ( + SB_NONE = 0x00 + SB_CONST_ALPHA = 0x01 + SB_PIXEL_ALPHA = 0x02 + SB_PREMULT_ALPHA = 0x04 + SB_GRAD_RECT = 0x10 + SB_GRAD_TRI = 0x20 +) + +// GetDeviceCaps COLORMGMTCAPS constants +const ( + CM_NONE = 0x00 + CM_DEVICE_ICM = 0x01 + CM_GAMMA_RAMP = 0x02 + CM_CMYK_COLOR = 0x04 +) + +// GetDeviceCaps RASTERCAPS constants +const ( + RC_BANDING = 2 + RC_BITBLT = 1 + RC_BITMAP64 = 8 + RC_DI_BITMAP = 128 + RC_DIBTODEV = 512 + RC_FLOODFILL = 4096 + RC_GDI20_OUTPUT = 16 + RC_PALETTE = 256 + RC_SCALING = 4 + RC_STRETCHBLT = 2048 + RC_STRETCHDIB = 8192 + RC_DEVBITS = 0x8000 + RC_OP_DX_OUTPUT = 0x4000 +) + +// GetDeviceCaps CURVECAPS constants +const ( + CC_NONE = 0 + CC_CIRCLES = 1 + CC_PIE = 2 + CC_CHORD = 4 + CC_ELLIPSES = 8 + CC_WIDE = 16 + CC_STYLED = 32 + CC_WIDESTYLED = 64 + CC_INTERIORS = 128 + CC_ROUNDRECT = 256 +) + +// GetDeviceCaps LINECAPS constants +const ( + LC_NONE = 0 + LC_POLYLINE = 2 + LC_MARKER = 4 + LC_POLYMARKER = 8 + LC_WIDE = 16 + LC_STYLED = 32 + LC_WIDESTYLED = 64 + LC_INTERIORS = 128 +) + +// GetDeviceCaps POLYGONALCAPS constants +const ( + PC_NONE = 0 + PC_POLYGON = 1 + PC_POLYPOLYGON = 256 + PC_PATHS = 512 + PC_RECTANGLE = 2 + PC_WINDPOLYGON = 4 + PC_SCANLINE = 8 + PC_TRAPEZOID = 4 + PC_WIDE = 16 + PC_STYLED = 32 + PC_WIDESTYLED = 64 + PC_INTERIORS = 128 +) + +// GetDeviceCaps TEXTCAPS constants +const ( + TC_OP_CHARACTER = 1 + TC_OP_STROKE = 2 + TC_CP_STROKE = 4 + TC_CR_90 = 8 + TC_CR_ANY = 16 + TC_SF_X_YINDEP = 32 + TC_SA_DOUBLE = 64 + TC_SA_INTEGER = 128 + TC_SA_CONTIN = 256 + TC_EA_DOUBLE = 512 + TC_IA_ABLE = 1024 + TC_UA_ABLE = 
2048 + TC_SO_ABLE = 4096 + TC_RA_ABLE = 8192 + TC_VA_ABLE = 16384 + TC_RESERVED = 32768 + TC_SCROLLBLT = 65536 +) + +// Static control styles +const ( + SS_BITMAP = 14 + SS_BLACKFRAME = 7 + SS_BLACKRECT = 4 + SS_CENTER = 1 + SS_CENTERIMAGE = 512 + SS_EDITCONTROL = 0x2000 + SS_ENHMETAFILE = 15 + SS_ETCHEDFRAME = 18 + SS_ETCHEDHORZ = 16 + SS_ETCHEDVERT = 17 + SS_GRAYFRAME = 8 + SS_GRAYRECT = 5 + SS_ICON = 3 + SS_LEFT = 0 + SS_LEFTNOWORDWRAP = 0xc + SS_NOPREFIX = 128 + SS_NOTIFY = 256 + SS_OWNERDRAW = 0xd + SS_REALSIZECONTROL = 0x040 + SS_REALSIZEIMAGE = 0x800 + SS_RIGHT = 2 + SS_RIGHTJUST = 0x400 + SS_SIMPLE = 11 + SS_SUNKEN = 4096 + SS_WHITEFRAME = 9 + SS_WHITERECT = 6 + SS_USERITEM = 10 + SS_TYPEMASK = 0x0000001F + SS_ENDELLIPSIS = 0x00004000 + SS_PATHELLIPSIS = 0x00008000 + SS_WORDELLIPSIS = 0x0000C000 + SS_ELLIPSISMASK = 0x0000C000 +) + +// Edit styles +const ( + ES_LEFT = 0x0000 + ES_CENTER = 0x0001 + ES_RIGHT = 0x0002 + ES_MULTILINE = 0x0004 + ES_UPPERCASE = 0x0008 + ES_LOWERCASE = 0x0010 + ES_PASSWORD = 0x0020 + ES_AUTOVSCROLL = 0x0040 + ES_AUTOHSCROLL = 0x0080 + ES_NOHIDESEL = 0x0100 + ES_OEMCONVERT = 0x0400 + ES_READONLY = 0x0800 + ES_WANTRETURN = 0x1000 + ES_NUMBER = 0x2000 +) + +// Edit notifications +const ( + EN_SETFOCUS = 0x0100 + EN_KILLFOCUS = 0x0200 + EN_CHANGE = 0x0300 + EN_UPDATE = 0x0400 + EN_ERRSPACE = 0x0500 + EN_MAXTEXT = 0x0501 + EN_HSCROLL = 0x0601 + EN_VSCROLL = 0x0602 + EN_ALIGN_LTR_EC = 0x0700 + EN_ALIGN_RTL_EC = 0x0701 +) + +// Edit messages +const ( + EM_GETSEL = 0x00B0 + EM_SETSEL = 0x00B1 + EM_GETRECT = 0x00B2 + EM_SETRECT = 0x00B3 + EM_SETRECTNP = 0x00B4 + EM_SCROLL = 0x00B5 + EM_LINESCROLL = 0x00B6 + EM_SCROLLCARET = 0x00B7 + EM_GETMODIFY = 0x00B8 + EM_SETMODIFY = 0x00B9 + EM_GETLINECOUNT = 0x00BA + EM_LINEINDEX = 0x00BB + EM_SETHANDLE = 0x00BC + EM_GETHANDLE = 0x00BD + EM_GETTHUMB = 0x00BE + EM_LINELENGTH = 0x00C1 + EM_REPLACESEL = 0x00C2 + EM_GETLINE = 0x00C4 + EM_LIMITTEXT = 0x00C5 + EM_CANUNDO = 0x00C6 + EM_UNDO = 0x00C7 + EM_FMTLINES = 0x00C8 + EM_LINEFROMCHAR = 0x00C9 + EM_SETTABSTOPS = 0x00CB + EM_SETPASSWORDCHAR = 0x00CC + EM_EMPTYUNDOBUFFER = 0x00CD + EM_GETFIRSTVISIBLELINE = 0x00CE + EM_SETREADONLY = 0x00CF + EM_SETWORDBREAKPROC = 0x00D0 + EM_GETWORDBREAKPROC = 0x00D1 + EM_GETPASSWORDCHAR = 0x00D2 + EM_SETMARGINS = 0x00D3 + EM_GETMARGINS = 0x00D4 + EM_SETLIMITTEXT = EM_LIMITTEXT + EM_GETLIMITTEXT = 0x00D5 + EM_POSFROMCHAR = 0x00D6 + EM_CHARFROMPOS = 0x00D7 + EM_SETIMESTATUS = 0x00D8 + EM_GETIMESTATUS = 0x00D9 + EM_SETCUEBANNER = 0x1501 + EM_GETCUEBANNER = 0x1502 +) + +const ( + CCM_FIRST = 0x2000 + CCM_LAST = CCM_FIRST + 0x200 + CCM_SETBKCOLOR = 8193 + CCM_SETCOLORSCHEME = 8194 + CCM_GETCOLORSCHEME = 8195 + CCM_GETDROPTARGET = 8196 + CCM_SETUNICODEFORMAT = 8197 + CCM_GETUNICODEFORMAT = 8198 + CCM_SETVERSION = 0x2007 + CCM_GETVERSION = 0x2008 + CCM_SETNOTIFYWINDOW = 0x2009 + CCM_SETWINDOWTHEME = 0x200b + CCM_DPISCALE = 0x200c +) + +// Common controls styles +const ( + CCS_TOP = 1 + CCS_NOMOVEY = 2 + CCS_BOTTOM = 3 + CCS_NORESIZE = 4 + CCS_NOPARENTALIGN = 8 + CCS_ADJUSTABLE = 32 + CCS_NODIVIDER = 64 + CCS_VERT = 128 + CCS_LEFT = 129 + CCS_NOMOVEX = 130 + CCS_RIGHT = 131 +) + +// ProgressBar messages +const ( + PROGRESS_CLASS = "msctls_progress32" + PBM_SETPOS = WM_USER + 2 + PBM_DELTAPOS = WM_USER + 3 + PBM_SETSTEP = WM_USER + 4 + PBM_STEPIT = WM_USER + 5 + PBM_SETRANGE32 = 1030 + PBM_GETRANGE = 1031 + PBM_GETPOS = 1032 + PBM_SETBARCOLOR = 1033 + PBM_SETBKCOLOR = CCM_SETBKCOLOR + PBS_SMOOTH = 1 + PBS_VERTICAL = 4 +) + +// GetOpenFileName and 
GetSaveFileName extended flags +const ( + OFN_EX_NOPLACESBAR = 0x00000001 +) + +// GetOpenFileName and GetSaveFileName flags +const ( + OFN_ALLOWMULTISELECT = 0x00000200 + OFN_CREATEPROMPT = 0x00002000 + OFN_DONTADDTORECENT = 0x02000000 + OFN_ENABLEHOOK = 0x00000020 + OFN_ENABLEINCLUDENOTIFY = 0x00400000 + OFN_ENABLESIZING = 0x00800000 + OFN_ENABLETEMPLATE = 0x00000040 + OFN_ENABLETEMPLATEHANDLE = 0x00000080 + OFN_EXPLORER = 0x00080000 + OFN_EXTENSIONDIFFERENT = 0x00000400 + OFN_FILEMUSTEXIST = 0x00001000 + OFN_FORCESHOWHIDDEN = 0x10000000 + OFN_HIDEREADONLY = 0x00000004 + OFN_LONGNAMES = 0x00200000 + OFN_NOCHANGEDIR = 0x00000008 + OFN_NODEREFERENCELINKS = 0x00100000 + OFN_NOLONGNAMES = 0x00040000 + OFN_NONETWORKBUTTON = 0x00020000 + OFN_NOREADONLYRETURN = 0x00008000 + OFN_NOTESTFILECREATE = 0x00010000 + OFN_NOVALIDATE = 0x00000100 + OFN_OVERWRITEPROMPT = 0x00000002 + OFN_PATHMUSTEXIST = 0x00000800 + OFN_READONLY = 0x00000001 + OFN_SHAREAWARE = 0x00004000 + OFN_SHOWHELP = 0x00000010 +) + +//SHBrowseForFolder flags +const ( + BIF_RETURNONLYFSDIRS = 0x00000001 + BIF_DONTGOBELOWDOMAIN = 0x00000002 + BIF_STATUSTEXT = 0x00000004 + BIF_RETURNFSANCESTORS = 0x00000008 + BIF_EDITBOX = 0x00000010 + BIF_VALIDATE = 0x00000020 + BIF_NEWDIALOGSTYLE = 0x00000040 + BIF_BROWSEINCLUDEURLS = 0x00000080 + BIF_USENEWUI = BIF_EDITBOX | BIF_NEWDIALOGSTYLE + BIF_UAHINT = 0x00000100 + BIF_NONEWFOLDERBUTTON = 0x00000200 + BIF_NOTRANSLATETARGETS = 0x00000400 + BIF_BROWSEFORCOMPUTER = 0x00001000 + BIF_BROWSEFORPRINTER = 0x00002000 + BIF_BROWSEINCLUDEFILES = 0x00004000 + BIF_SHAREABLE = 0x00008000 + BIF_BROWSEFILEJUNCTIONS = 0x00010000 +) + +//MessageBox flags +const ( + MB_OK = 0x00000000 + MB_OKCANCEL = 0x00000001 + MB_ABORTRETRYIGNORE = 0x00000002 + MB_YESNOCANCEL = 0x00000003 + MB_YESNO = 0x00000004 + MB_RETRYCANCEL = 0x00000005 + MB_CANCELTRYCONTINUE = 0x00000006 + MB_ICONHAND = 0x00000010 + MB_ICONQUESTION = 0x00000020 + MB_ICONEXCLAMATION = 0x00000030 + MB_ICONASTERISK = 0x00000040 + MB_USERICON = 0x00000080 + MB_ICONWARNING = MB_ICONEXCLAMATION + MB_ICONERROR = MB_ICONHAND + MB_ICONINFORMATION = MB_ICONASTERISK + MB_ICONSTOP = MB_ICONHAND + MB_DEFBUTTON1 = 0x00000000 + MB_DEFBUTTON2 = 0x00000100 + MB_DEFBUTTON3 = 0x00000200 + MB_DEFBUTTON4 = 0x00000300 +) + +//COM +const ( + E_INVALIDARG = 0x80070057 + E_OUTOFMEMORY = 0x8007000E + E_UNEXPECTED = 0x8000FFFF +) + +const ( + S_OK = 0 + S_FALSE = 0x0001 + RPC_E_CHANGED_MODE = 0x80010106 +) + +// GetSystemMetrics constants +const ( + SM_CXSCREEN = 0 + SM_CYSCREEN = 1 + SM_CXVSCROLL = 2 + SM_CYHSCROLL = 3 + SM_CYCAPTION = 4 + SM_CXBORDER = 5 + SM_CYBORDER = 6 + SM_CXDLGFRAME = 7 + SM_CYDLGFRAME = 8 + SM_CYVTHUMB = 9 + SM_CXHTHUMB = 10 + SM_CXICON = 11 + SM_CYICON = 12 + SM_CXCURSOR = 13 + SM_CYCURSOR = 14 + SM_CYMENU = 15 + SM_CXFULLSCREEN = 16 + SM_CYFULLSCREEN = 17 + SM_CYKANJIWINDOW = 18 + SM_MOUSEPRESENT = 19 + SM_CYVSCROLL = 20 + SM_CXHSCROLL = 21 + SM_DEBUG = 22 + SM_SWAPBUTTON = 23 + SM_RESERVED1 = 24 + SM_RESERVED2 = 25 + SM_RESERVED3 = 26 + SM_RESERVED4 = 27 + SM_CXMIN = 28 + SM_CYMIN = 29 + SM_CXSIZE = 30 + SM_CYSIZE = 31 + SM_CXFRAME = 32 + SM_CYFRAME = 33 + SM_CXMINTRACK = 34 + SM_CYMINTRACK = 35 + SM_CXDOUBLECLK = 36 + SM_CYDOUBLECLK = 37 + SM_CXICONSPACING = 38 + SM_CYICONSPACING = 39 + SM_MENUDROPALIGNMENT = 40 + SM_PENWINDOWS = 41 + SM_DBCSENABLED = 42 + SM_CMOUSEBUTTONS = 43 + SM_CXFIXEDFRAME = SM_CXDLGFRAME + SM_CYFIXEDFRAME = SM_CYDLGFRAME + SM_CXSIZEFRAME = SM_CXFRAME + SM_CYSIZEFRAME = SM_CYFRAME + SM_SECURE = 44 + SM_CXEDGE = 45 + SM_CYEDGE = 
46 + SM_CXMINSPACING = 47 + SM_CYMINSPACING = 48 + SM_CXSMICON = 49 + SM_CYSMICON = 50 + SM_CYSMCAPTION = 51 + SM_CXSMSIZE = 52 + SM_CYSMSIZE = 53 + SM_CXMENUSIZE = 54 + SM_CYMENUSIZE = 55 + SM_ARRANGE = 56 + SM_CXMINIMIZED = 57 + SM_CYMINIMIZED = 58 + SM_CXMAXTRACK = 59 + SM_CYMAXTRACK = 60 + SM_CXMAXIMIZED = 61 + SM_CYMAXIMIZED = 62 + SM_NETWORK = 63 + SM_CLEANBOOT = 67 + SM_CXDRAG = 68 + SM_CYDRAG = 69 + SM_SHOWSOUNDS = 70 + SM_CXMENUCHECK = 71 + SM_CYMENUCHECK = 72 + SM_SLOWMACHINE = 73 + SM_MIDEASTENABLED = 74 + SM_MOUSEWHEELPRESENT = 75 + SM_XVIRTUALSCREEN = 76 + SM_YVIRTUALSCREEN = 77 + SM_CXVIRTUALSCREEN = 78 + SM_CYVIRTUALSCREEN = 79 + SM_CMONITORS = 80 + SM_SAMEDISPLAYFORMAT = 81 + SM_IMMENABLED = 82 + SM_CXFOCUSBORDER = 83 + SM_CYFOCUSBORDER = 84 + SM_TABLETPC = 86 + SM_MEDIACENTER = 87 + SM_STARTER = 88 + SM_SERVERR2 = 89 + SM_CMETRICS = 91 + SM_REMOTESESSION = 0x1000 + SM_SHUTTINGDOWN = 0x2000 + SM_REMOTECONTROL = 0x2001 + SM_CARETBLINKINGENABLED = 0x2002 +) + +const ( + CLSCTX_INPROC_SERVER = 1 + CLSCTX_INPROC_HANDLER = 2 + CLSCTX_LOCAL_SERVER = 4 + CLSCTX_INPROC_SERVER16 = 8 + CLSCTX_REMOTE_SERVER = 16 + CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER + CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER + CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER +) + +const ( + COINIT_APARTMENTTHREADED = 0x2 + COINIT_MULTITHREADED = 0x0 + COINIT_DISABLE_OLE1DDE = 0x4 + COINIT_SPEED_OVER_MEMORY = 0x8 +) + +const ( + DISPATCH_METHOD = 1 + DISPATCH_PROPERTYGET = 2 + DISPATCH_PROPERTYPUT = 4 + DISPATCH_PROPERTYPUTREF = 8 +) + +const ( + CC_FASTCALL = iota + CC_CDECL + CC_MSCPASCAL + CC_PASCAL = CC_MSCPASCAL + CC_MACPASCAL + CC_STDCALL + CC_FPFASTCALL + CC_SYSCALL + CC_MPWCDECL + CC_MPWPASCAL + CC_MAX = CC_MPWPASCAL +) + +const ( + VT_EMPTY = 0x0 + VT_NULL = 0x1 + VT_I2 = 0x2 + VT_I4 = 0x3 + VT_R4 = 0x4 + VT_R8 = 0x5 + VT_CY = 0x6 + VT_DATE = 0x7 + VT_BSTR = 0x8 + VT_DISPATCH = 0x9 + VT_ERROR = 0xa + VT_BOOL = 0xb + VT_VARIANT = 0xc + VT_UNKNOWN = 0xd + VT_DECIMAL = 0xe + VT_I1 = 0x10 + VT_UI1 = 0x11 + VT_UI2 = 0x12 + VT_UI4 = 0x13 + VT_I8 = 0x14 + VT_UI8 = 0x15 + VT_INT = 0x16 + VT_UINT = 0x17 + VT_VOID = 0x18 + VT_HRESULT = 0x19 + VT_PTR = 0x1a + VT_SAFEARRAY = 0x1b + VT_CARRAY = 0x1c + VT_USERDEFINED = 0x1d + VT_LPSTR = 0x1e + VT_LPWSTR = 0x1f + VT_RECORD = 0x24 + VT_INT_PTR = 0x25 + VT_UINT_PTR = 0x26 + VT_FILETIME = 0x40 + VT_BLOB = 0x41 + VT_STREAM = 0x42 + VT_STORAGE = 0x43 + VT_STREAMED_OBJECT = 0x44 + VT_STORED_OBJECT = 0x45 + VT_BLOB_OBJECT = 0x46 + VT_CF = 0x47 + VT_CLSID = 0x48 + VT_BSTR_BLOB = 0xfff + VT_VECTOR = 0x1000 + VT_ARRAY = 0x2000 + VT_BYREF = 0x4000 + VT_RESERVED = 0x8000 + VT_ILLEGAL = 0xffff + VT_ILLEGALMASKED = 0xfff + VT_TYPEMASK = 0xfff +) + +const ( + DISPID_UNKNOWN = -1 + DISPID_VALUE = 0 + DISPID_PROPERTYPUT = -3 + DISPID_NEWENUM = -4 + DISPID_EVALUATE = -5 + DISPID_CONSTRUCTOR = -6 + DISPID_DESTRUCTOR = -7 + DISPID_COLLECT = -8 +) + +const ( + MONITOR_DEFAULTTONULL = 0x00000000 + MONITOR_DEFAULTTOPRIMARY = 0x00000001 + MONITOR_DEFAULTTONEAREST = 0x00000002 + + MONITORINFOF_PRIMARY = 0x00000001 +) + +const ( + CCHDEVICENAME = 32 + CCHFORMNAME = 32 +) + +const ( + IDOK = 1 + IDCANCEL = 2 + IDABORT = 3 + IDRETRY = 4 + IDIGNORE = 5 + IDYES = 6 + IDNO = 7 + IDCLOSE = 8 + IDHELP = 9 + IDTRYAGAIN = 10 + IDCONTINUE = 11 + IDTIMEOUT = 32000 +) + +// Generic WM_NOTIFY notification codes +const ( + NM_FIRST = 0 + NM_OUTOFMEMORY = NM_FIRST - 1 + NM_CLICK = NM_FIRST - 2 + NM_DBLCLK = NM_FIRST - 3 
+ NM_RETURN = NM_FIRST - 4 + NM_RCLICK = NM_FIRST - 5 + NM_RDBLCLK = NM_FIRST - 6 + NM_SETFOCUS = NM_FIRST - 7 + NM_KILLFOCUS = NM_FIRST - 8 + NM_CUSTOMDRAW = NM_FIRST - 12 + NM_HOVER = NM_FIRST - 13 + NM_NCHITTEST = NM_FIRST - 14 + NM_KEYDOWN = NM_FIRST - 15 + NM_RELEASEDCAPTURE = NM_FIRST - 16 + NM_SETCURSOR = NM_FIRST - 17 + NM_CHAR = NM_FIRST - 18 + NM_TOOLTIPSCREATED = NM_FIRST - 19 + NM_LAST = NM_FIRST - 99 +) + +// ListView messages +const ( + LVM_FIRST = 0x1000 + LVM_GETITEMCOUNT = LVM_FIRST + 4 + LVM_SETIMAGELIST = LVM_FIRST + 3 + LVM_GETIMAGELIST = LVM_FIRST + 2 + LVM_GETITEM = LVM_FIRST + 75 + LVM_SETITEM = LVM_FIRST + 76 + LVM_INSERTITEM = LVM_FIRST + 77 + LVM_DELETEITEM = LVM_FIRST + 8 + LVM_DELETEALLITEMS = LVM_FIRST + 9 + LVM_GETCALLBACKMASK = LVM_FIRST + 10 + LVM_SETCALLBACKMASK = LVM_FIRST + 11 + LVM_SETUNICODEFORMAT = CCM_SETUNICODEFORMAT + LVM_GETNEXTITEM = LVM_FIRST + 12 + LVM_FINDITEM = LVM_FIRST + 83 + LVM_GETITEMRECT = LVM_FIRST + 14 + LVM_GETSTRINGWIDTH = LVM_FIRST + 87 + LVM_HITTEST = LVM_FIRST + 18 + LVM_ENSUREVISIBLE = LVM_FIRST + 19 + LVM_SCROLL = LVM_FIRST + 20 + LVM_REDRAWITEMS = LVM_FIRST + 21 + LVM_ARRANGE = LVM_FIRST + 22 + LVM_EDITLABEL = LVM_FIRST + 118 + LVM_GETEDITCONTROL = LVM_FIRST + 24 + LVM_GETCOLUMN = LVM_FIRST + 95 + LVM_SETCOLUMN = LVM_FIRST + 96 + LVM_INSERTCOLUMN = LVM_FIRST + 97 + LVM_DELETECOLUMN = LVM_FIRST + 28 + LVM_GETCOLUMNWIDTH = LVM_FIRST + 29 + LVM_SETCOLUMNWIDTH = LVM_FIRST + 30 + LVM_GETHEADER = LVM_FIRST + 31 + LVM_CREATEDRAGIMAGE = LVM_FIRST + 33 + LVM_GETVIEWRECT = LVM_FIRST + 34 + LVM_GETTEXTCOLOR = LVM_FIRST + 35 + LVM_SETTEXTCOLOR = LVM_FIRST + 36 + LVM_GETTEXTBKCOLOR = LVM_FIRST + 37 + LVM_SETTEXTBKCOLOR = LVM_FIRST + 38 + LVM_GETTOPINDEX = LVM_FIRST + 39 + LVM_GETCOUNTPERPAGE = LVM_FIRST + 40 + LVM_GETORIGIN = LVM_FIRST + 41 + LVM_UPDATE = LVM_FIRST + 42 + LVM_SETITEMSTATE = LVM_FIRST + 43 + LVM_GETITEMSTATE = LVM_FIRST + 44 + LVM_GETITEMTEXT = LVM_FIRST + 115 + LVM_SETITEMTEXT = LVM_FIRST + 116 + LVM_SETITEMCOUNT = LVM_FIRST + 47 + LVM_SORTITEMS = LVM_FIRST + 48 + LVM_SETITEMPOSITION32 = LVM_FIRST + 49 + LVM_GETSELECTEDCOUNT = LVM_FIRST + 50 + LVM_GETITEMSPACING = LVM_FIRST + 51 + LVM_GETISEARCHSTRING = LVM_FIRST + 117 + LVM_SETICONSPACING = LVM_FIRST + 53 + LVM_SETEXTENDEDLISTVIEWSTYLE = LVM_FIRST + 54 + LVM_GETEXTENDEDLISTVIEWSTYLE = LVM_FIRST + 55 + LVM_GETSUBITEMRECT = LVM_FIRST + 56 + LVM_SUBITEMHITTEST = LVM_FIRST + 57 + LVM_SETCOLUMNORDERARRAY = LVM_FIRST + 58 + LVM_GETCOLUMNORDERARRAY = LVM_FIRST + 59 + LVM_SETHOTITEM = LVM_FIRST + 60 + LVM_GETHOTITEM = LVM_FIRST + 61 + LVM_SETHOTCURSOR = LVM_FIRST + 62 + LVM_GETHOTCURSOR = LVM_FIRST + 63 + LVM_APPROXIMATEVIEWRECT = LVM_FIRST + 64 + LVM_SETWORKAREAS = LVM_FIRST + 65 + LVM_GETWORKAREAS = LVM_FIRST + 70 + LVM_GETNUMBEROFWORKAREAS = LVM_FIRST + 73 + LVM_GETSELECTIONMARK = LVM_FIRST + 66 + LVM_SETSELECTIONMARK = LVM_FIRST + 67 + LVM_SETHOVERTIME = LVM_FIRST + 71 + LVM_GETHOVERTIME = LVM_FIRST + 72 + LVM_SETTOOLTIPS = LVM_FIRST + 74 + LVM_GETTOOLTIPS = LVM_FIRST + 78 + LVM_SORTITEMSEX = LVM_FIRST + 81 + LVM_SETBKIMAGE = LVM_FIRST + 138 + LVM_GETBKIMAGE = LVM_FIRST + 139 + LVM_SETSELECTEDCOLUMN = LVM_FIRST + 140 + LVM_SETVIEW = LVM_FIRST + 142 + LVM_GETVIEW = LVM_FIRST + 143 + LVM_INSERTGROUP = LVM_FIRST + 145 + LVM_SETGROUPINFO = LVM_FIRST + 147 + LVM_GETGROUPINFO = LVM_FIRST + 149 + LVM_REMOVEGROUP = LVM_FIRST + 150 + LVM_MOVEGROUP = LVM_FIRST + 151 + LVM_GETGROUPCOUNT = LVM_FIRST + 152 + LVM_GETGROUPINFOBYINDEX = LVM_FIRST + 153 + LVM_MOVEITEMTOGROUP = LVM_FIRST + 154 
+ LVM_GETGROUPRECT = LVM_FIRST + 98 + LVM_SETGROUPMETRICS = LVM_FIRST + 155 + LVM_GETGROUPMETRICS = LVM_FIRST + 156 + LVM_ENABLEGROUPVIEW = LVM_FIRST + 157 + LVM_SORTGROUPS = LVM_FIRST + 158 + LVM_INSERTGROUPSORTED = LVM_FIRST + 159 + LVM_REMOVEALLGROUPS = LVM_FIRST + 160 + LVM_HASGROUP = LVM_FIRST + 161 + LVM_GETGROUPSTATE = LVM_FIRST + 92 + LVM_GETFOCUSEDGROUP = LVM_FIRST + 93 + LVM_SETTILEVIEWINFO = LVM_FIRST + 162 + LVM_GETTILEVIEWINFO = LVM_FIRST + 163 + LVM_SETTILEINFO = LVM_FIRST + 164 + LVM_GETTILEINFO = LVM_FIRST + 165 + LVM_SETINSERTMARK = LVM_FIRST + 166 + LVM_GETINSERTMARK = LVM_FIRST + 167 + LVM_INSERTMARKHITTEST = LVM_FIRST + 168 + LVM_GETINSERTMARKRECT = LVM_FIRST + 169 + LVM_SETINSERTMARKCOLOR = LVM_FIRST + 170 + LVM_GETINSERTMARKCOLOR = LVM_FIRST + 171 + LVM_SETINFOTIP = LVM_FIRST + 173 + LVM_GETSELECTEDCOLUMN = LVM_FIRST + 174 + LVM_ISGROUPVIEWENABLED = LVM_FIRST + 175 + LVM_GETOUTLINECOLOR = LVM_FIRST + 176 + LVM_SETOUTLINECOLOR = LVM_FIRST + 177 + LVM_CANCELEDITLABEL = LVM_FIRST + 179 + LVM_MAPINDEXTOID = LVM_FIRST + 180 + LVM_MAPIDTOINDEX = LVM_FIRST + 181 + LVM_ISITEMVISIBLE = LVM_FIRST + 182 + LVM_GETNEXTITEMINDEX = LVM_FIRST + 211 +) + +// ListView notifications +const ( + LVN_FIRST = -100 + + LVN_ITEMCHANGING = LVN_FIRST - 0 + LVN_ITEMCHANGED = LVN_FIRST - 1 + LVN_INSERTITEM = LVN_FIRST - 2 + LVN_DELETEITEM = LVN_FIRST - 3 + LVN_DELETEALLITEMS = LVN_FIRST - 4 + LVN_BEGINLABELEDITA = LVN_FIRST - 5 + LVN_BEGINLABELEDITW = LVN_FIRST - 75 + LVN_ENDLABELEDITA = LVN_FIRST - 6 + LVN_ENDLABELEDITW = LVN_FIRST - 76 + LVN_COLUMNCLICK = LVN_FIRST - 8 + LVN_BEGINDRAG = LVN_FIRST - 9 + LVN_BEGINRDRAG = LVN_FIRST - 11 + LVN_ODCACHEHINT = LVN_FIRST - 13 + LVN_ODFINDITEMA = LVN_FIRST - 52 + LVN_ODFINDITEMW = LVN_FIRST - 79 + LVN_ITEMACTIVATE = LVN_FIRST - 14 + LVN_ODSTATECHANGED = LVN_FIRST - 15 + LVN_HOTTRACK = LVN_FIRST - 21 + LVN_GETDISPINFO = LVN_FIRST - 77 + LVN_SETDISPINFO = LVN_FIRST - 78 + LVN_KEYDOWN = LVN_FIRST - 55 + LVN_MARQUEEBEGIN = LVN_FIRST - 56 + LVN_GETINFOTIP = LVN_FIRST - 58 + LVN_INCREMENTALSEARCH = LVN_FIRST - 63 + LVN_BEGINSCROLL = LVN_FIRST - 80 + LVN_ENDSCROLL = LVN_FIRST - 81 +) + +// ListView LVNI constants +const ( + LVNI_ALL = 0 + LVNI_FOCUSED = 1 + LVNI_SELECTED = 2 + LVNI_CUT = 4 + LVNI_DROPHILITED = 8 + LVNI_ABOVE = 256 + LVNI_BELOW = 512 + LVNI_TOLEFT = 1024 + LVNI_TORIGHT = 2048 +) + +// ListView styles +const ( + LVS_ICON = 0x0000 + LVS_REPORT = 0x0001 + LVS_SMALLICON = 0x0002 + LVS_LIST = 0x0003 + LVS_TYPEMASK = 0x0003 + LVS_SINGLESEL = 0x0004 + LVS_SHOWSELALWAYS = 0x0008 + LVS_SORTASCENDING = 0x0010 + LVS_SORTDESCENDING = 0x0020 + LVS_SHAREIMAGELISTS = 0x0040 + LVS_NOLABELWRAP = 0x0080 + LVS_AUTOARRANGE = 0x0100 + LVS_EDITLABELS = 0x0200 + LVS_OWNERDATA = 0x1000 + LVS_NOSCROLL = 0x2000 + LVS_TYPESTYLEMASK = 0xfc00 + LVS_ALIGNTOP = 0x0000 + LVS_ALIGNLEFT = 0x0800 + LVS_ALIGNMASK = 0x0c00 + LVS_OWNERDRAWFIXED = 0x0400 + LVS_NOCOLUMNHEADER = 0x4000 + LVS_NOSORTHEADER = 0x8000 +) + +// ListView extended styles +const ( + LVS_EX_GRIDLINES = 0x00000001 + LVS_EX_SUBITEMIMAGES = 0x00000002 + LVS_EX_CHECKBOXES = 0x00000004 + LVS_EX_TRACKSELECT = 0x00000008 + LVS_EX_HEADERDRAGDROP = 0x00000010 + LVS_EX_FULLROWSELECT = 0x00000020 + LVS_EX_ONECLICKACTIVATE = 0x00000040 + LVS_EX_TWOCLICKACTIVATE = 0x00000080 + LVS_EX_FLATSB = 0x00000100 + LVS_EX_REGIONAL = 0x00000200 + LVS_EX_INFOTIP = 0x00000400 + LVS_EX_UNDERLINEHOT = 0x00000800 + LVS_EX_UNDERLINECOLD = 0x00001000 + LVS_EX_MULTIWORKAREAS = 0x00002000 + LVS_EX_LABELTIP = 0x00004000 + LVS_EX_BORDERSELECT = 
0x00008000 + LVS_EX_DOUBLEBUFFER = 0x00010000 + LVS_EX_HIDELABELS = 0x00020000 + LVS_EX_SINGLEROW = 0x00040000 + LVS_EX_SNAPTOGRID = 0x00080000 + LVS_EX_SIMPLESELECT = 0x00100000 +) + +// ListView column flags +const ( + LVCF_FMT = 0x0001 + LVCF_WIDTH = 0x0002 + LVCF_TEXT = 0x0004 + LVCF_SUBITEM = 0x0008 + LVCF_IMAGE = 0x0010 + LVCF_ORDER = 0x0020 +) + +// ListView column format constants +const ( + LVCFMT_LEFT = 0x0000 + LVCFMT_RIGHT = 0x0001 + LVCFMT_CENTER = 0x0002 + LVCFMT_JUSTIFYMASK = 0x0003 + LVCFMT_IMAGE = 0x0800 + LVCFMT_BITMAP_ON_RIGHT = 0x1000 + LVCFMT_COL_HAS_IMAGES = 0x8000 +) + +// ListView item flags +const ( + LVIF_TEXT = 0x00000001 + LVIF_IMAGE = 0x00000002 + LVIF_PARAM = 0x00000004 + LVIF_STATE = 0x00000008 + LVIF_INDENT = 0x00000010 + LVIF_NORECOMPUTE = 0x00000800 + LVIF_GROUPID = 0x00000100 + LVIF_COLUMNS = 0x00000200 +) + +// ListView item states +const ( + LVIS_FOCUSED = 1 + LVIS_SELECTED = 2 + LVIS_CUT = 4 + LVIS_DROPHILITED = 8 + LVIS_OVERLAYMASK = 0xF00 + LVIS_STATEIMAGEMASK = 0xF000 +) + +// ListView hit test constants +const ( + LVHT_NOWHERE = 0x00000001 + LVHT_ONITEMICON = 0x00000002 + LVHT_ONITEMLABEL = 0x00000004 + LVHT_ONITEMSTATEICON = 0x00000008 + LVHT_ONITEM = LVHT_ONITEMICON | LVHT_ONITEMLABEL | LVHT_ONITEMSTATEICON + + LVHT_ABOVE = 0x00000008 + LVHT_BELOW = 0x00000010 + LVHT_TORIGHT = 0x00000020 + LVHT_TOLEFT = 0x00000040 +) + +// ListView image list types +const ( + LVSIL_NORMAL = 0 + LVSIL_SMALL = 1 + LVSIL_STATE = 2 + LVSIL_GROUPHEADER = 3 +) + +// InitCommonControlsEx flags +const ( + ICC_LISTVIEW_CLASSES = 1 + ICC_TREEVIEW_CLASSES = 2 + ICC_BAR_CLASSES = 4 + ICC_TAB_CLASSES = 8 + ICC_UPDOWN_CLASS = 16 + ICC_PROGRESS_CLASS = 32 + ICC_HOTKEY_CLASS = 64 + ICC_ANIMATE_CLASS = 128 + ICC_WIN95_CLASSES = 255 + ICC_DATE_CLASSES = 256 + ICC_USEREX_CLASSES = 512 + ICC_COOL_CLASSES = 1024 + ICC_INTERNET_CLASSES = 2048 + ICC_PAGESCROLLER_CLASS = 4096 + ICC_NATIVEFNTCTL_CLASS = 8192 + INFOTIPSIZE = 1024 + ICC_STANDARD_CLASSES = 0x00004000 + ICC_LINK_CLASS = 0x00008000 +) + +// Dialog Codes +const ( + DLGC_WANTARROWS = 0x0001 + DLGC_WANTTAB = 0x0002 + DLGC_WANTALLKEYS = 0x0004 + DLGC_WANTMESSAGE = 0x0004 + DLGC_HASSETSEL = 0x0008 + DLGC_DEFPUSHBUTTON = 0x0010 + DLGC_UNDEFPUSHBUTTON = 0x0020 + DLGC_RADIOBUTTON = 0x0040 + DLGC_WANTCHARS = 0x0080 + DLGC_STATIC = 0x0100 + DLGC_BUTTON = 0x2000 +) + +// Get/SetWindowWord/Long offsets for use with WC_DIALOG windows +const ( + DWL_MSGRESULT = 0 + DWL_DLGPROC = 4 + DWL_USER = 8 +) + +// Registry predefined keys +const ( + HKEY_CLASSES_ROOT HKEY = 0x80000000 + HKEY_CURRENT_USER HKEY = 0x80000001 + HKEY_LOCAL_MACHINE HKEY = 0x80000002 + HKEY_USERS HKEY = 0x80000003 + HKEY_PERFORMANCE_DATA HKEY = 0x80000004 + HKEY_CURRENT_CONFIG HKEY = 0x80000005 + HKEY_DYN_DATA HKEY = 0x80000006 +) + +// Registry Key Security and Access Rights +const ( + KEY_ALL_ACCESS = 0xF003F + KEY_CREATE_SUB_KEY = 0x0004 + KEY_ENUMERATE_SUB_KEYS = 0x0008 + KEY_NOTIFY = 0x0010 + KEY_QUERY_VALUE = 0x0001 + KEY_SET_VALUE = 0x0002 + KEY_READ = 0x20019 + KEY_WRITE = 0x20006 +) + +const ( + NFR_ANSI = 1 + NFR_UNICODE = 2 + NF_QUERY = 3 + NF_REQUERY = 4 +) + +// Registry value types +const ( + RRF_RT_REG_NONE = 0x00000001 + RRF_RT_REG_SZ = 0x00000002 + RRF_RT_REG_EXPAND_SZ = 0x00000004 + RRF_RT_REG_BINARY = 0x00000008 + RRF_RT_REG_DWORD = 0x00000010 + RRF_RT_REG_MULTI_SZ = 0x00000020 + RRF_RT_REG_QWORD = 0x00000040 + RRF_RT_DWORD = (RRF_RT_REG_BINARY | RRF_RT_REG_DWORD) + RRF_RT_QWORD = (RRF_RT_REG_BINARY | RRF_RT_REG_QWORD) + RRF_RT_ANY = 0x0000ffff + 
RRF_NOEXPAND = 0x10000000 + RRF_ZEROONFAILURE = 0x20000000 + REG_PROCESS_APPKEY = 0x00000001 + REG_MUI_STRING_TRUNCATE = 0x00000001 +) + +// PeekMessage wRemoveMsg value +const ( + PM_NOREMOVE = 0x000 + PM_REMOVE = 0x001 + PM_NOYIELD = 0x002 +) + +// ImageList flags +const ( + ILC_MASK = 0x00000001 + ILC_COLOR = 0x00000000 + ILC_COLORDDB = 0x000000FE + ILC_COLOR4 = 0x00000004 + ILC_COLOR8 = 0x00000008 + ILC_COLOR16 = 0x00000010 + ILC_COLOR24 = 0x00000018 + ILC_COLOR32 = 0x00000020 + ILC_PALETTE = 0x00000800 + ILC_MIRROR = 0x00002000 + ILC_PERITEMMIRROR = 0x00008000 + ILC_ORIGINALSIZE = 0x00010000 + ILC_HIGHQUALITYSCALE = 0x00020000 +) + +// Keystroke Message Flags +const ( + KF_EXTENDED = 0x0100 + KF_DLGMODE = 0x0800 + KF_MENUMODE = 0x1000 + KF_ALTDOWN = 0x2000 + KF_REPEAT = 0x4000 + KF_UP = 0x8000 +) + +// Virtual-Key Codes +const ( + VK_LBUTTON = 0x01 + VK_RBUTTON = 0x02 + VK_CANCEL = 0x03 + VK_MBUTTON = 0x04 + VK_XBUTTON1 = 0x05 + VK_XBUTTON2 = 0x06 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_CLEAR = 0x0C + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_PAUSE = 0x13 + VK_CAPITAL = 0x14 + VK_KANA = 0x15 + VK_HANGEUL = 0x15 + VK_HANGUL = 0x15 + VK_JUNJA = 0x17 + VK_FINAL = 0x18 + VK_HANJA = 0x19 + VK_KANJI = 0x19 + VK_ESCAPE = 0x1B + VK_CONVERT = 0x1C + VK_NONCONVERT = 0x1D + VK_ACCEPT = 0x1E + VK_MODECHANGE = 0x1F + VK_SPACE = 0x20 + VK_PRIOR = 0x21 + VK_NEXT = 0x22 + VK_END = 0x23 + VK_HOME = 0x24 + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_SELECT = 0x29 + VK_PRINT = 0x2A + VK_EXECUTE = 0x2B + VK_SNAPSHOT = 0x2C + VK_INSERT = 0x2D + VK_DELETE = 0x2E + VK_HELP = 0x2F + VK_LWIN = 0x5B + VK_RWIN = 0x5C + VK_APPS = 0x5D + VK_SLEEP = 0x5F + VK_NUMPAD0 = 0x60 + VK_NUMPAD1 = 0x61 + VK_NUMPAD2 = 0x62 + VK_NUMPAD3 = 0x63 + VK_NUMPAD4 = 0x64 + VK_NUMPAD5 = 0x65 + VK_NUMPAD6 = 0x66 + VK_NUMPAD7 = 0x67 + VK_NUMPAD8 = 0x68 + VK_NUMPAD9 = 0x69 + VK_MULTIPLY = 0x6A + VK_ADD = 0x6B + VK_SEPARATOR = 0x6C + VK_SUBTRACT = 0x6D + VK_DECIMAL = 0x6E + VK_DIVIDE = 0x6F + VK_F1 = 0x70 + VK_F2 = 0x71 + VK_F3 = 0x72 + VK_F4 = 0x73 + VK_F5 = 0x74 + VK_F6 = 0x75 + VK_F7 = 0x76 + VK_F8 = 0x77 + VK_F9 = 0x78 + VK_F10 = 0x79 + VK_F11 = 0x7A + VK_F12 = 0x7B + VK_F13 = 0x7C + VK_F14 = 0x7D + VK_F15 = 0x7E + VK_F16 = 0x7F + VK_F17 = 0x80 + VK_F18 = 0x81 + VK_F19 = 0x82 + VK_F20 = 0x83 + VK_F21 = 0x84 + VK_F22 = 0x85 + VK_F23 = 0x86 + VK_F24 = 0x87 + VK_NUMLOCK = 0x90 + VK_SCROLL = 0x91 + VK_OEM_NEC_EQUAL = 0x92 + VK_OEM_FJ_JISHO = 0x92 + VK_OEM_FJ_MASSHOU = 0x93 + VK_OEM_FJ_TOUROKU = 0x94 + VK_OEM_FJ_LOYA = 0x95 + VK_OEM_FJ_ROYA = 0x96 + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + VK_RCONTROL = 0xA3 + VK_LMENU = 0xA4 + VK_RMENU = 0xA5 + VK_BROWSER_BACK = 0xA6 + VK_BROWSER_FORWARD = 0xA7 + VK_BROWSER_REFRESH = 0xA8 + VK_BROWSER_STOP = 0xA9 + VK_BROWSER_SEARCH = 0xAA + VK_BROWSER_FAVORITES = 0xAB + VK_BROWSER_HOME = 0xAC + VK_VOLUME_MUTE = 0xAD + VK_VOLUME_DOWN = 0xAE + VK_VOLUME_UP = 0xAF + VK_MEDIA_NEXT_TRACK = 0xB0 + VK_MEDIA_PREV_TRACK = 0xB1 + VK_MEDIA_STOP = 0xB2 + VK_MEDIA_PLAY_PAUSE = 0xB3 + VK_LAUNCH_MAIL = 0xB4 + VK_LAUNCH_MEDIA_SELECT = 0xB5 + VK_LAUNCH_APP1 = 0xB6 + VK_LAUNCH_APP2 = 0xB7 + VK_OEM_1 = 0xBA + VK_OEM_PLUS = 0xBB + VK_OEM_COMMA = 0xBC + VK_OEM_MINUS = 0xBD + VK_OEM_PERIOD = 0xBE + VK_OEM_2 = 0xBF + VK_OEM_3 = 0xC0 + VK_OEM_4 = 0xDB + VK_OEM_5 = 0xDC + VK_OEM_6 = 0xDD + VK_OEM_7 = 0xDE + VK_OEM_8 = 0xDF + VK_OEM_AX = 0xE1 + VK_OEM_102 = 0xE2 + VK_ICO_HELP = 0xE3 + VK_ICO_00 = 0xE4 + VK_PROCESSKEY = 0xE5 + VK_ICO_CLEAR = 0xE6 + 
VK_OEM_RESET = 0xE9 + VK_OEM_JUMP = 0xEA + VK_OEM_PA1 = 0xEB + VK_OEM_PA2 = 0xEC + VK_OEM_PA3 = 0xED + VK_OEM_WSCTRL = 0xEE + VK_OEM_CUSEL = 0xEF + VK_OEM_ATTN = 0xF0 + VK_OEM_FINISH = 0xF1 + VK_OEM_COPY = 0xF2 + VK_OEM_AUTO = 0xF3 + VK_OEM_ENLW = 0xF4 + VK_OEM_BACKTAB = 0xF5 + VK_ATTN = 0xF6 + VK_CRSEL = 0xF7 + VK_EXSEL = 0xF8 + VK_EREOF = 0xF9 + VK_PLAY = 0xFA + VK_ZOOM = 0xFB + VK_NONAME = 0xFC + VK_PA1 = 0xFD + VK_OEM_CLEAR = 0xFE +) + +// Registry Value Types +const ( + REG_NONE = 0 + REG_SZ = 1 + REG_EXPAND_SZ = 2 + REG_BINARY = 3 + REG_DWORD = 4 + REG_DWORD_LITTLE_ENDIAN = 4 + REG_DWORD_BIG_ENDIAN = 5 + REG_LINK = 6 + REG_MULTI_SZ = 7 + REG_RESOURCE_LIST = 8 + REG_FULL_RESOURCE_DESCRIPTOR = 9 + REG_RESOURCE_REQUIREMENTS_LIST = 10 + REG_QWORD = 11 + REG_QWORD_LITTLE_ENDIAN = 11 +) + +// Tooltip styles +const ( + TTS_ALWAYSTIP = 0x01 + TTS_NOPREFIX = 0x02 + TTS_NOANIMATE = 0x10 + TTS_NOFADE = 0x20 + TTS_BALLOON = 0x40 + TTS_CLOSE = 0x80 + TTS_USEVISUALSTYLE = 0x100 +) + +// Tooltip messages +const ( + TTM_ACTIVATE = (WM_USER + 1) + TTM_SETDELAYTIME = (WM_USER + 3) + TTM_ADDTOOL = (WM_USER + 50) + TTM_DELTOOL = (WM_USER + 51) + TTM_NEWTOOLRECT = (WM_USER + 52) + TTM_RELAYEVENT = (WM_USER + 7) + TTM_GETTOOLINFO = (WM_USER + 53) + TTM_SETTOOLINFO = (WM_USER + 54) + TTM_HITTEST = (WM_USER + 55) + TTM_GETTEXT = (WM_USER + 56) + TTM_UPDATETIPTEXT = (WM_USER + 57) + TTM_GETTOOLCOUNT = (WM_USER + 13) + TTM_ENUMTOOLS = (WM_USER + 58) + TTM_GETCURRENTTOOL = (WM_USER + 59) + TTM_WINDOWFROMPOINT = (WM_USER + 16) + TTM_TRACKACTIVATE = (WM_USER + 17) + TTM_TRACKPOSITION = (WM_USER + 18) + TTM_SETTIPBKCOLOR = (WM_USER + 19) + TTM_SETTIPTEXTCOLOR = (WM_USER + 20) + TTM_GETDELAYTIME = (WM_USER + 21) + TTM_GETTIPBKCOLOR = (WM_USER + 22) + TTM_GETTIPTEXTCOLOR = (WM_USER + 23) + TTM_SETMAXTIPWIDTH = (WM_USER + 24) + TTM_GETMAXTIPWIDTH = (WM_USER + 25) + TTM_SETMARGIN = (WM_USER + 26) + TTM_GETMARGIN = (WM_USER + 27) + TTM_POP = (WM_USER + 28) + TTM_UPDATE = (WM_USER + 29) + TTM_GETBUBBLESIZE = (WM_USER + 30) + TTM_ADJUSTRECT = (WM_USER + 31) + TTM_SETTITLE = (WM_USER + 33) + TTM_POPUP = (WM_USER + 34) + TTM_GETTITLE = (WM_USER + 35) +) + +// Tooltip icons +const ( + TTI_NONE = 0 + TTI_INFO = 1 + TTI_WARNING = 2 + TTI_ERROR = 3 + TTI_INFO_LARGE = 4 + TTI_WARNING_LARGE = 5 + TTI_ERROR_LARGE = 6 +) + +// Tooltip notifications +const ( + TTN_FIRST = -520 + TTN_LAST = -549 + TTN_GETDISPINFO = (TTN_FIRST - 10) + TTN_SHOW = (TTN_FIRST - 1) + TTN_POP = (TTN_FIRST - 2) + TTN_LINKCLICK = (TTN_FIRST - 3) + TTN_NEEDTEXT = TTN_GETDISPINFO +) + +const ( + TTF_IDISHWND = 0x0001 + TTF_CENTERTIP = 0x0002 + TTF_RTLREADING = 0x0004 + TTF_SUBCLASS = 0x0010 + TTF_TRACK = 0x0020 + TTF_ABSOLUTE = 0x0080 + TTF_TRANSPARENT = 0x0100 + TTF_PARSELINKS = 0x1000 + TTF_DI_SETITEM = 0x8000 +) + +const ( + SWP_NOSIZE = 0x0001 + SWP_NOMOVE = 0x0002 + SWP_NOZORDER = 0x0004 + SWP_NOREDRAW = 0x0008 + SWP_NOACTIVATE = 0x0010 + SWP_FRAMECHANGED = 0x0020 + SWP_SHOWWINDOW = 0x0040 + SWP_HIDEWINDOW = 0x0080 + SWP_NOCOPYBITS = 0x0100 + SWP_NOOWNERZORDER = 0x0200 + SWP_NOSENDCHANGING = 0x0400 + SWP_DRAWFRAME = SWP_FRAMECHANGED + SWP_NOREPOSITION = SWP_NOOWNERZORDER + SWP_DEFERERASE = 0x2000 + SWP_ASYNCWINDOWPOS = 0x4000 +) + +// Predefined window handles +const ( + HWND_BROADCAST = HWND(0xFFFF) + HWND_BOTTOM = HWND(1) + HWND_NOTOPMOST = ^HWND(1) // -2 + HWND_TOP = HWND(0) + HWND_TOPMOST = ^HWND(0) // -1 + HWND_DESKTOP = HWND(0) + HWND_MESSAGE = ^HWND(2) // -3 +) + +// Pen types +const ( + PS_COSMETIC = 0x00000000 + PS_GEOMETRIC = 0x00010000 + 
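+ // A pen style for ExtCreatePen (see gdi32.go in this package) is the bitwise
+ // OR of one PS_* type, one PS_* style and, for geometric pens, endcap and
+ // join bits, e.g. PS_GEOMETRIC | PS_SOLID | PS_ENDCAP_ROUND | PS_JOIN_MITER.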
PS_TYPE_MASK = 0x000F0000 +) + +// Pen styles +const ( + PS_SOLID = 0 + PS_DASH = 1 + PS_DOT = 2 + PS_DASHDOT = 3 + PS_DASHDOTDOT = 4 + PS_NULL = 5 + PS_INSIDEFRAME = 6 + PS_USERSTYLE = 7 + PS_ALTERNATE = 8 + PS_STYLE_MASK = 0x0000000F +) + +// Pen cap types +const ( + PS_ENDCAP_ROUND = 0x00000000 + PS_ENDCAP_SQUARE = 0x00000100 + PS_ENDCAP_FLAT = 0x00000200 + PS_ENDCAP_MASK = 0x00000F00 +) + +// Pen join types +const ( + PS_JOIN_ROUND = 0x00000000 + PS_JOIN_BEVEL = 0x00001000 + PS_JOIN_MITER = 0x00002000 + PS_JOIN_MASK = 0x0000F000 +) + +// Hatch styles +const ( + HS_HORIZONTAL = 0 + HS_VERTICAL = 1 + HS_FDIAGONAL = 2 + HS_BDIAGONAL = 3 + HS_CROSS = 4 + HS_DIAGCROSS = 5 +) + +// Stock Logical Objects +const ( + WHITE_BRUSH = 0 + LTGRAY_BRUSH = 1 + GRAY_BRUSH = 2 + DKGRAY_BRUSH = 3 + BLACK_BRUSH = 4 + NULL_BRUSH = 5 + HOLLOW_BRUSH = NULL_BRUSH + WHITE_PEN = 6 + BLACK_PEN = 7 + NULL_PEN = 8 + OEM_FIXED_FONT = 10 + ANSI_FIXED_FONT = 11 + ANSI_VAR_FONT = 12 + SYSTEM_FONT = 13 + DEVICE_DEFAULT_FONT = 14 + DEFAULT_PALETTE = 15 + SYSTEM_FIXED_FONT = 16 + DEFAULT_GUI_FONT = 17 + DC_BRUSH = 18 + DC_PEN = 19 +) + +// Brush styles +const ( + BS_SOLID = 0 + BS_NULL = 1 + BS_HOLLOW = BS_NULL + BS_HATCHED = 2 + BS_PATTERN = 3 + BS_INDEXED = 4 + BS_DIBPATTERN = 5 + BS_DIBPATTERNPT = 6 + BS_PATTERN8X8 = 7 + BS_DIBPATTERN8X8 = 8 + BS_MONOPATTERN = 9 +) + +// TRACKMOUSEEVENT flags +const ( + TME_HOVER = 0x00000001 + TME_LEAVE = 0x00000002 + TME_NONCLIENT = 0x00000010 + TME_QUERY = 0x40000000 + TME_CANCEL = 0x80000000 + + HOVER_DEFAULT = 0xFFFFFFFF +) + +// WM_NCHITTEST and MOUSEHOOKSTRUCT Mouse Position Codes +const ( + HTERROR = (-2) + HTTRANSPARENT = (-1) + HTNOWHERE = 0 + HTCLIENT = 1 + HTCAPTION = 2 + HTSYSMENU = 3 + HTGROWBOX = 4 + HTSIZE = HTGROWBOX + HTMENU = 5 + HTHSCROLL = 6 + HTVSCROLL = 7 + HTMINBUTTON = 8 + HTMAXBUTTON = 9 + HTLEFT = 10 + HTRIGHT = 11 + HTTOP = 12 + HTTOPLEFT = 13 + HTTOPRIGHT = 14 + HTBOTTOM = 15 + HTBOTTOMLEFT = 16 + HTBOTTOMRIGHT = 17 + HTBORDER = 18 + HTREDUCE = HTMINBUTTON + HTZOOM = HTMAXBUTTON + HTSIZEFIRST = HTLEFT + HTSIZELAST = HTBOTTOMRIGHT + HTOBJECT = 19 + HTCLOSE = 20 + HTHELP = 21 +) + +// DrawText[Ex] format flags +const ( + DT_TOP = 0x00000000 + DT_LEFT = 0x00000000 + DT_CENTER = 0x00000001 + DT_RIGHT = 0x00000002 + DT_VCENTER = 0x00000004 + DT_BOTTOM = 0x00000008 + DT_WORDBREAK = 0x00000010 + DT_SINGLELINE = 0x00000020 + DT_EXPANDTABS = 0x00000040 + DT_TABSTOP = 0x00000080 + DT_NOCLIP = 0x00000100 + DT_EXTERNALLEADING = 0x00000200 + DT_CALCRECT = 0x00000400 + DT_NOPREFIX = 0x00000800 + DT_INTERNAL = 0x00001000 + DT_EDITCONTROL = 0x00002000 + DT_PATH_ELLIPSIS = 0x00004000 + DT_END_ELLIPSIS = 0x00008000 + DT_MODIFYSTRING = 0x00010000 + DT_RTLREADING = 0x00020000 + DT_WORD_ELLIPSIS = 0x00040000 + DT_NOFULLWIDTHCHARBREAK = 0x00080000 + DT_HIDEPREFIX = 0x00100000 + DT_PREFIXONLY = 0x00200000 +) + +const CLR_INVALID = 0xFFFFFFFF + +// Background Modes +const ( + TRANSPARENT = 1 + OPAQUE = 2 + BKMODE_LAST = 2 +) + +// Global Memory Flags +const ( + GMEM_FIXED = 0x0000 + GMEM_MOVEABLE = 0x0002 + GMEM_NOCOMPACT = 0x0010 + GMEM_NODISCARD = 0x0020 + GMEM_ZEROINIT = 0x0040 + GMEM_MODIFY = 0x0080 + GMEM_DISCARDABLE = 0x0100 + GMEM_NOT_BANKED = 0x1000 + GMEM_SHARE = 0x2000 + GMEM_DDESHARE = 0x2000 + GMEM_NOTIFY = 0x4000 + GMEM_LOWER = GMEM_NOT_BANKED + GMEM_VALID_FLAGS = 0x7F72 + GMEM_INVALID_HANDLE = 0x8000 + GHND = (GMEM_MOVEABLE | GMEM_ZEROINIT) + GPTR = (GMEM_FIXED | GMEM_ZEROINIT) +) + +// Ternary raster operations +const ( + SRCCOPY = 0x00CC0020 + SRCPAINT = 
0x00EE0086 + SRCAND = 0x008800C6 + SRCINVERT = 0x00660046 + SRCERASE = 0x00440328 + NOTSRCCOPY = 0x00330008 + NOTSRCERASE = 0x001100A6 + MERGECOPY = 0x00C000CA + MERGEPAINT = 0x00BB0226 + PATCOPY = 0x00F00021 + PATPAINT = 0x00FB0A09 + PATINVERT = 0x005A0049 + DSTINVERT = 0x00550009 + BLACKNESS = 0x00000042 + WHITENESS = 0x00FF0062 + NOMIRRORBITMAP = 0x80000000 + CAPTUREBLT = 0x40000000 +) + +// Clipboard formats +const ( + CF_TEXT = 1 + CF_BITMAP = 2 + CF_METAFILEPICT = 3 + CF_SYLK = 4 + CF_DIF = 5 + CF_TIFF = 6 + CF_OEMTEXT = 7 + CF_DIB = 8 + CF_PALETTE = 9 + CF_PENDATA = 10 + CF_RIFF = 11 + CF_WAVE = 12 + CF_UNICODETEXT = 13 + CF_ENHMETAFILE = 14 + CF_HDROP = 15 + CF_LOCALE = 16 + CF_DIBV5 = 17 + CF_MAX = 18 + CF_OWNERDISPLAY = 0x0080 + CF_DSPTEXT = 0x0081 + CF_DSPBITMAP = 0x0082 + CF_DSPMETAFILEPICT = 0x0083 + CF_DSPENHMETAFILE = 0x008E + CF_PRIVATEFIRST = 0x0200 + CF_PRIVATELAST = 0x02FF + CF_GDIOBJFIRST = 0x0300 + CF_GDIOBJLAST = 0x03FF +) + +// Bitmap compression formats +const ( + BI_RGB = 0 + BI_RLE8 = 1 + BI_RLE4 = 2 + BI_BITFIELDS = 3 + BI_JPEG = 4 + BI_PNG = 5 +) + +// SetDIBitsToDevice fuColorUse +const ( + DIB_PAL_COLORS = 1 + DIB_RGB_COLORS = 0 +) + +const ( + STANDARD_RIGHTS_REQUIRED = 0x000F0000 +) + +// Service Control Manager object specific access types +const ( + SC_MANAGER_CONNECT = 0x0001 + SC_MANAGER_CREATE_SERVICE = 0x0002 + SC_MANAGER_ENUMERATE_SERVICE = 0x0004 + SC_MANAGER_LOCK = 0x0008 + SC_MANAGER_QUERY_LOCK_STATUS = 0x0010 + SC_MANAGER_MODIFY_BOOT_CONFIG = 0x0020 + SC_MANAGER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SC_MANAGER_CONNECT | SC_MANAGER_CREATE_SERVICE | SC_MANAGER_ENUMERATE_SERVICE | SC_MANAGER_LOCK | SC_MANAGER_QUERY_LOCK_STATUS | SC_MANAGER_MODIFY_BOOT_CONFIG +) + +// Service Types (Bit Mask) +const ( + SERVICE_KERNEL_DRIVER = 0x00000001 + SERVICE_FILE_SYSTEM_DRIVER = 0x00000002 + SERVICE_ADAPTER = 0x00000004 + SERVICE_RECOGNIZER_DRIVER = 0x00000008 + SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER + SERVICE_WIN32_OWN_PROCESS = 0x00000010 + SERVICE_WIN32_SHARE_PROCESS = 0x00000020 + SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS + SERVICE_INTERACTIVE_PROCESS = 0x00000100 + SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS +) + +// Service State -- for CurrentState +const ( + SERVICE_STOPPED = 0x00000001 + SERVICE_START_PENDING = 0x00000002 + SERVICE_STOP_PENDING = 0x00000003 + SERVICE_RUNNING = 0x00000004 + SERVICE_CONTINUE_PENDING = 0x00000005 + SERVICE_PAUSE_PENDING = 0x00000006 + SERVICE_PAUSED = 0x00000007 +) + +// Controls Accepted (Bit Mask) +const ( + SERVICE_ACCEPT_STOP = 0x00000001 + SERVICE_ACCEPT_PAUSE_CONTINUE = 0x00000002 + SERVICE_ACCEPT_SHUTDOWN = 0x00000004 + SERVICE_ACCEPT_PARAMCHANGE = 0x00000008 + SERVICE_ACCEPT_NETBINDCHANGE = 0x00000010 + SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 0x00000020 + SERVICE_ACCEPT_POWEREVENT = 0x00000040 + SERVICE_ACCEPT_SESSIONCHANGE = 0x00000080 + SERVICE_ACCEPT_PRESHUTDOWN = 0x00000100 + SERVICE_ACCEPT_TIMECHANGE = 0x00000200 + SERVICE_ACCEPT_TRIGGEREVENT = 0x00000400 +) + +// Service object specific access type +const ( + SERVICE_QUERY_CONFIG = 0x0001 + SERVICE_CHANGE_CONFIG = 0x0002 + SERVICE_QUERY_STATUS = 0x0004 + SERVICE_ENUMERATE_DEPENDENTS = 0x0008 + SERVICE_START = 0x0010 + SERVICE_STOP = 0x0020 + SERVICE_PAUSE_CONTINUE = 0x0040 + SERVICE_INTERROGATE = 0x0080 + SERVICE_USER_DEFINED_CONTROL = 0x0100 + + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | + SERVICE_QUERY_CONFIG | +
SERVICE_CHANGE_CONFIG | + SERVICE_QUERY_STATUS | + SERVICE_ENUMERATE_DEPENDENTS | + SERVICE_START | + SERVICE_STOP | + SERVICE_PAUSE_CONTINUE | + SERVICE_INTERROGATE | + SERVICE_USER_DEFINED_CONTROL +) + +// MapVirtualKey maptypes +const ( + MAPVK_VK_TO_CHAR = 2 + MAPVK_VK_TO_VSC = 0 + MAPVK_VSC_TO_VK = 1 + MAPVK_VSC_TO_VK_EX = 3 +) + +// ReadEventLog Flags +const ( + EVENTLOG_SEEK_READ = 0x0002 + EVENTLOG_SEQUENTIAL_READ = 0x0001 + EVENTLOG_FORWARDS_READ = 0x0004 + EVENTLOG_BACKWARDS_READ = 0x0008 +) + +// CreateToolhelp32Snapshot flags +const ( + TH32CS_SNAPHEAPLIST = 0x00000001 + TH32CS_SNAPPROCESS = 0x00000002 + TH32CS_SNAPTHREAD = 0x00000004 + TH32CS_SNAPMODULE = 0x00000008 + TH32CS_SNAPMODULE32 = 0x00000010 + TH32CS_INHERIT = 0x80000000 + TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD +) + +const ( + MAX_MODULE_NAME32 = 255 + MAX_PATH = 260 +) + +const ( + FOREGROUND_BLUE = 0x0001 + FOREGROUND_GREEN = 0x0002 + FOREGROUND_RED = 0x0004 + FOREGROUND_INTENSITY = 0x0008 + BACKGROUND_BLUE = 0x0010 + BACKGROUND_GREEN = 0x0020 + BACKGROUND_RED = 0x0040 + BACKGROUND_INTENSITY = 0x0080 + COMMON_LVB_LEADING_BYTE = 0x0100 + COMMON_LVB_TRAILING_BYTE = 0x0200 + COMMON_LVB_GRID_HORIZONTAL = 0x0400 + COMMON_LVB_GRID_LVERTICAL = 0x0800 + COMMON_LVB_GRID_RVERTICAL = 0x1000 + COMMON_LVB_REVERSE_VIDEO = 0x4000 + COMMON_LVB_UNDERSCORE = 0x8000 +) + +// Flags used by the DWM_BLURBEHIND structure to indicate +// which of its members contain valid information. +const ( + DWM_BB_ENABLE = 0x00000001 // A value for the fEnable member has been specified. + DWM_BB_BLURREGION = 0x00000002 // A value for the hRgnBlur member has been specified. + DWM_BB_TRANSITIONONMAXIMIZED = 0x00000004 // A value for the fTransitionOnMaximized member has been specified. +) + +// Flags used by the DwmEnableComposition function +// to change the state of Desktop Window Manager (DWM) composition. +const ( + DWM_EC_DISABLECOMPOSITION = 0 // Disable composition + DWM_EC_ENABLECOMPOSITION = 1 // Enable composition +) + +// enum-lite implementation for the following constant structure +type DWM_SHOWCONTACT int32 + +const ( + DWMSC_DOWN = 0x00000001 + DWMSC_UP = 0x00000002 + DWMSC_DRAG = 0x00000004 + DWMSC_HOLD = 0x00000008 + DWMSC_PENBARREL = 0x00000010 + DWMSC_NONE = 0x00000000 + DWMSC_ALL = 0xFFFFFFFF +) + +// enum-lite implementation for the following constant structure +type DWM_SOURCE_FRAME_SAMPLING int32 + +// TODO: need to verify this construction +// Flags used by the DwmSetPresentParameters function +// to specify the frame sampling type +const ( + DWM_SOURCE_FRAME_SAMPLING_POINT = iota + 1 + DWM_SOURCE_FRAME_SAMPLING_COVERAGE + DWM_SOURCE_FRAME_SAMPLING_LAST +) + +// Flags used by the DWM_THUMBNAIL_PROPERTIES structure to +// indicate which of its members contain valid information. 
+const ( + DWM_TNP_RECTDESTINATION = 0x00000001 // A value for the rcDestination member has been specified + DWM_TNP_RECTSOURCE = 0x00000002 // A value for the rcSource member has been specified + DWM_TNP_OPACITY = 0x00000004 // A value for the opacity member has been specified + DWM_TNP_VISIBLE = 0x00000008 // A value for the fVisible member has been specified + DWM_TNP_SOURCECLIENTAREAONLY = 0x00000010 // A value for the fSourceClientAreaOnly member has been specified +) + +// enum-lite implementation for the following constant structure +type DWMFLIP3DWINDOWPOLICY int32 + +// TODO: need to verify this construction +// Flags used by the DwmSetWindowAttribute function +// to specify the Flip3D window policy +const ( + DWMFLIP3D_DEFAULT = iota + 1 + DWMFLIP3D_EXCLUDEBELOW + DWMFLIP3D_EXCLUDEABOVE + DWMFLIP3D_LAST +) + +// enum-lite implementation for the following constant structure +type DWMNCRENDERINGPOLICY int32 + +// TODO: need to verify this construction +// Flags used by the DwmSetWindowAttribute function +// to specify the non-client area rendering policy +const ( + DWMNCRP_USEWINDOWSTYLE = iota + 1 + DWMNCRP_DISABLED + DWMNCRP_ENABLED + DWMNCRP_LAST +) + +// enum-lite implementation for the following constant structure +type DWMTRANSITION_OWNEDWINDOW_TARGET int32 + +const ( + DWMTRANSITION_OWNEDWINDOW_NULL = -1 + DWMTRANSITION_OWNEDWINDOW_REPOSITION = 0 +) + +// enum-lite implementation for the following constant structure +type DWMWINDOWATTRIBUTE int32 + +// TODO: need to verify this construction +// Flags used by the DwmGetWindowAttribute and DwmSetWindowAttribute functions +// to specify window attributes for non-client rendering +const ( + DWMWA_NCRENDERING_ENABLED = iota + 1 + DWMWA_NCRENDERING_POLICY + DWMWA_TRANSITIONS_FORCEDISABLED + DWMWA_ALLOW_NCPAINT + DWMWA_CAPTION_BUTTON_BOUNDS + DWMWA_NONCLIENT_RTL_LAYOUT + DWMWA_FORCE_ICONIC_REPRESENTATION + DWMWA_FLIP3D_POLICY + DWMWA_EXTENDED_FRAME_BOUNDS + DWMWA_HAS_ICONIC_BITMAP + DWMWA_DISALLOW_PEEK + DWMWA_EXCLUDED_FROM_PEEK + DWMWA_CLOAK + DWMWA_CLOAKED + DWMWA_FREEZE_REPRESENTATION + DWMWA_LAST +) + +// enum-lite implementation for the following constant structure +type GESTURE_TYPE int32 + +// TODO: use iota? 
+// Identifies the gesture type +const ( + GT_PEN_TAP = 0 + GT_PEN_DOUBLETAP = 1 + GT_PEN_RIGHTTAP = 2 + GT_PEN_PRESSANDHOLD = 3 + GT_PEN_PRESSANDHOLDABORT = 4 + GT_TOUCH_TAP = 5 + GT_TOUCH_DOUBLETAP = 6 + GT_TOUCH_RIGHTTAP = 7 + GT_TOUCH_PRESSANDHOLD = 8 + GT_TOUCH_PRESSANDHOLDABORT = 9 + GT_TOUCH_PRESSANDTAP = 10 +) + +// Icons +const ( + ICON_SMALL = 0 + ICON_BIG = 1 + ICON_SMALL2 = 2 +) + +const ( + SIZE_RESTORED = 0 + SIZE_MINIMIZED = 1 + SIZE_MAXIMIZED = 2 + SIZE_MAXSHOW = 3 + SIZE_MAXHIDE = 4 +) + +// XButton values +const ( + XBUTTON1 = 1 + XBUTTON2 = 2 +) + +// Devmode +const ( + DM_SPECVERSION = 0x0401 + + DM_ORIENTATION = 0x00000001 + DM_PAPERSIZE = 0x00000002 + DM_PAPERLENGTH = 0x00000004 + DM_PAPERWIDTH = 0x00000008 + DM_SCALE = 0x00000010 + DM_POSITION = 0x00000020 + DM_NUP = 0x00000040 + DM_DISPLAYORIENTATION = 0x00000080 + DM_COPIES = 0x00000100 + DM_DEFAULTSOURCE = 0x00000200 + DM_PRINTQUALITY = 0x00000400 + DM_COLOR = 0x00000800 + DM_DUPLEX = 0x00001000 + DM_YRESOLUTION = 0x00002000 + DM_TTOPTION = 0x00004000 + DM_COLLATE = 0x00008000 + DM_FORMNAME = 0x00010000 + DM_LOGPIXELS = 0x00020000 + DM_BITSPERPEL = 0x00040000 + DM_PELSWIDTH = 0x00080000 + DM_PELSHEIGHT = 0x00100000 + DM_DISPLAYFLAGS = 0x00200000 + DM_DISPLAYFREQUENCY = 0x00400000 + DM_ICMMETHOD = 0x00800000 + DM_ICMINTENT = 0x01000000 + DM_MEDIATYPE = 0x02000000 + DM_DITHERTYPE = 0x04000000 + DM_PANNINGWIDTH = 0x08000000 + DM_PANNINGHEIGHT = 0x10000000 + DM_DISPLAYFIXEDOUTPUT = 0x20000000 +) + +// ChangeDisplaySettings +const ( + CDS_UPDATEREGISTRY = 0x00000001 + CDS_TEST = 0x00000002 + CDS_FULLSCREEN = 0x00000004 + CDS_GLOBAL = 0x00000008 + CDS_SET_PRIMARY = 0x00000010 + CDS_VIDEOPARAMETERS = 0x00000020 + CDS_RESET = 0x40000000 + CDS_NORESET = 0x10000000 + + DISP_CHANGE_SUCCESSFUL = 0 + DISP_CHANGE_RESTART = 1 + DISP_CHANGE_FAILED = -1 + DISP_CHANGE_BADMODE = -2 + DISP_CHANGE_NOTUPDATED = -3 + DISP_CHANGE_BADFLAGS = -4 + DISP_CHANGE_BADPARAM = -5 + DISP_CHANGE_BADDUALVIEW = -6 +) + +const ( + ENUM_CURRENT_SETTINGS = 0xFFFFFFFF + ENUM_REGISTRY_SETTINGS = 0xFFFFFFFE +) + +// PIXELFORMATDESCRIPTOR +const ( + PFD_TYPE_RGBA = 0 + PFD_TYPE_COLORINDEX = 1 + + PFD_MAIN_PLANE = 0 + PFD_OVERLAY_PLANE = 1 + PFD_UNDERLAY_PLANE = -1 + + PFD_DOUBLEBUFFER = 0x00000001 + PFD_STEREO = 0x00000002 + PFD_DRAW_TO_WINDOW = 0x00000004 + PFD_DRAW_TO_BITMAP = 0x00000008 + PFD_SUPPORT_GDI = 0x00000010 + PFD_SUPPORT_OPENGL = 0x00000020 + PFD_GENERIC_FORMAT = 0x00000040 + PFD_NEED_PALETTE = 0x00000080 + PFD_NEED_SYSTEM_PALETTE = 0x00000100 + PFD_SWAP_EXCHANGE = 0x00000200 + PFD_SWAP_COPY = 0x00000400 + PFD_SWAP_LAYER_BUFFERS = 0x00000800 + PFD_GENERIC_ACCELERATED = 0x00001000 + PFD_SUPPORT_DIRECTDRAW = 0x00002000 + PFD_DIRECT3D_ACCELERATED = 0x00004000 + PFD_SUPPORT_COMPOSITION = 0x00008000 + + PFD_DEPTH_DONTCARE = 0x20000000 + PFD_DOUBLEBUFFER_DONTCARE = 0x40000000 + PFD_STEREO_DONTCARE = 0x80000000 +) + +const ( + INPUT_MOUSE = 0 + INPUT_KEYBOARD = 1 + INPUT_HARDWARE = 2 +) + +const ( + MOUSEEVENTF_ABSOLUTE = 0x8000 + MOUSEEVENTF_HWHEEL = 0x01000 + MOUSEEVENTF_MOVE = 0x0001 + MOUSEEVENTF_MOVE_NOCOALESCE = 0x2000 + MOUSEEVENTF_LEFTDOWN = 0x0002 + MOUSEEVENTF_LEFTUP = 0x0004 + MOUSEEVENTF_RIGHTDOWN = 0x0008 + MOUSEEVENTF_RIGHTUP = 0x0010 + MOUSEEVENTF_MIDDLEDOWN = 0x0020 + MOUSEEVENTF_MIDDLEUP = 0x0040 + MOUSEEVENTF_VIRTUALDESK = 0x4000 + MOUSEEVENTF_WHEEL = 0x0800 + MOUSEEVENTF_XDOWN = 0x0080 + MOUSEEVENTF_XUP = 0x0100 +) diff --git a/vendor/github.com/shirou/w32/dwmapi.go b/vendor/github.com/shirou/w32/dwmapi.go new file mode 100644 index 
00000000..139b9374 --- /dev/null +++ b/vendor/github.com/shirou/w32/dwmapi.go @@ -0,0 +1,256 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "fmt" + "syscall" + "unsafe" +) + +// DEFINED IN THE DWM API BUT NOT IMPLEMENTED BY MS: +// DwmAttachMilContent +// DwmDetachMilContent +// DwmEnableComposition +// DwmGetGraphicsStreamClient +// DwmGetGraphicsStreamTransformHint + +var ( + moddwmapi = syscall.NewLazyDLL("dwmapi.dll") + + procDwmDefWindowProc = moddwmapi.NewProc("DwmDefWindowProc") + procDwmEnableBlurBehindWindow = moddwmapi.NewProc("DwmEnableBlurBehindWindow") + procDwmEnableMMCSS = moddwmapi.NewProc("DwmEnableMMCSS") + procDwmExtendFrameIntoClientArea = moddwmapi.NewProc("DwmExtendFrameIntoClientArea") + procDwmFlush = moddwmapi.NewProc("DwmFlush") + procDwmGetColorizationColor = moddwmapi.NewProc("DwmGetColorizationColor") + procDwmGetCompositionTimingInfo = moddwmapi.NewProc("DwmGetCompositionTimingInfo") + procDwmGetTransportAttributes = moddwmapi.NewProc("DwmGetTransportAttributes") + procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") + procDwmInvalidateIconicBitmaps = moddwmapi.NewProc("DwmInvalidateIconicBitmaps") + procDwmIsCompositionEnabled = moddwmapi.NewProc("DwmIsCompositionEnabled") + procDwmModifyPreviousDxFrameDuration = moddwmapi.NewProc("DwmModifyPreviousDxFrameDuration") + procDwmQueryThumbnailSourceSize = moddwmapi.NewProc("DwmQueryThumbnailSourceSize") + procDwmRegisterThumbnail = moddwmapi.NewProc("DwmRegisterThumbnail") + procDwmRenderGesture = moddwmapi.NewProc("DwmRenderGesture") + procDwmSetDxFrameDuration = moddwmapi.NewProc("DwmSetDxFrameDuration") + procDwmSetIconicLivePreviewBitmap = moddwmapi.NewProc("DwmSetIconicLivePreviewBitmap") + procDwmSetIconicThumbnail = moddwmapi.NewProc("DwmSetIconicThumbnail") + procDwmSetPresentParameters = moddwmapi.NewProc("DwmSetPresentParameters") + procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procDwmShowContact = moddwmapi.NewProc("DwmShowContact") + procDwmTetherContact = moddwmapi.NewProc("DwmTetherContact") + procDwmTransitionOwnedWindow = moddwmapi.NewProc("DwmTransitionOwnedWindow") + procDwmUnregisterThumbnail = moddwmapi.NewProc("DwmUnregisterThumbnail") + procDwmUpdateThumbnailProperties = moddwmapi.NewProc("DwmUpdateThumbnailProperties") +) + +func DwmDefWindowProc(hWnd HWND, msg uint, wParam, lParam uintptr) (bool, uint) { + var result uint + ret, _, _ := procDwmDefWindowProc.Call( + uintptr(hWnd), + uintptr(msg), + wParam, + lParam, + uintptr(unsafe.Pointer(&result))) + return ret != 0, result +} + +func DwmEnableBlurBehindWindow(hWnd HWND, pBlurBehind *DWM_BLURBEHIND) HRESULT { + ret, _, _ := procDwmEnableBlurBehindWindow.Call( + uintptr(hWnd), + uintptr(unsafe.Pointer(pBlurBehind))) + return HRESULT(ret) +} + +func DwmEnableMMCSS(fEnableMMCSS bool) HRESULT { + ret, _, _ := procDwmEnableMMCSS.Call( + uintptr(BoolToBOOL(fEnableMMCSS))) + return HRESULT(ret) +} + +func DwmExtendFrameIntoClientArea(hWnd HWND, pMarInset *MARGINS) HRESULT { + ret, _, _ := procDwmExtendFrameIntoClientArea.Call( + uintptr(hWnd), + uintptr(unsafe.Pointer(pMarInset))) + return HRESULT(ret) +} + +func DwmFlush() HRESULT { + ret, _, _ := procDwmFlush.Call() + return HRESULT(ret) +} + +func DwmGetColorizationColor(pcrColorization *uint32, pfOpaqueBlend *BOOL) HRESULT { + ret, _, _ := procDwmGetColorizationColor.Call( + 
uintptr(unsafe.Pointer(pcrColorization)), + uintptr(unsafe.Pointer(pfOpaqueBlend))) + return HRESULT(ret) +} + +func DwmGetCompositionTimingInfo(hWnd HWND, pTimingInfo *DWM_TIMING_INFO) HRESULT { + ret, _, _ := procDwmGetCompositionTimingInfo.Call( + uintptr(hWnd), + uintptr(unsafe.Pointer(pTimingInfo))) + return HRESULT(ret) +} + +func DwmGetTransportAttributes(pfIsRemoting *BOOL, pfIsConnected *BOOL, pDwGeneration *uint32) HRESULT { + ret, _, _ := procDwmGetTransportAttributes.Call( + uintptr(unsafe.Pointer(pfIsRemoting)), + uintptr(unsafe.Pointer(pfIsConnected)), + uintptr(unsafe.Pointer(pDwGeneration))) + return HRESULT(ret) +} + +// TODO: verify handling of variable arguments +func DwmGetWindowAttribute(hWnd HWND, dwAttribute uint32) (pAttribute interface{}, result HRESULT) { + var pvAttribute, pvAttrSize uintptr + switch dwAttribute { + case DWMWA_NCRENDERING_ENABLED: + v := new(BOOL) + pAttribute = v + pvAttribute = uintptr(unsafe.Pointer(v)) + pvAttrSize = unsafe.Sizeof(*v) + case DWMWA_CAPTION_BUTTON_BOUNDS, DWMWA_EXTENDED_FRAME_BOUNDS: + v := new(RECT) + pAttribute = v + pvAttribute = uintptr(unsafe.Pointer(v)) + pvAttrSize = unsafe.Sizeof(*v) + case DWMWA_CLOAKED: + panic(fmt.Sprintf("DwmGetWindowAttribute(%d) is not currently supported.", dwAttribute)) + default: + panic(fmt.Sprintf("DwmGetWindowAttribute(%d) is not valid.", dwAttribute)) + } + + ret, _, _ := procDwmGetWindowAttribute.Call( + uintptr(hWnd), + uintptr(dwAttribute), + pvAttribute, + pvAttrSize) + result = HRESULT(ret) + return +} + +func DwmInvalidateIconicBitmaps(hWnd HWND) HRESULT { + ret, _, _ := procDwmInvalidateIconicBitmaps.Call( + uintptr(hWnd)) + return HRESULT(ret) +} + +func DwmIsCompositionEnabled(pfEnabled *BOOL) HRESULT { + ret, _, _ := procDwmIsCompositionEnabled.Call( + uintptr(unsafe.Pointer(pfEnabled))) + return HRESULT(ret) +} + +func DwmModifyPreviousDxFrameDuration(hWnd HWND, cRefreshes int, fRelative bool) HRESULT { + ret, _, _ := procDwmModifyPreviousDxFrameDuration.Call( + uintptr(hWnd), + uintptr(cRefreshes), + uintptr(BoolToBOOL(fRelative))) + return HRESULT(ret) +} + +func DwmQueryThumbnailSourceSize(hThumbnail HTHUMBNAIL, pSize *SIZE) HRESULT { + ret, _, _ := procDwmQueryThumbnailSourceSize.Call( + uintptr(hThumbnail), + uintptr(unsafe.Pointer(pSize))) + return HRESULT(ret) +} + +func DwmRegisterThumbnail(hWndDestination HWND, hWndSource HWND, phThumbnailId *HTHUMBNAIL) HRESULT { + ret, _, _ := procDwmRegisterThumbnail.Call( + uintptr(hWndDestination), + uintptr(hWndSource), + uintptr(unsafe.Pointer(phThumbnailId))) + return HRESULT(ret) +} + +func DwmRenderGesture(gt GESTURE_TYPE, cContacts uint, pdwPointerID *uint32, pPoints *POINT) { + procDwmRenderGesture.Call( + uintptr(gt), + uintptr(cContacts), + uintptr(unsafe.Pointer(pdwPointerID)), + uintptr(unsafe.Pointer(pPoints))) + return +} + +func DwmSetDxFrameDuration(hWnd HWND, cRefreshes int) HRESULT { + ret, _, _ := procDwmSetDxFrameDuration.Call( + uintptr(hWnd), + uintptr(cRefreshes)) + return HRESULT(ret) +} + +func DwmSetIconicLivePreviewBitmap(hWnd HWND, hbmp HBITMAP, pptClient *POINT, dwSITFlags uint32) HRESULT { + ret, _, _ := procDwmSetIconicLivePreviewBitmap.Call( + uintptr(hWnd), + uintptr(hbmp), + uintptr(unsafe.Pointer(pptClient)), + uintptr(dwSITFlags)) + return HRESULT(ret) +} + +func DwmSetIconicThumbnail(hWnd HWND, hbmp HBITMAP, dwSITFlags uint32) HRESULT { + ret, _, _ := procDwmSetIconicThumbnail.Call( + uintptr(hWnd), + uintptr(hbmp), + uintptr(dwSITFlags)) + return HRESULT(ret) +} + +func 
DwmSetPresentParameters(hWnd HWND, pPresentParams *DWM_PRESENT_PARAMETERS) HRESULT { + ret, _, _ := procDwmSetPresentParameters.Call( + uintptr(hWnd), + uintptr(unsafe.Pointer(pPresentParams))) + return HRESULT(ret) +} + +func DwmSetWindowAttribute(hWnd HWND, dwAttribute uint32, pvAttribute LPCVOID, cbAttribute uint32) HRESULT { + ret, _, _ := procDwmSetWindowAttribute.Call( + uintptr(hWnd), + uintptr(dwAttribute), + uintptr(pvAttribute), + uintptr(cbAttribute)) + return HRESULT(ret) +} + +func DwmShowContact(dwPointerID uint32, eShowContact DWM_SHOWCONTACT) { + procDwmShowContact.Call( + uintptr(dwPointerID), + uintptr(eShowContact)) + return +} + +func DwmTetherContact(dwPointerID uint32, fEnable bool, ptTether POINT) { + procDwmTetherContact.Call( + uintptr(dwPointerID), + uintptr(BoolToBOOL(fEnable)), + uintptr(unsafe.Pointer(&ptTether))) + return +} + +func DwmTransitionOwnedWindow(hWnd HWND, target DWMTRANSITION_OWNEDWINDOW_TARGET) { + procDwmTransitionOwnedWindow.Call( + uintptr(hWnd), + uintptr(target)) + return +} + +func DwmUnregisterThumbnail(hThumbnailId HTHUMBNAIL) HRESULT { + ret, _, _ := procDwmUnregisterThumbnail.Call( + uintptr(hThumbnailId)) + return HRESULT(ret) +} + +func DwmUpdateThumbnailProperties(hThumbnailId HTHUMBNAIL, ptnProperties *DWM_THUMBNAIL_PROPERTIES) HRESULT { + ret, _, _ := procDwmUpdateThumbnailProperties.Call( + uintptr(hThumbnailId), + uintptr(unsafe.Pointer(ptnProperties))) + return HRESULT(ret) +} diff --git a/vendor/github.com/shirou/w32/gdi32.go b/vendor/github.com/shirou/w32/gdi32.go new file mode 100644 index 00000000..34f032c7 --- /dev/null +++ b/vendor/github.com/shirou/w32/gdi32.go @@ -0,0 +1,511 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
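+//
+// Illustrative sketch only, assuming an existing window DC hdc and a bitmap
+// hbmp sized to (w, h): the usual off-screen (double-buffered) draw with the
+// wrappers in this file.
+//
+//	memDC := CreateCompatibleDC(hdc)
+//	prev := SelectObject(memDC, HGDIOBJ(hbmp))
+//	Rectangle(memDC, 0, 0, w, h) // draw into the memory DC
+//	BitBlt(hdc, 0, 0, w, h, memDC, 0, 0, SRCCOPY)
+//	SelectObject(memDC, prev)
+//	DeleteDC(memDC)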
+ +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modgdi32 = syscall.NewLazyDLL("gdi32.dll") + + procGetDeviceCaps = modgdi32.NewProc("GetDeviceCaps") + procDeleteObject = modgdi32.NewProc("DeleteObject") + procCreateFontIndirect = modgdi32.NewProc("CreateFontIndirectW") + procAbortDoc = modgdi32.NewProc("AbortDoc") + procBitBlt = modgdi32.NewProc("BitBlt") + procCloseEnhMetaFile = modgdi32.NewProc("CloseEnhMetaFile") + procCopyEnhMetaFile = modgdi32.NewProc("CopyEnhMetaFileW") + procCreateBrushIndirect = modgdi32.NewProc("CreateBrushIndirect") + procCreateCompatibleDC = modgdi32.NewProc("CreateCompatibleDC") + procCreateDC = modgdi32.NewProc("CreateDCW") + procCreateDIBSection = modgdi32.NewProc("CreateDIBSection") + procCreateEnhMetaFile = modgdi32.NewProc("CreateEnhMetaFileW") + procCreateIC = modgdi32.NewProc("CreateICW") + procDeleteDC = modgdi32.NewProc("DeleteDC") + procDeleteEnhMetaFile = modgdi32.NewProc("DeleteEnhMetaFile") + procEllipse = modgdi32.NewProc("Ellipse") + procEndDoc = modgdi32.NewProc("EndDoc") + procEndPage = modgdi32.NewProc("EndPage") + procExtCreatePen = modgdi32.NewProc("ExtCreatePen") + procGetEnhMetaFile = modgdi32.NewProc("GetEnhMetaFileW") + procGetEnhMetaFileHeader = modgdi32.NewProc("GetEnhMetaFileHeader") + procGetObject = modgdi32.NewProc("GetObjectW") + procGetStockObject = modgdi32.NewProc("GetStockObject") + procGetTextExtentExPoint = modgdi32.NewProc("GetTextExtentExPointW") + procGetTextExtentPoint32 = modgdi32.NewProc("GetTextExtentPoint32W") + procGetTextMetrics = modgdi32.NewProc("GetTextMetricsW") + procLineTo = modgdi32.NewProc("LineTo") + procMoveToEx = modgdi32.NewProc("MoveToEx") + procPlayEnhMetaFile = modgdi32.NewProc("PlayEnhMetaFile") + procRectangle = modgdi32.NewProc("Rectangle") + procResetDC = modgdi32.NewProc("ResetDCW") + procSelectObject = modgdi32.NewProc("SelectObject") + procSetBkMode = modgdi32.NewProc("SetBkMode") + procSetBrushOrgEx = modgdi32.NewProc("SetBrushOrgEx") + procSetStretchBltMode = modgdi32.NewProc("SetStretchBltMode") + procSetTextColor = modgdi32.NewProc("SetTextColor") + procSetBkColor = modgdi32.NewProc("SetBkColor") + procStartDoc = modgdi32.NewProc("StartDocW") + procStartPage = modgdi32.NewProc("StartPage") + procStretchBlt = modgdi32.NewProc("StretchBlt") + procSetDIBitsToDevice = modgdi32.NewProc("SetDIBitsToDevice") + procChoosePixelFormat = modgdi32.NewProc("ChoosePixelFormat") + procDescribePixelFormat = modgdi32.NewProc("DescribePixelFormat") + procGetEnhMetaFilePixelFormat = modgdi32.NewProc("GetEnhMetaFilePixelFormat") + procGetPixelFormat = modgdi32.NewProc("GetPixelFormat") + procSetPixelFormat = modgdi32.NewProc("SetPixelFormat") + procSwapBuffers = modgdi32.NewProc("SwapBuffers") +) + +func GetDeviceCaps(hdc HDC, index int) int { + ret, _, _ := procGetDeviceCaps.Call( + uintptr(hdc), + uintptr(index)) + + return int(ret) +} + +func DeleteObject(hObject HGDIOBJ) bool { + ret, _, _ := procDeleteObject.Call( + uintptr(hObject)) + + return ret != 0 +} + +func CreateFontIndirect(logFont *LOGFONT) HFONT { + ret, _, _ := procCreateFontIndirect.Call( + uintptr(unsafe.Pointer(logFont))) + + return HFONT(ret) +} + +func AbortDoc(hdc HDC) int { + ret, _, _ := procAbortDoc.Call( + uintptr(hdc)) + + return int(ret) +} + +func BitBlt(hdcDest HDC, nXDest, nYDest, nWidth, nHeight int, hdcSrc HDC, nXSrc, nYSrc int, dwRop uint) { + ret, _, _ := procBitBlt.Call( + uintptr(hdcDest), + uintptr(nXDest), + uintptr(nYDest), + uintptr(nWidth), + uintptr(nHeight), + uintptr(hdcSrc), + 
uintptr(nXSrc), + uintptr(nYSrc), + uintptr(dwRop)) + + if ret == 0 { + panic("BitBlt failed") + } +} + +func CloseEnhMetaFile(hdc HDC) HENHMETAFILE { + ret, _, _ := procCloseEnhMetaFile.Call( + uintptr(hdc)) + + return HENHMETAFILE(ret) +} + +func CopyEnhMetaFile(hemfSrc HENHMETAFILE, lpszFile *uint16) HENHMETAFILE { + ret, _, _ := procCopyEnhMetaFile.Call( + uintptr(hemfSrc), + uintptr(unsafe.Pointer(lpszFile))) + + return HENHMETAFILE(ret) +} + +func CreateBrushIndirect(lplb *LOGBRUSH) HBRUSH { + ret, _, _ := procCreateBrushIndirect.Call( + uintptr(unsafe.Pointer(lplb))) + + return HBRUSH(ret) +} + +func CreateCompatibleDC(hdc HDC) HDC { + ret, _, _ := procCreateCompatibleDC.Call( + uintptr(hdc)) + + if ret == 0 { + panic("Create compatible DC failed") + } + + return HDC(ret) +} + +func CreateDC(lpszDriver, lpszDevice, lpszOutput *uint16, lpInitData *DEVMODE) HDC { + ret, _, _ := procCreateDC.Call( + uintptr(unsafe.Pointer(lpszDriver)), + uintptr(unsafe.Pointer(lpszDevice)), + uintptr(unsafe.Pointer(lpszOutput)), + uintptr(unsafe.Pointer(lpInitData))) + + return HDC(ret) +} + +func CreateDIBSection(hdc HDC, pbmi *BITMAPINFO, iUsage uint, ppvBits *unsafe.Pointer, hSection HANDLE, dwOffset uint) HBITMAP { + ret, _, _ := procCreateDIBSection.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(pbmi)), + uintptr(iUsage), + uintptr(unsafe.Pointer(ppvBits)), + uintptr(hSection), + uintptr(dwOffset)) + + return HBITMAP(ret) +} + +func CreateEnhMetaFile(hdcRef HDC, lpFilename *uint16, lpRect *RECT, lpDescription *uint16) HDC { + ret, _, _ := procCreateEnhMetaFile.Call( + uintptr(hdcRef), + uintptr(unsafe.Pointer(lpFilename)), + uintptr(unsafe.Pointer(lpRect)), + uintptr(unsafe.Pointer(lpDescription))) + + return HDC(ret) +} + +func CreateIC(lpszDriver, lpszDevice, lpszOutput *uint16, lpdvmInit *DEVMODE) HDC { + ret, _, _ := procCreateIC.Call( + uintptr(unsafe.Pointer(lpszDriver)), + uintptr(unsafe.Pointer(lpszDevice)), + uintptr(unsafe.Pointer(lpszOutput)), + uintptr(unsafe.Pointer(lpdvmInit))) + + return HDC(ret) +} + +func DeleteDC(hdc HDC) bool { + ret, _, _ := procDeleteDC.Call( + uintptr(hdc)) + + return ret != 0 +} + +func DeleteEnhMetaFile(hemf HENHMETAFILE) bool { + ret, _, _ := procDeleteEnhMetaFile.Call( + uintptr(hemf)) + + return ret != 0 +} + +func Ellipse(hdc HDC, nLeftRect, nTopRect, nRightRect, nBottomRect int) bool { + ret, _, _ := procEllipse.Call( + uintptr(hdc), + uintptr(nLeftRect), + uintptr(nTopRect), + uintptr(nRightRect), + uintptr(nBottomRect)) + + return ret != 0 +} + +func EndDoc(hdc HDC) int { + ret, _, _ := procEndDoc.Call( + uintptr(hdc)) + + return int(ret) +} + +func EndPage(hdc HDC) int { + ret, _, _ := procEndPage.Call( + uintptr(hdc)) + + return int(ret) +} + +func ExtCreatePen(dwPenStyle, dwWidth uint, lplb *LOGBRUSH, dwStyleCount uint, lpStyle *uint) HPEN { + ret, _, _ := procExtCreatePen.Call( + uintptr(dwPenStyle), + uintptr(dwWidth), + uintptr(unsafe.Pointer(lplb)), + uintptr(dwStyleCount), + uintptr(unsafe.Pointer(lpStyle))) + + return HPEN(ret) +} + +func GetEnhMetaFile(lpszMetaFile *uint16) HENHMETAFILE { + ret, _, _ := procGetEnhMetaFile.Call( + uintptr(unsafe.Pointer(lpszMetaFile))) + + return HENHMETAFILE(ret) +} + +func GetEnhMetaFileHeader(hemf HENHMETAFILE, cbBuffer uint, lpemh *ENHMETAHEADER) uint { + ret, _, _ := procGetEnhMetaFileHeader.Call( + uintptr(hemf), + uintptr(cbBuffer), + uintptr(unsafe.Pointer(lpemh))) + + return uint(ret) +} + +func GetObject(hgdiobj HGDIOBJ, cbBuffer uintptr, lpvObject unsafe.Pointer) int { + ret, _, _ := 
procGetObject.Call( + uintptr(hgdiobj), + uintptr(cbBuffer), + uintptr(lpvObject)) + + return int(ret) +} + +func GetStockObject(fnObject int) HGDIOBJ { + ret, _, _ := procGetStockObject.Call( + uintptr(fnObject)) + + return HGDIOBJ(ret) +} + +func GetTextExtentExPoint(hdc HDC, lpszStr *uint16, cchString, nMaxExtent int, lpnFit, alpDx *int, lpSize *SIZE) bool { + ret, _, _ := procGetTextExtentExPoint.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(lpszStr)), + uintptr(cchString), + uintptr(nMaxExtent), + uintptr(unsafe.Pointer(lpnFit)), + uintptr(unsafe.Pointer(alpDx)), + uintptr(unsafe.Pointer(lpSize))) + + return ret != 0 +} + +func GetTextExtentPoint32(hdc HDC, lpString *uint16, c int, lpSize *SIZE) bool { + ret, _, _ := procGetTextExtentPoint32.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(lpString)), + uintptr(c), + uintptr(unsafe.Pointer(lpSize))) + + return ret != 0 +} + +func GetTextMetrics(hdc HDC, lptm *TEXTMETRIC) bool { + ret, _, _ := procGetTextMetrics.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(lptm))) + + return ret != 0 +} + +func LineTo(hdc HDC, nXEnd, nYEnd int) bool { + ret, _, _ := procLineTo.Call( + uintptr(hdc), + uintptr(nXEnd), + uintptr(nYEnd)) + + return ret != 0 +} + +func MoveToEx(hdc HDC, x, y int, lpPoint *POINT) bool { + ret, _, _ := procMoveToEx.Call( + uintptr(hdc), + uintptr(x), + uintptr(y), + uintptr(unsafe.Pointer(lpPoint))) + + return ret != 0 +} + +func PlayEnhMetaFile(hdc HDC, hemf HENHMETAFILE, lpRect *RECT) bool { + ret, _, _ := procPlayEnhMetaFile.Call( + uintptr(hdc), + uintptr(hemf), + uintptr(unsafe.Pointer(lpRect))) + + return ret != 0 +} + +func Rectangle(hdc HDC, nLeftRect, nTopRect, nRightRect, nBottomRect int) bool { + ret, _, _ := procRectangle.Call( + uintptr(hdc), + uintptr(nLeftRect), + uintptr(nTopRect), + uintptr(nRightRect), + uintptr(nBottomRect)) + + return ret != 0 +} + +func ResetDC(hdc HDC, lpInitData *DEVMODE) HDC { + ret, _, _ := procResetDC.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(lpInitData))) + + return HDC(ret) +} + +func SelectObject(hdc HDC, hgdiobj HGDIOBJ) HGDIOBJ { + ret, _, _ := procSelectObject.Call( + uintptr(hdc), + uintptr(hgdiobj)) + + if ret == 0 { + panic("SelectObject failed") + } + + return HGDIOBJ(ret) +} + +func SetBkMode(hdc HDC, iBkMode int) int { + ret, _, _ := procSetBkMode.Call( + uintptr(hdc), + uintptr(iBkMode)) + + if ret == 0 { + panic("SetBkMode failed") + } + + return int(ret) +} + +func SetBrushOrgEx(hdc HDC, nXOrg, nYOrg int, lppt *POINT) bool { + ret, _, _ := procSetBrushOrgEx.Call( + uintptr(hdc), + uintptr(nXOrg), + uintptr(nYOrg), + uintptr(unsafe.Pointer(lppt))) + + return ret != 0 +} + +func SetStretchBltMode(hdc HDC, iStretchMode int) int { + ret, _, _ := procSetStretchBltMode.Call( + uintptr(hdc), + uintptr(iStretchMode)) + + return int(ret) +} + +func SetTextColor(hdc HDC, crColor COLORREF) COLORREF { + ret, _, _ := procSetTextColor.Call( + uintptr(hdc), + uintptr(crColor)) + + if ret == CLR_INVALID { + panic("SetTextColor failed") + } + + return COLORREF(ret) +} + +func SetBkColor(hdc HDC, crColor COLORREF) COLORREF { + ret, _, _ := procSetBkColor.Call( + uintptr(hdc), + uintptr(crColor)) + + if ret == CLR_INVALID { + panic("SetBkColor failed") + } + + return COLORREF(ret) +} + +func StartDoc(hdc HDC, lpdi *DOCINFO) int { + ret, _, _ := procStartDoc.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(lpdi))) + + return int(ret) +} + +func StartPage(hdc HDC) int { + ret, _, _ := procStartPage.Call( + uintptr(hdc)) + + return int(ret) +} + +func StretchBlt(hdcDest HDC,
nXOriginDest, nYOriginDest, nWidthDest, nHeightDest int, hdcSrc HDC, nXOriginSrc, nYOriginSrc, nWidthSrc, nHeightSrc int, dwRop uint) { + ret, _, _ := procStretchBlt.Call( + uintptr(hdcDest), + uintptr(nXOriginDest), + uintptr(nYOriginDest), + uintptr(nWidthDest), + uintptr(nHeightDest), + uintptr(hdcSrc), + uintptr(nXOriginSrc), + uintptr(nYOriginSrc), + uintptr(nWidthSrc), + uintptr(nHeightSrc), + uintptr(dwRop)) + + if ret == 0 { + panic("StretchBlt failed") + } +} + +func SetDIBitsToDevice(hdc HDC, xDest, yDest, dwWidth, dwHeight, xSrc, ySrc int, uStartScan, cScanLines uint, lpvBits []byte, lpbmi *BITMAPINFO, fuColorUse uint) int { + ret, _, _ := procSetDIBitsToDevice.Call( + uintptr(hdc), + uintptr(xDest), + uintptr(yDest), + uintptr(dwWidth), + uintptr(dwHeight), + uintptr(xSrc), + uintptr(ySrc), + uintptr(uStartScan), + uintptr(cScanLines), + uintptr(unsafe.Pointer(&lpvBits[0])), + uintptr(unsafe.Pointer(lpbmi)), + uintptr(fuColorUse)) + + return int(ret) +} + +func ChoosePixelFormat(hdc HDC, pfd *PIXELFORMATDESCRIPTOR) int { + ret, _, _ := procChoosePixelFormat.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(pfd)), + ) + return int(ret) +} + +func DescribePixelFormat(hdc HDC, iPixelFormat int, nBytes uint, pfd *PIXELFORMATDESCRIPTOR) int { + ret, _, _ := procDescribePixelFormat.Call( + uintptr(hdc), + uintptr(iPixelFormat), + uintptr(nBytes), + uintptr(unsafe.Pointer(pfd)), + ) + return int(ret) +} + +func GetEnhMetaFilePixelFormat(hemf HENHMETAFILE, cbBuffer uint32, pfd *PIXELFORMATDESCRIPTOR) uint { + ret, _, _ := procGetEnhMetaFilePixelFormat.Call( + uintptr(hemf), + uintptr(cbBuffer), + uintptr(unsafe.Pointer(pfd)), + ) + return uint(ret) +} + +func GetPixelFormat(hdc HDC) int { + ret, _, _ := procGetPixelFormat.Call( + uintptr(hdc), + ) + return int(ret) +} + +func SetPixelFormat(hdc HDC, iPixelFormat int, pfd *PIXELFORMATDESCRIPTOR) bool { + ret, _, _ := procSetPixelFormat.Call( + uintptr(hdc), + uintptr(iPixelFormat), + uintptr(unsafe.Pointer(pfd)), + ) + return ret == TRUE +} + +func SwapBuffers(hdc HDC) bool { + ret, _, _ := procSwapBuffers.Call(uintptr(hdc)) + return ret == TRUE +} diff --git a/vendor/github.com/shirou/w32/gdiplus.go b/vendor/github.com/shirou/w32/gdiplus.go new file mode 100644 index 00000000..443334b0 --- /dev/null +++ b/vendor/github.com/shirou/w32/gdiplus.go @@ -0,0 +1,177 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
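+//
+// Illustrative sketch only, assuming the GdiplusStartupInput/Output struct
+// definitions declared elsewhere in this package (the GdiplusVersion field
+// name is assumed): decode an image file and convert it to a GDI HBITMAP.
+//
+//	var in GdiplusStartupInput
+//	in.GdiplusVersion = 1 // assumed field name
+//	var out GdiplusStartupOutput
+//	GdiplusStartup(&in, &out)
+//	defer GdiplusShutdown()
+//	if bmp, err := GdipCreateBitmapFromFile("C:\\test.png"); err == nil {
+//		defer GdipDisposeImage(bmp)
+//		hbmp, _ := GdipCreateHBITMAPFromBitmap(bmp, 0xFFFFFFFF)
+//		_ = hbmp
+//	}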
+ +// +build windows + +package w32 + +import ( + "errors" + "fmt" + "syscall" + "unsafe" +) + +const ( + Ok = 0 + GenericError = 1 + InvalidParameter = 2 + OutOfMemory = 3 + ObjectBusy = 4 + InsufficientBuffer = 5 + NotImplemented = 6 + Win32Error = 7 + WrongState = 8 + Aborted = 9 + FileNotFound = 10 + ValueOverflow = 11 + AccessDenied = 12 + UnknownImageFormat = 13 + FontFamilyNotFound = 14 + FontStyleNotFound = 15 + NotTrueTypeFont = 16 + UnsupportedGdiplusVersion = 17 + GdiplusNotInitialized = 18 + PropertyNotFound = 19 + PropertyNotSupported = 20 + ProfileNotFound = 21 +) + +func GetGpStatus(s int32) string { + switch s { + case Ok: + return "Ok" + case GenericError: + return "GenericError" + case InvalidParameter: + return "InvalidParameter" + case OutOfMemory: + return "OutOfMemory" + case ObjectBusy: + return "ObjectBusy" + case InsufficientBuffer: + return "InsufficientBuffer" + case NotImplemented: + return "NotImplemented" + case Win32Error: + return "Win32Error" + case WrongState: + return "WrongState" + case Aborted: + return "Aborted" + case FileNotFound: + return "FileNotFound" + case ValueOverflow: + return "ValueOverflow" + case AccessDenied: + return "AccessDenied" + case UnknownImageFormat: + return "UnknownImageFormat" + case FontFamilyNotFound: + return "FontFamilyNotFound" + case FontStyleNotFound: + return "FontStyleNotFound" + case NotTrueTypeFont: + return "NotTrueTypeFont" + case UnsupportedGdiplusVersion: + return "UnsupportedGdiplusVersion" + case GdiplusNotInitialized: + return "GdiplusNotInitialized" + case PropertyNotFound: + return "PropertyNotFound" + case PropertyNotSupported: + return "PropertyNotSupported" + case ProfileNotFound: + return "ProfileNotFound" + } + return "Unknown Status Value" +} + +var ( + token uintptr + + modgdiplus = syscall.NewLazyDLL("gdiplus.dll") + + procGdipCreateBitmapFromFile = modgdiplus.NewProc("GdipCreateBitmapFromFile") + procGdipCreateBitmapFromHBITMAP = modgdiplus.NewProc("GdipCreateBitmapFromHBITMAP") + procGdipCreateHBITMAPFromBitmap = modgdiplus.NewProc("GdipCreateHBITMAPFromBitmap") + procGdipCreateBitmapFromResource = modgdiplus.NewProc("GdipCreateBitmapFromResource") + procGdipCreateBitmapFromStream = modgdiplus.NewProc("GdipCreateBitmapFromStream") + procGdipDisposeImage = modgdiplus.NewProc("GdipDisposeImage") + procGdiplusShutdown = modgdiplus.NewProc("GdiplusShutdown") + procGdiplusStartup = modgdiplus.NewProc("GdiplusStartup") +) + +func GdipCreateBitmapFromFile(filename string) (*uintptr, error) { + var bitmap *uintptr + ret, _, _ := procGdipCreateBitmapFromFile.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(filename))), + uintptr(unsafe.Pointer(&bitmap))) + + if ret != Ok { + return nil, errors.New(fmt.Sprintf("GdipCreateBitmapFromFile failed with status '%s' for file '%s'", GetGpStatus(int32(ret)), filename)) + } + + return bitmap, nil +} + +func GdipCreateBitmapFromResource(instance HINSTANCE, resId *uint16) (*uintptr, error) { + var bitmap *uintptr + ret, _, _ := procGdipCreateBitmapFromResource.Call( + uintptr(instance), + uintptr(unsafe.Pointer(resId)), + uintptr(unsafe.Pointer(&bitmap))) + + if ret != Ok { + return nil, errors.New(fmt.Sprintf("GdipCreateBitmapFromResource failed with status '%s'", GetGpStatus(int32(ret)))) + } + + return bitmap, nil +} + +func GdipCreateBitmapFromStream(stream *IStream) (*uintptr, error) { + var bitmap *uintptr + ret, _, _ := procGdipCreateBitmapFromStream.Call( + uintptr(unsafe.Pointer(stream)), + uintptr(unsafe.Pointer(&bitmap))) + + if ret != Ok { + return
nil, errors.New(fmt.Sprintf("GdipCreateBitmapFromStream failed with status '%s'", GetGpStatus(int32(ret)))) + } + + return bitmap, nil +} + +func GdipCreateHBITMAPFromBitmap(bitmap *uintptr, background uint32) (HBITMAP, error) { + var hbitmap HBITMAP + ret, _, _ := procGdipCreateHBITMAPFromBitmap.Call( + uintptr(unsafe.Pointer(bitmap)), + uintptr(unsafe.Pointer(&hbitmap)), + uintptr(background)) + + if ret != Ok { + return 0, errors.New(fmt.Sprintf("GdipCreateHBITMAPFromBitmap failed with status '%s'", GetGpStatus(int32(ret)))) + } + + return hbitmap, nil +} + +func GdipDisposeImage(image *uintptr) { + procGdipDisposeImage.Call(uintptr(unsafe.Pointer(image))) +} + +func GdiplusShutdown() { + procGdiplusShutdown.Call(token) +} + +func GdiplusStartup(input *GdiplusStartupInput, output *GdiplusStartupOutput) { + ret, _, _ := procGdiplusStartup.Call( + uintptr(unsafe.Pointer(&token)), + uintptr(unsafe.Pointer(input)), + uintptr(unsafe.Pointer(output))) + + if ret != Ok { + panic("GdiplusStartup failed with status " + GetGpStatus(int32(ret))) + } +} diff --git a/vendor/github.com/shirou/w32/idispatch.go b/vendor/github.com/shirou/w32/idispatch.go new file mode 100644 index 00000000..d6c2504d --- /dev/null +++ b/vendor/github.com/shirou/w32/idispatch.go @@ -0,0 +1,45 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "unsafe" +) + +type pIDispatchVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr + pGetTypeInfoCount uintptr + pGetTypeInfo uintptr + pGetIDsOfNames uintptr + pInvoke uintptr +} + +type IDispatch struct { + lpVtbl *pIDispatchVtbl +} + +func (this *IDispatch) QueryInterface(id *GUID) *IDispatch { + return ComQueryInterface((*IUnknown)(unsafe.Pointer(this)), id) +} + +func (this *IDispatch) AddRef() int32 { + return ComAddRef((*IUnknown)(unsafe.Pointer(this))) +} + +func (this *IDispatch) Release() int32 { + return ComRelease((*IUnknown)(unsafe.Pointer(this))) +} + +func (this *IDispatch) GetIDsOfName(names []string) []int32 { + return ComGetIDsOfName(this, names) +} + +func (this *IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) *VARIANT { + return ComInvoke(this, dispid, dispatch, params...) +} diff --git a/vendor/github.com/shirou/w32/istream.go b/vendor/github.com/shirou/w32/istream.go new file mode 100644 index 00000000..0bb28222 --- /dev/null +++ b/vendor/github.com/shirou/w32/istream.go @@ -0,0 +1,33 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "unsafe" +) + +type pIStreamVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr +} + +type IStream struct { + lpVtbl *pIStreamVtbl +} + +func (this *IStream) QueryInterface(id *GUID) *IDispatch { + return ComQueryInterface((*IUnknown)(unsafe.Pointer(this)), id) +} + +func (this *IStream) AddRef() int32 { + return ComAddRef((*IUnknown)(unsafe.Pointer(this))) +} + +func (this *IStream) Release() int32 { + return ComRelease((*IUnknown)(unsafe.Pointer(this))) +} diff --git a/vendor/github.com/shirou/w32/iunknown.go b/vendor/github.com/shirou/w32/iunknown.go new file mode 100644 index 00000000..847fba7e --- /dev/null +++ b/vendor/github.com/shirou/w32/iunknown.go @@ -0,0 +1,29 @@ +// Copyright 2010-2012 The W32 Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +type pIUnknownVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr +} + +type IUnknown struct { + lpVtbl *pIUnknownVtbl +} + +func (this *IUnknown) QueryInterface(id *GUID) *IDispatch { + return ComQueryInterface(this, id) +} + +func (this *IUnknown) AddRef() int32 { + return ComAddRef(this) +} + +func (this *IUnknown) Release() int32 { + return ComRelease(this) +} diff --git a/vendor/github.com/shirou/w32/kernel32.go b/vendor/github.com/shirou/w32/kernel32.go new file mode 100644 index 00000000..5d5b4d8a --- /dev/null +++ b/vendor/github.com/shirou/w32/kernel32.go @@ -0,0 +1,316 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetModuleHandle = modkernel32.NewProc("GetModuleHandleW") + procMulDiv = modkernel32.NewProc("MulDiv") + procGetConsoleWindow = modkernel32.NewProc("GetConsoleWindow") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetUserDefaultLCID = modkernel32.NewProc("GetUserDefaultLCID") + procLstrlen = modkernel32.NewProc("lstrlenW") + procLstrcpy = modkernel32.NewProc("lstrcpyW") + procGlobalAlloc = modkernel32.NewProc("GlobalAlloc") + procGlobalFree = modkernel32.NewProc("GlobalFree") + procGlobalLock = modkernel32.NewProc("GlobalLock") + procGlobalUnlock = modkernel32.NewProc("GlobalUnlock") + procMoveMemory = modkernel32.NewProc("RtlMoveMemory") + procFindResource = modkernel32.NewProc("FindResourceW") + procSizeofResource = modkernel32.NewProc("SizeofResource") + procLockResource = modkernel32.NewProc("LockResource") + procLoadResource = modkernel32.NewProc("LoadResource") + procGetLastError = modkernel32.NewProc("GetLastError") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procModule32First = modkernel32.NewProc("Module32FirstW") + procModule32Next = modkernel32.NewProc("Module32NextW") + procProcess32First = modkernel32.NewProc("Process32FirstW") + procProcess32Next = modkernel32.NewProc("Process32NextW") + procGetSystemTimes = modkernel32.NewProc("GetSystemTimes") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = modkernel32.NewProc("SetConsoleTextAttribute") + procGetDiskFreeSpaceEx = modkernel32.NewProc("GetDiskFreeSpaceExW") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") +) + +func GetModuleHandle(modulename string) HINSTANCE { + var mn uintptr + if modulename == "" { + mn = 0 + } else { + mn = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(modulename))) + } + ret, _, _ := procGetModuleHandle.Call(mn) + return HINSTANCE(ret) +} + +func MulDiv(number, numerator, denominator int) int { + ret, _, _ := procMulDiv.Call( + uintptr(number), + uintptr(numerator), + uintptr(denominator)) + + return int(ret) +} + +func GetConsoleWindow() HWND { + ret, _, _ := procGetConsoleWindow.Call() + + return HWND(ret) +} + +func GetCurrentThread() HANDLE { + ret, 
_, _ := procGetCurrentThread.Call() + + return HANDLE(ret) +} + +func GetLogicalDrives() uint32 { + ret, _, _ := procGetLogicalDrives.Call() + + return uint32(ret) +} + +func GetUserDefaultLCID() uint32 { + ret, _, _ := procGetUserDefaultLCID.Call() + + return uint32(ret) +} + +func Lstrlen(lpString *uint16) int { + ret, _, _ := procLstrlen.Call(uintptr(unsafe.Pointer(lpString))) + + return int(ret) +} + +func Lstrcpy(buf []uint16, lpString *uint16) { + procLstrcpy.Call( + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(lpString))) +} + +func GlobalAlloc(uFlags uint, dwBytes uint32) HGLOBAL { + ret, _, _ := procGlobalAlloc.Call( + uintptr(uFlags), + uintptr(dwBytes)) + + if ret == 0 { + panic("GlobalAlloc failed") + } + + return HGLOBAL(ret) +} + +func GlobalFree(hMem HGLOBAL) { + ret, _, _ := procGlobalFree.Call(uintptr(hMem)) + + if ret != 0 { + panic("GlobalFree failed") + } +} + +func GlobalLock(hMem HGLOBAL) unsafe.Pointer { + ret, _, _ := procGlobalLock.Call(uintptr(hMem)) + + if ret == 0 { + panic("GlobalLock failed") + } + + return unsafe.Pointer(ret) +} + +func GlobalUnlock(hMem HGLOBAL) bool { + ret, _, _ := procGlobalUnlock.Call(uintptr(hMem)) + + return ret != 0 +} + +func MoveMemory(destination, source unsafe.Pointer, length uint32) { + procMoveMemory.Call( + uintptr(unsafe.Pointer(destination)), + uintptr(source), + uintptr(length)) +} + +func FindResource(hModule HMODULE, lpName, lpType *uint16) (HRSRC, error) { + ret, _, _ := procFindResource.Call( + uintptr(hModule), + uintptr(unsafe.Pointer(lpName)), + uintptr(unsafe.Pointer(lpType))) + + if ret == 0 { + return 0, syscall.GetLastError() + } + + return HRSRC(ret), nil +} + +func SizeofResource(hModule HMODULE, hResInfo HRSRC) uint32 { + ret, _, _ := procSizeofResource.Call( + uintptr(hModule), + uintptr(hResInfo)) + + if ret == 0 { + panic("SizeofResource failed") + } + + return uint32(ret) +} + +func LockResource(hResData HGLOBAL) unsafe.Pointer { + ret, _, _ := procLockResource.Call(uintptr(hResData)) + + if ret == 0 { + panic("LockResource failed") + } + + return unsafe.Pointer(ret) +} + +func LoadResource(hModule HMODULE, hResInfo HRSRC) HGLOBAL { + ret, _, _ := procLoadResource.Call( + uintptr(hModule), + uintptr(hResInfo)) + + if ret == 0 { + panic("LoadResource failed") + } + + return HGLOBAL(ret) +} + +func GetLastError() uint32 { + ret, _, _ := procGetLastError.Call() + return uint32(ret) +} + +func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) HANDLE { + inherit := 0 + if inheritHandle { + inherit = 1 + } + + ret, _, _ := procOpenProcess.Call( + uintptr(desiredAccess), + uintptr(inherit), + uintptr(processId)) + return HANDLE(ret) +} + +func TerminateProcess(hProcess HANDLE, uExitCode uint) bool { + ret, _, _ := procTerminateProcess.Call( + uintptr(hProcess), + uintptr(uExitCode)) + return ret != 0 +} + +func CloseHandle(object HANDLE) bool { + ret, _, _ := procCloseHandle.Call( + uintptr(object)) + return ret != 0 +} + +func CreateToolhelp32Snapshot(flags, processId uint32) HANDLE { + ret, _, _ := procCreateToolhelp32Snapshot.Call( + uintptr(flags), + uintptr(processId)) + + if ret <= 0 { + return HANDLE(0) + } + + return HANDLE(ret) +} + +func Module32First(snapshot HANDLE, me *MODULEENTRY32) bool { + ret, _, _ := procModule32First.Call( + uintptr(snapshot), + uintptr(unsafe.Pointer(me))) + + return ret != 0 +} + +func Module32Next(snapshot HANDLE, me *MODULEENTRY32) bool { + ret, _, _ := procModule32Next.Call( + uintptr(snapshot), + uintptr(unsafe.Pointer(me))) + + return 
ret != 0 +} +func Process32First(snapshot HANDLE, pe *PROCESSENTRY32) bool { + ret, _, _ := procProcess32First.Call( + uintptr(snapshot), + uintptr(unsafe.Pointer(pe))) + + return ret != 0 +} + +func Process32Next(snapshot HANDLE, pe *PROCESSENTRY32) bool { + ret, _, _ := procProcess32Next.Call( + uintptr(snapshot), + uintptr(unsafe.Pointer(pe))) + + return ret != 0 +} +func GetSystemTimes(lpIdleTime, lpKernelTime, lpUserTime *FILETIME) bool { + ret, _, _ := procGetSystemTimes.Call( + uintptr(unsafe.Pointer(lpIdleTime)), + uintptr(unsafe.Pointer(lpKernelTime)), + uintptr(unsafe.Pointer(lpUserTime))) + + return ret != 0 +} + +func GetProcessTimes(hProcess HANDLE, lpCreationTime, lpExitTime, lpKernelTime, lpUserTime *FILETIME) bool { + ret, _, _ := procGetProcessTimes.Call( + uintptr(hProcess), + uintptr(unsafe.Pointer(lpCreationTime)), + uintptr(unsafe.Pointer(lpExitTime)), + uintptr(unsafe.Pointer(lpKernelTime)), + uintptr(unsafe.Pointer(lpUserTime))) + + return ret != 0 +} + +func GetConsoleScreenBufferInfo(hConsoleOutput HANDLE) *CONSOLE_SCREEN_BUFFER_INFO { + var csbi CONSOLE_SCREEN_BUFFER_INFO + ret, _, _ := procGetConsoleScreenBufferInfo.Call( + uintptr(hConsoleOutput), + uintptr(unsafe.Pointer(&csbi))) + if ret == 0 { + return nil + } + return &csbi +} + +func SetConsoleTextAttribute(hConsoleOutput HANDLE, wAttributes uint16) bool { + ret, _, _ := procSetConsoleTextAttribute.Call( + uintptr(hConsoleOutput), + uintptr(wAttributes)) + return ret != 0 +} + +func GetDiskFreeSpaceEx(dirName string) (r bool, + freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes uint64) { + ret, _, _ := procGetDiskFreeSpaceEx.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(dirName))), + uintptr(unsafe.Pointer(&freeBytesAvailable)), + uintptr(unsafe.Pointer(&totalNumberOfBytes)), + uintptr(unsafe.Pointer(&totalNumberOfFreeBytes))) + return ret != 0, + freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes +} diff --git a/vendor/github.com/shirou/w32/ole32.go b/vendor/github.com/shirou/w32/ole32.go new file mode 100644 index 00000000..48589848 --- /dev/null +++ b/vendor/github.com/shirou/w32/ole32.go @@ -0,0 +1,65 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
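+//
+// Illustrative sketch only: expose a block of global memory as a COM IStream,
+// using GlobalAlloc from kernel32.go in this package.
+//
+//	CoInitialize()
+//	defer CoUninitialize()
+//	hMem := GlobalAlloc(GMEM_MOVEABLE, 1024)
+//	stream := CreateStreamOnHGlobal(hMem, true) // the stream now owns hMem
+//	defer stream.Release()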
+ +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modole32 = syscall.NewLazyDLL("ole32.dll") + + procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoInitialize = modole32.NewProc("CoInitialize") + procCoUninitialize = modole32.NewProc("CoUninitialize") + procCreateStreamOnHGlobal = modole32.NewProc("CreateStreamOnHGlobal") +) + +func CoInitializeEx(coInit uintptr) HRESULT { + ret, _, _ := procCoInitializeEx.Call( + 0, + coInit) + + switch uint32(ret) { + case E_INVALIDARG: + panic("CoInitializeEx failed with E_INVALIDARG") + case E_OUTOFMEMORY: + panic("CoInitializeEx failed with E_OUTOFMEMORY") + case E_UNEXPECTED: + panic("CoInitializeEx failed with E_UNEXPECTED") + } + + return HRESULT(ret) +} + +func CoInitialize() { + procCoInitialize.Call(0) +} + +func CoUninitialize() { + procCoUninitialize.Call() +} + +func CreateStreamOnHGlobal(hGlobal HGLOBAL, fDeleteOnRelease bool) *IStream { + stream := new(IStream) + ret, _, _ := procCreateStreamOnHGlobal.Call( + uintptr(hGlobal), + uintptr(BoolToBOOL(fDeleteOnRelease)), + uintptr(unsafe.Pointer(&stream))) + + switch uint32(ret) { + case E_INVALIDARG: + panic("CreateStreamOnHGlobal failed with E_INVALIDARG") + case E_OUTOFMEMORY: + panic("CreateStreamOnHGlobal failed with E_OUTOFMEMORY") + case E_UNEXPECTED: + panic("CreateStreamOnHGlobal failed with E_UNEXPECTED") + } + + return stream +} diff --git a/vendor/github.com/shirou/w32/oleaut32.go b/vendor/github.com/shirou/w32/oleaut32.go new file mode 100644 index 00000000..cdfcb003 --- /dev/null +++ b/vendor/github.com/shirou/w32/oleaut32.go @@ -0,0 +1,50 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modoleaut32 = syscall.NewLazyDLL("oleaut32") + + procVariantInit = modoleaut32.NewProc("VariantInit") + procSysAllocString = modoleaut32.NewProc("SysAllocString") + procSysFreeString = modoleaut32.NewProc("SysFreeString") + procSysStringLen = modoleaut32.NewProc("SysStringLen") + procCreateDispTypeInfo = modoleaut32.NewProc("CreateDispTypeInfo") + procCreateStdDispatch = modoleaut32.NewProc("CreateStdDispatch") +) + +func VariantInit(v *VARIANT) { + hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + panic("Invoke VariantInit error.") + } + return +} + +func SysAllocString(v string) (ss *int16) { + pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v)))) + ss = (*int16)(unsafe.Pointer(pss)) + return +} + +func SysFreeString(v *int16) { + hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + panic("Invoke SysFreeString error.") + } + return +} + +func SysStringLen(v *int16) uint { + l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v))) + return uint(l) +} diff --git a/vendor/github.com/shirou/w32/opengl32.go b/vendor/github.com/shirou/w32/opengl32.go new file mode 100644 index 00000000..4f35f19e --- /dev/null +++ b/vendor/github.com/shirou/w32/opengl32.go @@ -0,0 +1,74 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
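+ +// opengl32.go wraps the WGL rendering-context entry points from +// opengl32.dll: context creation and deletion, MakeCurrent, display-list +// sharing, and extension lookup via wglGetProcAddress.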
+ +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modopengl32 = syscall.NewLazyDLL("opengl32.dll") + + procwglCreateContext = modopengl32.NewProc("wglCreateContext") + procwglCreateLayerContext = modopengl32.NewProc("wglCreateLayerContext") + procwglDeleteContext = modopengl32.NewProc("wglDeleteContext") + procwglGetProcAddress = modopengl32.NewProc("wglGetProcAddress") + procwglMakeCurrent = modopengl32.NewProc("wglMakeCurrent") + procwglShareLists = modopengl32.NewProc("wglShareLists") +) + +func WglCreateContext(hdc HDC) HGLRC { + ret, _, _ := procwglCreateContext.Call( + uintptr(hdc), + ) + + return HGLRC(ret) +} + +func WglCreateLayerContext(hdc HDC, iLayerPlane int) HGLRC { + ret, _, _ := procwglCreateLayerContext.Call( + uintptr(hdc), + uintptr(iLayerPlane), + ) + + return HGLRC(ret) +} + +func WglDeleteContext(hglrc HGLRC) bool { + ret, _, _ := procwglDeleteContext.Call( + uintptr(hglrc), + ) + + return ret == TRUE +} + +func WglGetProcAddress(szProc string) uintptr { + ret, _, _ := procwglGetProcAddress.Call( + uintptr(unsafe.Pointer(syscall.StringBytePtr(szProc))), + ) + + return ret +} + +func WglMakeCurrent(hdc HDC, hglrc HGLRC) bool { + ret, _, _ := procwglMakeCurrent.Call( + uintptr(hdc), + uintptr(hglrc), + ) + + return ret == TRUE +} + +func WglShareLists(hglrc1, hglrc2 HGLRC) bool { + ret, _, _ := procwglShareLists.Call( + uintptr(hglrc1), + uintptr(hglrc2), + ) + + return ret == TRUE +} diff --git a/vendor/github.com/shirou/w32/psapi.go b/vendor/github.com/shirou/w32/psapi.go new file mode 100644 index 00000000..ab7858cb --- /dev/null +++ b/vendor/github.com/shirou/w32/psapi.go @@ -0,0 +1,27 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + + procEnumProcesses = modpsapi.NewProc("EnumProcesses") +) + +func EnumProcesses(processIds []uint32, cb uint32, bytesReturned *uint32) bool { + ret, _, _ := procEnumProcesses.Call( + uintptr(unsafe.Pointer(&processIds[0])), + uintptr(cb), + uintptr(unsafe.Pointer(bytesReturned))) + + return ret != 0 +} diff --git a/vendor/github.com/shirou/w32/shell32.go b/vendor/github.com/shirou/w32/shell32.go new file mode 100644 index 00000000..0f5ce8cb --- /dev/null +++ b/vendor/github.com/shirou/w32/shell32.go @@ -0,0 +1,155 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
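+ +// shell32.go wraps shell dialogs (SHBrowseForFolder, SHGetPathFromIDList), +// drag-and-drop helpers (DragAcceptFiles, DragQueryFile, DragQueryPoint, +// DragFinish), ShellExecute and ExtractIcon.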
+ +// +build windows + +package w32 + +import ( + "errors" + "fmt" + "syscall" + "unsafe" +) + +var ( + modshell32 = syscall.NewLazyDLL("shell32.dll") + + procSHBrowseForFolder = modshell32.NewProc("SHBrowseForFolderW") + procSHGetPathFromIDList = modshell32.NewProc("SHGetPathFromIDListW") + procDragAcceptFiles = modshell32.NewProc("DragAcceptFiles") + procDragQueryFile = modshell32.NewProc("DragQueryFileW") + procDragQueryPoint = modshell32.NewProc("DragQueryPoint") + procDragFinish = modshell32.NewProc("DragFinish") + procShellExecute = modshell32.NewProc("ShellExecuteW") + procExtractIcon = modshell32.NewProc("ExtractIconW") +) + +func SHBrowseForFolder(bi *BROWSEINFO) uintptr { + ret, _, _ := procSHBrowseForFolder.Call(uintptr(unsafe.Pointer(bi))) + + return ret +} + +func SHGetPathFromIDList(idl uintptr) string { + buf := make([]uint16, 1024) + procSHGetPathFromIDList.Call( + idl, + uintptr(unsafe.Pointer(&buf[0]))) + + return syscall.UTF16ToString(buf) +} + +func DragAcceptFiles(hwnd HWND, accept bool) { + procDragAcceptFiles.Call( + uintptr(hwnd), + uintptr(BoolToBOOL(accept))) +} + +func DragQueryFile(hDrop HDROP, iFile uint) (fileName string, fileCount uint) { + ret, _, _ := procDragQueryFile.Call( + uintptr(hDrop), + uintptr(iFile), + 0, + 0) + + fileCount = uint(ret) + + if iFile != 0xFFFFFFFF { + buf := make([]uint16, fileCount+1) + + ret, _, _ := procDragQueryFile.Call( + uintptr(hDrop), + uintptr(iFile), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(fileCount+1)) + + if ret == 0 { + panic("Invoke DragQueryFile error.") + } + + fileName = syscall.UTF16ToString(buf) + } + + return +} + +func DragQueryPoint(hDrop HDROP) (x, y int, isClientArea bool) { + var pt POINT + ret, _, _ := procDragQueryPoint.Call( + uintptr(hDrop), + uintptr(unsafe.Pointer(&pt))) + + return int(pt.X), int(pt.Y), (ret == 1) +} + +func DragFinish(hDrop HDROP) { + procDragFinish.Call(uintptr(hDrop)) +} + +func ShellExecute(hwnd HWND, lpOperation, lpFile, lpParameters, lpDirectory string, nShowCmd int) error { + var op, param, directory uintptr + if len(lpOperation) != 0 { + op = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpOperation))) + } + if len(lpParameters) != 0 { + param = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpParameters))) + } + if len(lpDirectory) != 0 { + directory = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpDirectory))) + } + + ret, _, _ := procShellExecute.Call( + uintptr(hwnd), + op, + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpFile))), + param, + directory, + uintptr(nShowCmd)) + + errorMsg := "" + if ret != 0 && ret <= 32 { + switch int(ret) { + case ERROR_FILE_NOT_FOUND: + errorMsg = "The specified file was not found." + case ERROR_PATH_NOT_FOUND: + errorMsg = "The specified path was not found." + case ERROR_BAD_FORMAT: + errorMsg = "The .exe file is invalid (non-Win32 .exe or error in .exe image)." + case SE_ERR_ACCESSDENIED: + errorMsg = "The operating system denied access to the specified file." + case SE_ERR_ASSOCINCOMPLETE: + errorMsg = "The file name association is incomplete or invalid." + case SE_ERR_DDEBUSY: + errorMsg = "The DDE transaction could not be completed because other DDE transactions were being processed." + case SE_ERR_DDEFAIL: + errorMsg = "The DDE transaction failed." + case SE_ERR_DDETIMEOUT: + errorMsg = "The DDE transaction could not be completed because the request timed out." + case SE_ERR_DLLNOTFOUND: + errorMsg = "The specified DLL was not found." 
+ case SE_ERR_NOASSOC: + errorMsg = "There is no application associated with the given file name extension. This error will also be returned if you attempt to print a file that is not printable." + case SE_ERR_OOM: + errorMsg = "There was not enough memory to complete the operation." + case SE_ERR_SHARE: + errorMsg = "A sharing violation occurred." + default: + errorMsg = fmt.Sprintf("Unknown error occurred with error code %v", ret) + } + } else { + return nil + } + + return errors.New(errorMsg) +} + +func ExtractIcon(lpszExeFileName string, nIconIndex int) HICON { + ret, _, _ := procExtractIcon.Call( + 0, + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpszExeFileName))), + uintptr(nIconIndex)) + + return HICON(ret) +} diff --git a/vendor/github.com/shirou/w32/typedef.go b/vendor/github.com/shirou/w32/typedef.go new file mode 100644 index 00000000..65f51112 --- /dev/null +++ b/vendor/github.com/shirou/w32/typedef.go @@ -0,0 +1,901 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package w32 + +import ( + "unsafe" +) + +// From MSDN: Windows Data Types +// http://msdn.microsoft.com/en-us/library/s3f49ktz.aspx +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa383751.aspx +// ATOM WORD +// BOOL int32 +// BOOLEAN byte +// BYTE byte +// CCHAR int8 +// CHAR int8 +// COLORREF DWORD +// DWORD uint32 +// DWORDLONG ULONGLONG +// DWORD_PTR ULONG_PTR +// DWORD32 uint32 +// DWORD64 uint64 +// FLOAT float32 +// HACCEL HANDLE +// HALF_PTR struct{} // ??? +// HANDLE PVOID +// HBITMAP HANDLE +// HBRUSH HANDLE +// HCOLORSPACE HANDLE +// HCONV HANDLE +// HCONVLIST HANDLE +// HCURSOR HANDLE +// HDC HANDLE +// HDDEDATA HANDLE +// HDESK HANDLE +// HDROP HANDLE +// HDWP HANDLE +// HENHMETAFILE HANDLE +// HFILE HANDLE +// HFONT HANDLE +// HGDIOBJ HANDLE +// HGLOBAL HANDLE +// HHOOK HANDLE +// HICON HANDLE +// HINSTANCE HANDLE +// HKEY HANDLE +// HKL HANDLE +// HLOCAL HANDLE +// HMENU HANDLE +// HMETAFILE HANDLE +// HMODULE HANDLE +// HPALETTE HANDLE +// HPEN HANDLE +// HRESULT int32 +// HRGN HANDLE +// HSZ HANDLE +// HWINSTA HANDLE +// HWND HANDLE +// INT int32 +// INT_PTR uintptr +// INT8 int8 +// INT16 int16 +// INT32 int32 +// INT64 int64 +// LANGID WORD +// LCID DWORD +// LCTYPE DWORD +// LGRPID DWORD +// LONG int32 +// LONGLONG int64 +// LONG_PTR uintptr +// LONG32 int32 +// LONG64 int64 +// LPARAM LONG_PTR +// LPBOOL *BOOL +// LPBYTE *BYTE +// LPCOLORREF *COLORREF +// LPCSTR *int8 +// LPCTSTR LPCWSTR +// LPCVOID unsafe.Pointer +// LPCWSTR *WCHAR +// LPDWORD *DWORD +// LPHANDLE *HANDLE +// LPINT *INT +// LPLONG *LONG +// LPSTR *CHAR +// LPTSTR LPWSTR +// LPVOID unsafe.Pointer +// LPWORD *WORD +// LPWSTR *WCHAR +// LRESULT LONG_PTR +// PBOOL *BOOL +// PBOOLEAN *BOOLEAN +// PBYTE *BYTE +// PCHAR *CHAR +// PCSTR *CHAR +// PCTSTR PCWSTR +// PCWSTR *WCHAR +// PDWORD *DWORD +// PDWORDLONG *DWORDLONG +// PDWORD_PTR *DWORD_PTR +// PDWORD32 *DWORD32 +// PDWORD64 *DWORD64 +// PFLOAT *FLOAT +// PHALF_PTR *HALF_PTR +// PHANDLE *HANDLE +// PHKEY *HKEY +// PINT_PTR *INT_PTR +// PINT8 *INT8 +// PINT16 *INT16 +// PINT32 *INT32 +// PINT64 *INT64 +// PLCID *LCID +// PLONG *LONG +// PLONGLONG *LONGLONG +// PLONG_PTR *LONG_PTR +// PLONG32 *LONG32 +// PLONG64 *LONG64 +// POINTER_32 struct{} // ??? +// POINTER_64 struct{} // ??? 
+// POINTER_SIGNED uintptr +// POINTER_UNSIGNED uintptr +// PSHORT *SHORT +// PSIZE_T *SIZE_T +// PSSIZE_T *SSIZE_T +// PSTR *CHAR +// PTBYTE *TBYTE +// PTCHAR *TCHAR +// PTSTR PWSTR +// PUCHAR *UCHAR +// PUHALF_PTR *UHALF_PTR +// PUINT *UINT +// PUINT_PTR *UINT_PTR +// PUINT8 *UINT8 +// PUINT16 *UINT16 +// PUINT32 *UINT32 +// PUINT64 *UINT64 +// PULONG *ULONG +// PULONGLONG *ULONGLONG +// PULONG_PTR *ULONG_PTR +// PULONG32 *ULONG32 +// PULONG64 *ULONG64 +// PUSHORT *USHORT +// PVOID unsafe.Pointer +// PWCHAR *WCHAR +// PWORD *WORD +// PWSTR *WCHAR +// QWORD uint64 +// SC_HANDLE HANDLE +// SC_LOCK LPVOID +// SERVICE_STATUS_HANDLE HANDLE +// SHORT int16 +// SIZE_T ULONG_PTR +// SSIZE_T LONG_PTR +// TBYTE WCHAR +// TCHAR WCHAR +// UCHAR uint8 +// UHALF_PTR struct{} // ??? +// UINT uint32 +// UINT_PTR uintptr +// UINT8 uint8 +// UINT16 uint16 +// UINT32 uint32 +// UINT64 uint64 +// ULONG uint32 +// ULONGLONG uint64 +// ULONG_PTR uintptr +// ULONG32 uint32 +// ULONG64 uint64 +// USHORT uint16 +// USN LONGLONG +// WCHAR uint16 +// WORD uint16 +// WPARAM UINT_PTR +type ( + ATOM uint16 + BOOL int32 + COLORREF uint32 + DWM_FRAME_COUNT uint64 + HACCEL HANDLE + HANDLE uintptr + HBITMAP HANDLE + HBRUSH HANDLE + HCURSOR HANDLE + HDC HANDLE + HDROP HANDLE + HDWP HANDLE + HENHMETAFILE HANDLE + HFONT HANDLE + HGDIOBJ HANDLE + HGLOBAL HANDLE + HGLRC HANDLE + HICON HANDLE + HIMAGELIST HANDLE + HINSTANCE HANDLE + HKEY HANDLE + HKL HANDLE + HMENU HANDLE + HMODULE HANDLE + HMONITOR HANDLE + HPEN HANDLE + HRESULT int32 + HRGN HANDLE + HRSRC HANDLE + HTHUMBNAIL HANDLE + HWND HANDLE + LPCVOID unsafe.Pointer + PVOID unsafe.Pointer + QPC_TIME uint64 +) + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd162805.aspx +type POINT struct { + X, Y int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd162897.aspx +type RECT struct { + Left, Top, Right, Bottom int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms633577.aspx +type WNDCLASSEX struct { + Size uint32 + Style uint32 + WndProc uintptr + ClsExtra int32 + WndExtra int32 + Instance HINSTANCE + Icon HICON + Cursor HCURSOR + Background HBRUSH + MenuName *uint16 + ClassName *uint16 + IconSm HICON +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms644958.aspx +type MSG struct { + Hwnd HWND + Message uint32 + WParam uintptr + LParam uintptr + Time uint32 + Pt POINT +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd145037.aspx +type LOGFONT struct { + Height int32 + Width int32 + Escapement int32 + Orientation int32 + Weight int32 + Italic byte + Underline byte + StrikeOut byte + CharSet byte + OutPrecision byte + ClipPrecision byte + Quality byte + PitchAndFamily byte + FaceName [LF_FACESIZE]uint16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms646839.aspx +type OPENFILENAME struct { + StructSize uint32 + Owner HWND + Instance HINSTANCE + Filter *uint16 + CustomFilter *uint16 + MaxCustomFilter uint32 + FilterIndex uint32 + File *uint16 + MaxFile uint32 + FileTitle *uint16 + MaxFileTitle uint32 + InitialDir *uint16 + Title *uint16 + Flags uint32 + FileOffset uint16 + FileExtension uint16 + DefExt *uint16 + CustData uintptr + FnHook uintptr + TemplateName *uint16 + PvReserved unsafe.Pointer + DwReserved uint32 + FlagsEx uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb773205.aspx +type BROWSEINFO struct { + Owner HWND + Root *uint16 + DisplayName *uint16 + Title *uint16 + Flags uint32 + CallbackFunc uintptr + LParam uintptr + Image int32 +} + 
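+// A minimal usage sketch (illustrative only, not upstream documentation): +// a BROWSEINFO (here zero-valued for brevity) is consumed by +// SHBrowseForFolder in shell32.go, and the returned item-ID list can be +// resolved to a filesystem path: +// +//	var bi BROWSEINFO +//	idl := SHBrowseForFolder(&bi) +//	path := SHGetPathFromIDList(idl) +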
+// http://msdn.microsoft.com/en-us/library/windows/desktop/aa373931.aspx +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms221627.aspx +type VARIANT struct { + VT uint16 // 2 + WReserved1 uint16 // 4 + WReserved2 uint16 // 6 + WReserved3 uint16 // 8 + Val int64 // 16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms221416.aspx +type DISPPARAMS struct { + Rgvarg uintptr + RgdispidNamedArgs uintptr + CArgs uint32 + CNamedArgs uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms221133.aspx +type EXCEPINFO struct { + WCode uint16 + WReserved uint16 + BstrSource *uint16 + BstrDescription *uint16 + BstrHelpFile *uint16 + DwHelpContext uint32 + PvReserved uintptr + PfnDeferredFillIn uintptr + Scode int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd145035.aspx +type LOGBRUSH struct { + LbStyle uint32 + LbColor COLORREF + LbHatch uintptr +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183565.aspx +type DEVMODE struct { + DmDeviceName [CCHDEVICENAME]uint16 + DmSpecVersion uint16 + DmDriverVersion uint16 + DmSize uint16 + DmDriverExtra uint16 + DmFields uint32 + DmOrientation int16 + DmPaperSize int16 + DmPaperLength int16 + DmPaperWidth int16 + DmScale int16 + DmCopies int16 + DmDefaultSource int16 + DmPrintQuality int16 + DmColor int16 + DmDuplex int16 + DmYResolution int16 + DmTTOption int16 + DmCollate int16 + DmFormName [CCHFORMNAME]uint16 + DmLogPixels uint16 + DmBitsPerPel uint32 + DmPelsWidth uint32 + DmPelsHeight uint32 + DmDisplayFlags uint32 + DmDisplayFrequency uint32 + DmICMMethod uint32 + DmICMIntent uint32 + DmMediaType uint32 + DmDitherType uint32 + DmReserved1 uint32 + DmReserved2 uint32 + DmPanningWidth uint32 + DmPanningHeight uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183376.aspx +type BITMAPINFOHEADER struct { + BiSize uint32 + BiWidth int32 + BiHeight int32 + BiPlanes uint16 + BiBitCount uint16 + BiCompression uint32 + BiSizeImage uint32 + BiXPelsPerMeter int32 + BiYPelsPerMeter int32 + BiClrUsed uint32 + BiClrImportant uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd162938.aspx +type RGBQUAD struct { + RgbBlue byte + RgbGreen byte + RgbRed byte + RgbReserved byte +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183375.aspx +type BITMAPINFO struct { + BmiHeader BITMAPINFOHEADER + BmiColors *RGBQUAD +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183371.aspx +type BITMAP struct { + BmType int32 + BmWidth int32 + BmHeight int32 + BmWidthBytes int32 + BmPlanes uint16 + BmBitsPixel uint16 + BmBits unsafe.Pointer +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183567.aspx +type DIBSECTION struct { + DsBm BITMAP + DsBmih BITMAPINFOHEADER + DsBitfields [3]uint32 + DshSection HANDLE + DsOffset uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd162607.aspx +type ENHMETAHEADER struct { + IType uint32 + NSize uint32 + RclBounds RECT + RclFrame RECT + DSignature uint32 + NVersion uint32 + NBytes uint32 + NRecords uint32 + NHandles uint16 + SReserved uint16 + NDescription uint32 + OffDescription uint32 + NPalEntries uint32 + SzlDevice SIZE + SzlMillimeters SIZE + CbPixelFormat uint32 + OffPixelFormat uint32 + BOpenGL uint32 + SzlMicrometers SIZE +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd145106.aspx +type SIZE struct { + CX, CY int32 +} + +// 
http://msdn.microsoft.com/en-us/library/windows/desktop/dd145132.aspx +type TEXTMETRIC struct { + TmHeight int32 + TmAscent int32 + TmDescent int32 + TmInternalLeading int32 + TmExternalLeading int32 + TmAveCharWidth int32 + TmMaxCharWidth int32 + TmWeight int32 + TmOverhang int32 + TmDigitizedAspectX int32 + TmDigitizedAspectY int32 + TmFirstChar uint16 + TmLastChar uint16 + TmDefaultChar uint16 + TmBreakChar uint16 + TmItalic byte + TmUnderlined byte + TmStruckOut byte + TmPitchAndFamily byte + TmCharSet byte +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183574.aspx +type DOCINFO struct { + CbSize int32 + LpszDocName *uint16 + LpszOutput *uint16 + LpszDatatype *uint16 + FwType uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb775514.aspx +type NMHDR struct { + HwndFrom HWND + IdFrom uintptr + Code uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774743.aspx +type LVCOLUMN struct { + Mask uint32 + Fmt int32 + Cx int32 + PszText *uint16 + CchTextMax int32 + ISubItem int32 + IImage int32 + IOrder int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774760.aspx +type LVITEM struct { + Mask uint32 + IItem int32 + ISubItem int32 + State uint32 + StateMask uint32 + PszText *uint16 + CchTextMax int32 + IImage int32 + LParam uintptr + IIndent int32 + IGroupId int32 + CColumns uint32 + PuColumns uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774754.aspx +type LVHITTESTINFO struct { + Pt POINT + Flags uint32 + IItem int32 + ISubItem int32 + IGroup int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774771.aspx +type NMITEMACTIVATE struct { + Hdr NMHDR + IItem int32 + ISubItem int32 + UNewState uint32 + UOldState uint32 + UChanged uint32 + PtAction POINT + LParam uintptr + UKeyFlags uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774773.aspx +type NMLISTVIEW struct { + Hdr NMHDR + IItem int32 + ISubItem int32 + UNewState uint32 + UOldState uint32 + UChanged uint32 + PtAction POINT + LParam uintptr +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774780.aspx +type NMLVDISPINFO struct { + Hdr NMHDR + Item LVITEM +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb775507.aspx +type INITCOMMONCONTROLSEX struct { + DwSize uint32 + DwICC uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb760256.aspx +type TOOLINFO struct { + CbSize uint32 + UFlags uint32 + Hwnd HWND + UId uintptr + Rect RECT + Hinst HINSTANCE + LpszText *uint16 + LParam uintptr + LpReserved unsafe.Pointer +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms645604.aspx +type TRACKMOUSEEVENT struct { + CbSize uint32 + DwFlags uint32 + HwndTrack HWND + DwHoverTime uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms534067.aspx +type GdiplusStartupInput struct { + GdiplusVersion uint32 + DebugEventCallback uintptr + SuppressBackgroundThread BOOL + SuppressExternalCodecs BOOL +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms534068.aspx +type GdiplusStartupOutput struct { + NotificationHook uintptr + NotificationUnhook uintptr +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd162768.aspx +type PAINTSTRUCT struct { + Hdc HDC + FErase BOOL + RcPaint RECT + FRestore BOOL + FIncUpdate BOOL + RgbReserved [32]byte +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa363646.aspx +type EVENTLOGRECORD struct { + Length uint32 + Reserved uint32 + 
RecordNumber uint32 + TimeGenerated uint32 + TimeWritten uint32 + EventID uint32 + EventType uint16 + NumStrings uint16 + EventCategory uint16 + ReservedFlags uint16 + ClosingRecordNumber uint32 + StringOffset uint32 + UserSidLength uint32 + UserSidOffset uint32 + DataLength uint32 + DataOffset uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms685996.aspx +type SERVICE_STATUS struct { + DwServiceType uint32 + DwCurrentState uint32 + DwControlsAccepted uint32 + DwWin32ExitCode uint32 + DwServiceSpecificExitCode uint32 + DwCheckPoint uint32 + DwWaitHint uint32 +} +type PROCESSENTRY32 struct { + DwSize uint32 + CntUsage uint32 + Th32ProcessID uint32 + Th32DefaultHeapID uintptr + Th32ModuleID uint32 + CntThreads uint32 + Th32ParentProcessID uint32 + PcPriClassBase int32 + DwFlags uint32 + SzExeFile [MAX_PATH]uint16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms684225.aspx +type MODULEENTRY32 struct { + Size uint32 + ModuleID uint32 + ProcessID uint32 + GlblcntUsage uint32 + ProccntUsage uint32 + ModBaseAddr *uint8 + ModBaseSize uint32 + HModule HMODULE + SzModule [MAX_MODULE_NAME32 + 1]uint16 + SzExePath [MAX_PATH]uint16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms724284.aspx +type FILETIME struct { + DwLowDateTime uint32 + DwHighDateTime uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119.aspx +type COORD struct { + X, Y int16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311.aspx +type SMALL_RECT struct { + Left, Top, Right, Bottom int16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093.aspx +type CONSOLE_SCREEN_BUFFER_INFO struct { + DwSize COORD + DwCursorPosition COORD + WAttributes uint16 + SrWindow SMALL_RECT + DwMaximumWindowSize COORD +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb773244.aspx +type MARGINS struct { + CxLeftWidth, CxRightWidth, CyTopHeight, CyBottomHeight int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa969500.aspx +type DWM_BLURBEHIND struct { + DwFlags uint32 + fEnable BOOL + hRgnBlur HRGN + fTransitionOnMaximized BOOL +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa969501.aspx +type DWM_PRESENT_PARAMETERS struct { + cbSize uint32 + fQueue BOOL + cRefreshStart DWM_FRAME_COUNT + cBuffer uint32 + fUseSourceRate BOOL + rateSource UNSIGNED_RATIO + cRefreshesPerFrame uint32 + eSampling DWM_SOURCE_FRAME_SAMPLING +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa969502.aspx +type DWM_THUMBNAIL_PROPERTIES struct { + dwFlags uint32 + rcDestination RECT + rcSource RECT + opacity byte + fVisible BOOL + fSourceClientAreaOnly BOOL +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa969503.aspx +type DWM_TIMING_INFO struct { + cbSize uint32 + rateRefresh UNSIGNED_RATIO + qpcRefreshPeriod QPC_TIME + rateCompose UNSIGNED_RATIO + qpcVBlank QPC_TIME + cRefresh DWM_FRAME_COUNT + cDXRefresh uint32 + qpcCompose QPC_TIME + cFrame DWM_FRAME_COUNT + cDXPresent uint32 + cRefreshFrame DWM_FRAME_COUNT + cFrameSubmitted DWM_FRAME_COUNT + cDXPresentSubmitted uint32 + cFrameConfirmed DWM_FRAME_COUNT + cDXPresentConfirmed uint32 + cRefreshConfirmed DWM_FRAME_COUNT + cDXRefreshConfirmed uint32 + cFramesLate DWM_FRAME_COUNT + cFramesOutstanding uint32 + cFrameDisplayed DWM_FRAME_COUNT + qpcFrameDisplayed QPC_TIME + cRefreshFrameDisplayed DWM_FRAME_COUNT + cFrameComplete DWM_FRAME_COUNT + qpcFrameComplete QPC_TIME + cFramePending DWM_FRAME_COUNT + qpcFramePending 
QPC_TIME + cFramesDisplayed DWM_FRAME_COUNT + cFramesComplete DWM_FRAME_COUNT + cFramesPending DWM_FRAME_COUNT + cFramesAvailable DWM_FRAME_COUNT + cFramesDropped DWM_FRAME_COUNT + cFramesMissed DWM_FRAME_COUNT + cRefreshNextDisplayed DWM_FRAME_COUNT + cRefreshNextPresented DWM_FRAME_COUNT + cRefreshesDisplayed DWM_FRAME_COUNT + cRefreshesPresented DWM_FRAME_COUNT + cRefreshStarted DWM_FRAME_COUNT + cPixelsReceived uint64 + cPixelsDrawn uint64 + cBuffersEmpty DWM_FRAME_COUNT +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd389402.aspx +type MilMatrix3x2D struct { + S_11, S_12, S_21, S_22 float64 + DX, DY float64 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa969505.aspx +type UNSIGNED_RATIO struct { + uiNumerator uint32 + uiDenominator uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms632603.aspx +type CREATESTRUCT struct { + CreateParams uintptr + Instance HINSTANCE + Menu HMENU + Parent HWND + Cy, Cx int32 + Y, X int32 + Style int32 + Name *uint16 + Class *uint16 + dwExStyle uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd145065.aspx +type MONITORINFO struct { + CbSize uint32 + RcMonitor RECT + RcWork RECT + DwFlags uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd145066.aspx +type MONITORINFOEX struct { + MONITORINFO + SzDevice [CCHDEVICENAME]uint16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd368826.aspx +type PIXELFORMATDESCRIPTOR struct { + Size uint16 + Version uint16 + DwFlags uint32 + IPixelType byte + ColorBits byte + RedBits, RedShift byte + GreenBits, GreenShift byte + BlueBits, BlueShift byte + AlphaBits, AlphaShift byte + AccumBits byte + AccumRedBits byte + AccumGreenBits byte + AccumBlueBits byte + AccumAlphaBits byte + DepthBits, StencilBits byte + AuxBuffers byte + ILayerType byte + Reserved byte + DwLayerMask uint32 + DwVisibleMask uint32 + DwDamageMask uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms646270(v=vs.85).aspx +type INPUT struct { + Type uint32 + Mi MOUSEINPUT + Ki KEYBDINPUT + Hi HARDWAREINPUT +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms646273(v=vs.85).aspx +type MOUSEINPUT struct { + Dx int32 + Dy int32 + MouseData uint32 + DwFlags uint32 + Time uint32 + DwExtraInfo uintptr +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms646271(v=vs.85).aspx +type KEYBDINPUT struct { + WVk uint16 + WScan uint16 + DwFlags uint32 + Time uint32 + DwExtraInfo uintptr +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms646269(v=vs.85).aspx +type HARDWAREINPUT struct { + UMsg uint32 + WParamL uint16 + WParamH uint16 +} + +type KbdInput struct { + typ uint32 + ki KEYBDINPUT +} + +type MouseInput struct { + typ uint32 + mi MOUSEINPUT +} + +type HardwareInput struct { + typ uint32 + hi HARDWAREINPUT +} diff --git a/vendor/github.com/shirou/w32/user32.go b/vendor/github.com/shirou/w32/user32.go new file mode 100644 index 00000000..6aa7cd70 --- /dev/null +++ b/vendor/github.com/shirou/w32/user32.go @@ -0,0 +1,950 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
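+ +// user32.go wraps window creation and management, the message loop, +// rectangle helpers, dialogs, clipboard access, painting, keyboard and +// mouse input, and monitor enumeration from user32.dll.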
+ +// +build windows + +package w32 + +import ( + "fmt" + "syscall" + "unsafe" +) + +var ( + moduser32 = syscall.NewLazyDLL("user32.dll") + + procRegisterClassEx = moduser32.NewProc("RegisterClassExW") + procLoadIcon = moduser32.NewProc("LoadIconW") + procLoadCursor = moduser32.NewProc("LoadCursorW") + procShowWindow = moduser32.NewProc("ShowWindow") + procUpdateWindow = moduser32.NewProc("UpdateWindow") + procCreateWindowEx = moduser32.NewProc("CreateWindowExW") + procAdjustWindowRect = moduser32.NewProc("AdjustWindowRect") + procAdjustWindowRectEx = moduser32.NewProc("AdjustWindowRectEx") + procDestroyWindow = moduser32.NewProc("DestroyWindow") + procDefWindowProc = moduser32.NewProc("DefWindowProcW") + procDefDlgProc = moduser32.NewProc("DefDlgProcW") + procPostQuitMessage = moduser32.NewProc("PostQuitMessage") + procGetMessage = moduser32.NewProc("GetMessageW") + procTranslateMessage = moduser32.NewProc("TranslateMessage") + procDispatchMessage = moduser32.NewProc("DispatchMessageW") + procSendMessage = moduser32.NewProc("SendMessageW") + procPostMessage = moduser32.NewProc("PostMessageW") + procWaitMessage = moduser32.NewProc("WaitMessage") + procSetWindowText = moduser32.NewProc("SetWindowTextW") + procGetWindowTextLength = moduser32.NewProc("GetWindowTextLengthW") + procGetWindowText = moduser32.NewProc("GetWindowTextW") + procGetWindowRect = moduser32.NewProc("GetWindowRect") + procMoveWindow = moduser32.NewProc("MoveWindow") + procScreenToClient = moduser32.NewProc("ScreenToClient") + procCallWindowProc = moduser32.NewProc("CallWindowProcW") + procSetWindowLong = moduser32.NewProc("SetWindowLongW") + procSetWindowLongPtr = moduser32.NewProc("SetWindowLongW") + procGetWindowLong = moduser32.NewProc("GetWindowLongW") + procGetWindowLongPtr = moduser32.NewProc("GetWindowLongW") + procEnableWindow = moduser32.NewProc("EnableWindow") + procIsWindowEnabled = moduser32.NewProc("IsWindowEnabled") + procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procSetFocus = moduser32.NewProc("SetFocus") + procInvalidateRect = moduser32.NewProc("InvalidateRect") + procGetClientRect = moduser32.NewProc("GetClientRect") + procGetDC = moduser32.NewProc("GetDC") + procReleaseDC = moduser32.NewProc("ReleaseDC") + procSetCapture = moduser32.NewProc("SetCapture") + procReleaseCapture = moduser32.NewProc("ReleaseCapture") + procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") + procMessageBox = moduser32.NewProc("MessageBoxW") + procGetSystemMetrics = moduser32.NewProc("GetSystemMetrics") + procCopyRect = moduser32.NewProc("CopyRect") + procEqualRect = moduser32.NewProc("EqualRect") + procInflateRect = moduser32.NewProc("InflateRect") + procIntersectRect = moduser32.NewProc("IntersectRect") + procIsRectEmpty = moduser32.NewProc("IsRectEmpty") + procOffsetRect = moduser32.NewProc("OffsetRect") + procPtInRect = moduser32.NewProc("PtInRect") + procSetRect = moduser32.NewProc("SetRect") + procSetRectEmpty = moduser32.NewProc("SetRectEmpty") + procSubtractRect = moduser32.NewProc("SubtractRect") + procUnionRect = moduser32.NewProc("UnionRect") + procCreateDialogParam = moduser32.NewProc("CreateDialogParamW") + procDialogBoxParam = moduser32.NewProc("DialogBoxParamW") + procGetDlgItem = moduser32.NewProc("GetDlgItem") + procDrawIcon = moduser32.NewProc("DrawIcon") + procClientToScreen = moduser32.NewProc("ClientToScreen") + procIsDialogMessage = moduser32.NewProc("IsDialogMessageW") + procIsWindow = moduser32.NewProc("IsWindow") + procEndDialog = moduser32.NewProc("EndDialog") + 
procPeekMessage = moduser32.NewProc("PeekMessageW") + procTranslateAccelerator = moduser32.NewProc("TranslateAcceleratorW") + procSetWindowPos = moduser32.NewProc("SetWindowPos") + procFillRect = moduser32.NewProc("FillRect") + procDrawText = moduser32.NewProc("DrawTextW") + procAddClipboardFormatListener = moduser32.NewProc("AddClipboardFormatListener") + procRemoveClipboardFormatListener = moduser32.NewProc("RemoveClipboardFormatListener") + procOpenClipboard = moduser32.NewProc("OpenClipboard") + procCloseClipboard = moduser32.NewProc("CloseClipboard") + procEnumClipboardFormats = moduser32.NewProc("EnumClipboardFormats") + procGetClipboardData = moduser32.NewProc("GetClipboardData") + procSetClipboardData = moduser32.NewProc("SetClipboardData") + procEmptyClipboard = moduser32.NewProc("EmptyClipboard") + procGetClipboardFormatName = moduser32.NewProc("GetClipboardFormatNameW") + procIsClipboardFormatAvailable = moduser32.NewProc("IsClipboardFormatAvailable") + procBeginPaint = moduser32.NewProc("BeginPaint") + procEndPaint = moduser32.NewProc("EndPaint") + procGetKeyboardState = moduser32.NewProc("GetKeyboardState") + procMapVirtualKey = moduser32.NewProc("MapVirtualKeyExW") + procGetAsyncKeyState = moduser32.NewProc("GetAsyncKeyState") + procToAscii = moduser32.NewProc("ToAscii") + procSwapMouseButton = moduser32.NewProc("SwapMouseButton") + procGetCursorPos = moduser32.NewProc("GetCursorPos") + procSetCursorPos = moduser32.NewProc("SetCursorPos") + procSetCursor = moduser32.NewProc("SetCursor") + procCreateIcon = moduser32.NewProc("CreateIcon") + procDestroyIcon = moduser32.NewProc("DestroyIcon") + procMonitorFromPoint = moduser32.NewProc("MonitorFromPoint") + procMonitorFromRect = moduser32.NewProc("MonitorFromRect") + procMonitorFromWindow = moduser32.NewProc("MonitorFromWindow") + procGetMonitorInfo = moduser32.NewProc("GetMonitorInfoW") + procEnumDisplayMonitors = moduser32.NewProc("EnumDisplayMonitors") + procEnumDisplaySettingsEx = moduser32.NewProc("EnumDisplaySettingsExW") + procChangeDisplaySettingsEx = moduser32.NewProc("ChangeDisplaySettingsExW") + procSendInput = moduser32.NewProc("SendInput") +) + +func RegisterClassEx(wndClassEx *WNDCLASSEX) ATOM { + ret, _, _ := procRegisterClassEx.Call(uintptr(unsafe.Pointer(wndClassEx))) + return ATOM(ret) +} + +func LoadIcon(instance HINSTANCE, iconName *uint16) HICON { + ret, _, _ := procLoadIcon.Call( + uintptr(instance), + uintptr(unsafe.Pointer(iconName))) + + return HICON(ret) + +} + +func LoadCursor(instance HINSTANCE, cursorName *uint16) HCURSOR { + ret, _, _ := procLoadCursor.Call( + uintptr(instance), + uintptr(unsafe.Pointer(cursorName))) + + return HCURSOR(ret) + +} + +func ShowWindow(hwnd HWND, cmdshow int) bool { + ret, _, _ := procShowWindow.Call( + uintptr(hwnd), + uintptr(cmdshow)) + + return ret != 0 + +} + +func UpdateWindow(hwnd HWND) bool { + ret, _, _ := procUpdateWindow.Call( + uintptr(hwnd)) + return ret != 0 +} + +func CreateWindowEx(exStyle uint, className, windowName *uint16, + style uint, x, y, width, height int, parent HWND, menu HMENU, + instance HINSTANCE, param unsafe.Pointer) HWND { + ret, _, _ := procCreateWindowEx.Call( + uintptr(exStyle), + uintptr(unsafe.Pointer(className)), + uintptr(unsafe.Pointer(windowName)), + uintptr(style), + uintptr(x), + uintptr(y), + uintptr(width), + uintptr(height), + uintptr(parent), + uintptr(menu), + uintptr(instance), + uintptr(param)) + + return HWND(ret) +} + +func AdjustWindowRectEx(rect *RECT, style uint, menu bool, exStyle uint) bool { + ret, _, _ := 
procAdjustWindowRectEx.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(style), + uintptr(BoolToBOOL(menu)), + uintptr(exStyle)) + + return ret != 0 +} + +func AdjustWindowRect(rect *RECT, style uint, menu bool) bool { + ret, _, _ := procAdjustWindowRect.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(style), + uintptr(BoolToBOOL(menu))) + + return ret != 0 +} + +func DestroyWindow(hwnd HWND) bool { + ret, _, _ := procDestroyWindow.Call( + uintptr(hwnd)) + + return ret != 0 +} + +func DefWindowProc(hwnd HWND, msg uint32, wParam, lParam uintptr) uintptr { + ret, _, _ := procDefWindowProc.Call( + uintptr(hwnd), + uintptr(msg), + wParam, + lParam) + + return ret +} + +func DefDlgProc(hwnd HWND, msg uint32, wParam, lParam uintptr) uintptr { + ret, _, _ := procDefDlgProc.Call( + uintptr(hwnd), + uintptr(msg), + wParam, + lParam) + + return ret +} + +func PostQuitMessage(exitCode int) { + procPostQuitMessage.Call( + uintptr(exitCode)) +} + +func GetMessage(msg *MSG, hwnd HWND, msgFilterMin, msgFilterMax uint32) int { + ret, _, _ := procGetMessage.Call( + uintptr(unsafe.Pointer(msg)), + uintptr(hwnd), + uintptr(msgFilterMin), + uintptr(msgFilterMax)) + + return int(ret) +} + +func TranslateMessage(msg *MSG) bool { + ret, _, _ := procTranslateMessage.Call( + uintptr(unsafe.Pointer(msg))) + + return ret != 0 + +} + +func DispatchMessage(msg *MSG) uintptr { + ret, _, _ := procDispatchMessage.Call( + uintptr(unsafe.Pointer(msg))) + + return ret + +} + +func SendMessage(hwnd HWND, msg uint32, wParam, lParam uintptr) uintptr { + ret, _, _ := procSendMessage.Call( + uintptr(hwnd), + uintptr(msg), + wParam, + lParam) + + return ret +} + +func PostMessage(hwnd HWND, msg uint32, wParam, lParam uintptr) bool { + ret, _, _ := procPostMessage.Call( + uintptr(hwnd), + uintptr(msg), + wParam, + lParam) + + return ret != 0 +} + +func WaitMessage() bool { + ret, _, _ := procWaitMessage.Call() + return ret != 0 +} + +func SetWindowText(hwnd HWND, text string) { + procSetWindowText.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(text)))) +} + +func GetWindowTextLength(hwnd HWND) int { + ret, _, _ := procGetWindowTextLength.Call( + uintptr(hwnd)) + + return int(ret) +} + +func GetWindowText(hwnd HWND) string { + textLen := GetWindowTextLength(hwnd) + 1 + + buf := make([]uint16, textLen) + procGetWindowText.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(textLen)) + + return syscall.UTF16ToString(buf) +} + +func GetWindowRect(hwnd HWND) *RECT { + var rect RECT + procGetWindowRect.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&rect))) + + return &rect +} + +func MoveWindow(hwnd HWND, x, y, width, height int, repaint bool) bool { + ret, _, _ := procMoveWindow.Call( + uintptr(hwnd), + uintptr(x), + uintptr(y), + uintptr(width), + uintptr(height), + uintptr(BoolToBOOL(repaint))) + + return ret != 0 + +} + +func ScreenToClient(hwnd HWND, x, y int) (X, Y int, ok bool) { + pt := POINT{X: int32(x), Y: int32(y)} + ret, _, _ := procScreenToClient.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&pt))) + + return int(pt.X), int(pt.Y), ret != 0 +} + +func CallWindowProc(preWndProc uintptr, hwnd HWND, msg uint32, wParam, lParam uintptr) uintptr { + ret, _, _ := procCallWindowProc.Call( + preWndProc, + uintptr(hwnd), + uintptr(msg), + wParam, + lParam) + + return ret +} + +func SetWindowLong(hwnd HWND, index int, value uint32) uint32 { + ret, _, _ := procSetWindowLong.Call( + uintptr(hwnd), + uintptr(index), + uintptr(value)) + + return uint32(ret) +} + +func SetWindowLongPtr(hwnd 
HWND, index int, value uintptr) uintptr { + ret, _, _ := procSetWindowLongPtr.Call( + uintptr(hwnd), + uintptr(index), + value) + + return ret +} + +func GetWindowLong(hwnd HWND, index int) int32 { + ret, _, _ := procGetWindowLong.Call( + uintptr(hwnd), + uintptr(index)) + + return int32(ret) +} + +func GetWindowLongPtr(hwnd HWND, index int) uintptr { + ret, _, _ := procGetWindowLongPtr.Call( + uintptr(hwnd), + uintptr(index)) + + return ret +} + +func EnableWindow(hwnd HWND, b bool) bool { + ret, _, _ := procEnableWindow.Call( + uintptr(hwnd), + uintptr(BoolToBOOL(b))) + return ret != 0 +} + +func IsWindowEnabled(hwnd HWND) bool { + ret, _, _ := procIsWindowEnabled.Call( + uintptr(hwnd)) + + return ret != 0 +} + +func IsWindowVisible(hwnd HWND) bool { + ret, _, _ := procIsWindowVisible.Call( + uintptr(hwnd)) + + return ret != 0 +} + +func SetFocus(hwnd HWND) HWND { + ret, _, _ := procSetFocus.Call( + uintptr(hwnd)) + + return HWND(ret) +} + +func InvalidateRect(hwnd HWND, rect *RECT, erase bool) bool { + ret, _, _ := procInvalidateRect.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(rect)), + uintptr(BoolToBOOL(erase))) + + return ret != 0 +} + +func GetClientRect(hwnd HWND) *RECT { + var rect RECT + ret, _, _ := procGetClientRect.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&rect))) + + if ret == 0 { + panic(fmt.Sprintf("GetClientRect(%d) failed", hwnd)) + } + + return &rect +} + +func GetDC(hwnd HWND) HDC { + ret, _, _ := procGetDC.Call( + uintptr(hwnd)) + + return HDC(ret) +} + +func ReleaseDC(hwnd HWND, hDC HDC) bool { + ret, _, _ := procReleaseDC.Call( + uintptr(hwnd), + uintptr(hDC)) + + return ret != 0 +} + +func SetCapture(hwnd HWND) HWND { + ret, _, _ := procSetCapture.Call( + uintptr(hwnd)) + + return HWND(ret) +} + +func ReleaseCapture() bool { + ret, _, _ := procReleaseCapture.Call() + + return ret != 0 +} + +func GetWindowThreadProcessId(hwnd HWND) (HANDLE, int) { + var processId int + ret, _, _ := procGetWindowThreadProcessId.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&processId))) + + return HANDLE(ret), processId +} + +func MessageBox(hwnd HWND, title, caption string, flags uint) int { + ret, _, _ := procMessageBox.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(title))), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(caption))), + uintptr(flags)) + + return int(ret) +} + +func GetSystemMetrics(index int) int { + ret, _, _ := procGetSystemMetrics.Call( + uintptr(index)) + + return int(ret) +} + +func CopyRect(dst, src *RECT) bool { + ret, _, _ := procCopyRect.Call( + uintptr(unsafe.Pointer(dst)), + uintptr(unsafe.Pointer(src))) + + return ret != 0 +} + +func EqualRect(rect1, rect2 *RECT) bool { + ret, _, _ := procEqualRect.Call( + uintptr(unsafe.Pointer(rect1)), + uintptr(unsafe.Pointer(rect2))) + + return ret != 0 +} + +func InflateRect(rect *RECT, dx, dy int) bool { + ret, _, _ := procInflateRect.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(dx), + uintptr(dy)) + + return ret != 0 +} + +func IntersectRect(dst, src1, src2 *RECT) bool { + ret, _, _ := procIntersectRect.Call( + uintptr(unsafe.Pointer(dst)), + uintptr(unsafe.Pointer(src1)), + uintptr(unsafe.Pointer(src2))) + + return ret != 0 +} + +func IsRectEmpty(rect *RECT) bool { + ret, _, _ := procIsRectEmpty.Call( + uintptr(unsafe.Pointer(rect))) + + return ret != 0 +} + +func OffsetRect(rect *RECT, dx, dy int) bool { + ret, _, _ := procOffsetRect.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(dx), + uintptr(dy)) + + return ret != 0 +} + +func PtInRect(rect *RECT, x, y int) bool 
{ + pt := POINT{X: int32(x), Y: int32(y)} + ret, _, _ := procPtInRect.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(unsafe.Pointer(&pt))) + + return ret != 0 +} + +func SetRect(rect *RECT, left, top, right, bottom int) bool { + ret, _, _ := procSetRect.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(left), + uintptr(top), + uintptr(right), + uintptr(bottom)) + + return ret != 0 +} + +func SetRectEmpty(rect *RECT) bool { + ret, _, _ := procSetRectEmpty.Call( + uintptr(unsafe.Pointer(rect))) + + return ret != 0 +} + +func SubtractRect(dst, src1, src2 *RECT) bool { + ret, _, _ := procSubtractRect.Call( + uintptr(unsafe.Pointer(dst)), + uintptr(unsafe.Pointer(src1)), + uintptr(unsafe.Pointer(src2))) + + return ret != 0 +} + +func UnionRect(dst, src1, src2 *RECT) bool { + ret, _, _ := procUnionRect.Call( + uintptr(unsafe.Pointer(dst)), + uintptr(unsafe.Pointer(src1)), + uintptr(unsafe.Pointer(src2))) + + return ret != 0 +} + +func CreateDialog(hInstance HINSTANCE, lpTemplate *uint16, hWndParent HWND, lpDialogProc uintptr) HWND { + ret, _, _ := procCreateDialogParam.Call( + uintptr(hInstance), + uintptr(unsafe.Pointer(lpTemplate)), + uintptr(hWndParent), + lpDialogProc, + 0) + + return HWND(ret) +} + +func DialogBox(hInstance HINSTANCE, lpTemplateName *uint16, hWndParent HWND, lpDialogProc uintptr) int { + ret, _, _ := procDialogBoxParam.Call( + uintptr(hInstance), + uintptr(unsafe.Pointer(lpTemplateName)), + uintptr(hWndParent), + lpDialogProc, + 0) + + return int(ret) +} + +func GetDlgItem(hDlg HWND, nIDDlgItem int) HWND { + ret, _, _ := procGetDlgItem.Call( + uintptr(unsafe.Pointer(hDlg)), + uintptr(nIDDlgItem)) + + return HWND(ret) +} + +func DrawIcon(hDC HDC, x, y int, hIcon HICON) bool { + ret, _, _ := procDrawIcon.Call( + uintptr(unsafe.Pointer(hDC)), + uintptr(x), + uintptr(y), + uintptr(unsafe.Pointer(hIcon))) + + return ret != 0 +} + +func ClientToScreen(hwnd HWND, x, y int) (int, int) { + pt := POINT{X: int32(x), Y: int32(y)} + + procClientToScreen.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&pt))) + + return int(pt.X), int(pt.Y) +} + +func IsDialogMessage(hwnd HWND, msg *MSG) bool { + ret, _, _ := procIsDialogMessage.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(msg))) + + return ret != 0 +} + +func IsWindow(hwnd HWND) bool { + ret, _, _ := procIsWindow.Call( + uintptr(hwnd)) + + return ret != 0 +} + +func EndDialog(hwnd HWND, nResult uintptr) bool { + ret, _, _ := procEndDialog.Call( + uintptr(hwnd), + nResult) + + return ret != 0 +} + +func PeekMessage(lpMsg *MSG, hwnd HWND, wMsgFilterMin, wMsgFilterMax, wRemoveMsg uint32) bool { + ret, _, _ := procPeekMessage.Call( + uintptr(unsafe.Pointer(lpMsg)), + uintptr(hwnd), + uintptr(wMsgFilterMin), + uintptr(wMsgFilterMax), + uintptr(wRemoveMsg)) + + return ret != 0 +} + +func TranslateAccelerator(hwnd HWND, hAccTable HACCEL, lpMsg *MSG) bool { + ret, _, _ := procTranslateAccelerator.Call( + uintptr(hwnd), + uintptr(hAccTable), + uintptr(unsafe.Pointer(lpMsg))) + + return ret != 0 +} + +func SetWindowPos(hwnd, hWndInsertAfter HWND, x, y, cx, cy int, uFlags uint) bool { + ret, _, _ := procSetWindowPos.Call( + uintptr(hwnd), + uintptr(hWndInsertAfter), + uintptr(x), + uintptr(y), + uintptr(cx), + uintptr(cy), + uintptr(uFlags)) + + return ret != 0 +} + +func FillRect(hDC HDC, lprc *RECT, hbr HBRUSH) bool { + ret, _, _ := procFillRect.Call( + uintptr(hDC), + uintptr(unsafe.Pointer(lprc)), + uintptr(hbr)) + + return ret != 0 +} + +func DrawText(hDC HDC, text string, uCount int, lpRect *RECT, uFormat uint) int { + ret, _, _ :=
procDrawText.Call( + uintptr(hDC), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(text))), + uintptr(uCount), + uintptr(unsafe.Pointer(lpRect)), + uintptr(uFormat)) + + return int(ret) +} + +func AddClipboardFormatListener(hwnd HWND) bool { + ret, _, _ := procAddClipboardFormatListener.Call( + uintptr(hwnd)) + return ret != 0 +} + +func RemoveClipboardFormatListener(hwnd HWND) bool { + ret, _, _ := procRemoveClipboardFormatListener.Call( + uintptr(hwnd)) + return ret != 0 +} + +func OpenClipboard(hWndNewOwner HWND) bool { + ret, _, _ := procOpenClipboard.Call( + uintptr(hWndNewOwner)) + return ret != 0 +} + +func CloseClipboard() bool { + ret, _, _ := procCloseClipboard.Call() + return ret != 0 +} + +func EnumClipboardFormats(format uint) uint { + ret, _, _ := procEnumClipboardFormats.Call( + uintptr(format)) + return uint(ret) +} + +func GetClipboardData(uFormat uint) HANDLE { + ret, _, _ := procGetClipboardData.Call( + uintptr(uFormat)) + return HANDLE(ret) +} + +func SetClipboardData(uFormat uint, hMem HANDLE) HANDLE { + ret, _, _ := procSetClipboardData.Call( + uintptr(uFormat), + uintptr(hMem)) + return HANDLE(ret) +} + +func EmptyClipboard() bool { + ret, _, _ := procEmptyClipboard.Call() + return ret != 0 +} + +func GetClipboardFormatName(format uint) (string, bool) { + cchMaxCount := 255 + buf := make([]uint16, cchMaxCount) + ret, _, _ := procGetClipboardFormatName.Call( + uintptr(format), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(cchMaxCount)) + + if ret > 0 { + return syscall.UTF16ToString(buf), true + } + + return "Requested format does not exist or is predefined", false +} + +func IsClipboardFormatAvailable(format uint) bool { + ret, _, _ := procIsClipboardFormatAvailable.Call(uintptr(format)) + return ret != 0 +} + +func BeginPaint(hwnd HWND, paint *PAINTSTRUCT) HDC { + ret, _, _ := procBeginPaint.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(paint))) + return HDC(ret) +} + +func EndPaint(hwnd HWND, paint *PAINTSTRUCT) { + procEndPaint.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(paint))) +} + +func GetKeyboardState(lpKeyState *[]byte) bool { + ret, _, _ := procGetKeyboardState.Call( + uintptr(unsafe.Pointer(&(*lpKeyState)[0]))) + return ret != 0 +} + +func MapVirtualKeyEx(uCode, uMapType uint, dwhkl HKL) uint { + ret, _, _ := procMapVirtualKey.Call( + uintptr(uCode), + uintptr(uMapType), + uintptr(dwhkl)) + return uint(ret) +} + +func GetAsyncKeyState(vKey int) uint16 { + ret, _, _ := procGetAsyncKeyState.Call(uintptr(vKey)) + return uint16(ret) +} + +func ToAscii(uVirtKey, uScanCode uint, lpKeyState *byte, lpChar *uint16, uFlags uint) int { + ret, _, _ := procToAscii.Call( + uintptr(uVirtKey), + uintptr(uScanCode), + uintptr(unsafe.Pointer(lpKeyState)), + uintptr(unsafe.Pointer(lpChar)), + uintptr(uFlags)) + return int(ret) +} + +func SwapMouseButton(fSwap bool) bool { + ret, _, _ := procSwapMouseButton.Call( + uintptr(BoolToBOOL(fSwap))) + return ret != 0 +} + +func GetCursorPos() (x, y int, ok bool) { + pt := POINT{} + ret, _, _ := procGetCursorPos.Call(uintptr(unsafe.Pointer(&pt))) + return int(pt.X), int(pt.Y), ret != 0 +} + +func SetCursorPos(x, y int) bool { + ret, _, _ := procSetCursorPos.Call( + uintptr(x), + uintptr(y), + ) + return ret != 0 +} + +func SetCursor(cursor HCURSOR) HCURSOR { + ret, _, _ := procSetCursor.Call( + uintptr(cursor), + ) + return HCURSOR(ret) +} + +func CreateIcon(instance HINSTANCE, nWidth, nHeight int, cPlanes, cBitsPerPixel byte, ANDbits, XORbits *byte) HICON { + ret, _, _ := procCreateIcon.Call( + uintptr(instance), +
uintptr(nWidth), + uintptr(nHeight), + uintptr(cPlanes), + uintptr(cBitsPerPixel), + uintptr(unsafe.Pointer(ANDbits)), + uintptr(unsafe.Pointer(XORbits)), + ) + return HICON(ret) +} + +func DestroyIcon(icon HICON) bool { + ret, _, _ := procDestroyIcon.Call( + uintptr(icon), + ) + return ret != 0 +} + +func MonitorFromPoint(x, y int, dwFlags uint32) HMONITOR { + ret, _, _ := procMonitorFromPoint.Call( + uintptr(x), + uintptr(y), + uintptr(dwFlags), + ) + return HMONITOR(ret) +} + +func MonitorFromRect(rc *RECT, dwFlags uint32) HMONITOR { + ret, _, _ := procMonitorFromRect.Call( + uintptr(unsafe.Pointer(rc)), + uintptr(dwFlags), + ) + return HMONITOR(ret) +} + +func MonitorFromWindow(hwnd HWND, dwFlags uint32) HMONITOR { + ret, _, _ := procMonitorFromWindow.Call( + uintptr(hwnd), + uintptr(dwFlags), + ) + return HMONITOR(ret) +} + +func GetMonitorInfo(hMonitor HMONITOR, lmpi *MONITORINFO) bool { + ret, _, _ := procGetMonitorInfo.Call( + uintptr(hMonitor), + uintptr(unsafe.Pointer(lmpi)), + ) + return ret != 0 +} + +func EnumDisplayMonitors(hdc HDC, clip *RECT, fnEnum, dwData uintptr) bool { + ret, _, _ := procEnumDisplayMonitors.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(clip)), + fnEnum, + dwData, + ) + return ret != 0 +} + +func EnumDisplaySettingsEx(szDeviceName *uint16, iModeNum uint32, devMode *DEVMODE, dwFlags uint32) bool { + ret, _, _ := procEnumDisplaySettingsEx.Call( + uintptr(unsafe.Pointer(szDeviceName)), + uintptr(iModeNum), + uintptr(unsafe.Pointer(devMode)), + uintptr(dwFlags), + ) + return ret != 0 +} + +func ChangeDisplaySettingsEx(szDeviceName *uint16, devMode *DEVMODE, hwnd HWND, dwFlags uint32, lParam uintptr) int32 { + ret, _, _ := procChangeDisplaySettingsEx.Call( + uintptr(unsafe.Pointer(szDeviceName)), + uintptr(unsafe.Pointer(devMode)), + uintptr(hwnd), + uintptr(dwFlags), + lParam, + ) + return int32(ret) +} + +/* remove to build without cgo +func SendInput(inputs []INPUT) uint32 { + var validInputs []C.INPUT + + for _, oneInput := range inputs { + input := C.INPUT{_type: C.DWORD(oneInput.Type)} + + switch oneInput.Type { + case INPUT_MOUSE: + (*MouseInput)(unsafe.Pointer(&input)).mi = oneInput.Mi + case INPUT_KEYBOARD: + (*KbdInput)(unsafe.Pointer(&input)).ki = oneInput.Ki + case INPUT_HARDWARE: + (*HardwareInput)(unsafe.Pointer(&input)).hi = oneInput.Hi + default: + panic("unknown type") + } + + validInputs = append(validInputs, input) + } + + ret, _, _ := procSendInput.Call( + uintptr(len(validInputs)), + uintptr(unsafe.Pointer(&validInputs[0])), + uintptr(unsafe.Sizeof(C.INPUT{})), + ) + return uint32(ret) +} +*/ diff --git a/vendor/github.com/shirou/w32/utils.go b/vendor/github.com/shirou/w32/utils.go new file mode 100644 index 00000000..69aa31a4 --- /dev/null +++ b/vendor/github.com/shirou/w32/utils.go @@ -0,0 +1,203 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
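+ +// utils.go collects COM helper routines: IUnknown reference counting +// (ComAddRef/ComRelease), interface queries, DISPID lookup, and a variadic +// ComInvoke that marshals Go values into VARIANTs. A minimal call sequence +// (a sketch only; assumes a live IDispatch and the DISPATCH_METHOD constant +// defined elsewhere in this package): +// +//	ids := ComGetIDsOfName(disp, []string{"Quit"}) +//	ComInvoke(disp, ids[0], DISPATCH_METHOD)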
+ +// +build windows + +package w32 + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +func MakeIntResource(id uint16) *uint16 { + return (*uint16)(unsafe.Pointer(uintptr(id))) +} + +func LOWORD(dw uint32) uint16 { + return uint16(dw) +} + +func HIWORD(dw uint32) uint16 { + return uint16(dw >> 16 & 0xffff) +} + +func BoolToBOOL(value bool) BOOL { + if value { + return 1 + } + + return 0 +} + +func UTF16PtrToString(cstr *uint16) string { + if cstr != nil { + us := make([]uint16, 0, 256) + for p := uintptr(unsafe.Pointer(cstr)); ; p += 2 { + u := *(*uint16)(unsafe.Pointer(p)) + if u == 0 { + return string(utf16.Decode(us)) + } + us = append(us, u) + } + } + + return "" +} + +func ComAddRef(unknown *IUnknown) int32 { + ret, _, _ := syscall.Syscall(unknown.lpVtbl.pAddRef, 1, + uintptr(unsafe.Pointer(unknown)), + 0, + 0) + return int32(ret) +} + +func ComRelease(unknown *IUnknown) int32 { + ret, _, _ := syscall.Syscall(unknown.lpVtbl.pRelease, 1, + uintptr(unsafe.Pointer(unknown)), + 0, + 0) + return int32(ret) +} + +func ComQueryInterface(unknown *IUnknown, id *GUID) *IDispatch { + var disp *IDispatch + hr, _, _ := syscall.Syscall(unknown.lpVtbl.pQueryInterface, 3, + uintptr(unsafe.Pointer(unknown)), + uintptr(unsafe.Pointer(id)), + uintptr(unsafe.Pointer(&disp))) + if hr != 0 { + panic("Invoke QueryInterface error.") + } + return disp +} + +func ComGetIDsOfName(disp *IDispatch, names []string) []int32 { + wnames := make([]*uint16, len(names)) + dispid := make([]int32, len(names)) + for i := 0; i < len(names); i++ { + wnames[i] = syscall.StringToUTF16Ptr(names[i]) + } + hr, _, _ := syscall.Syscall6(disp.lpVtbl.pGetIDsOfNames, 6, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(unsafe.Pointer(&wnames[0])), + uintptr(len(names)), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&dispid[0]))) + if hr != 0 { + panic("Invoke GetIDsOfName error.") + } + return dispid +} + +func ComInvoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT) { + var dispparams DISPPARAMS + + if dispatch&DISPATCH_PROPERTYPUT != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.RgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.CNamedArgs = 1 + } + var vargs []VARIANT + if len(params) > 0 { + vargs = make([]VARIANT, len(params)) + for i, v := range params { + // Arguments are passed to IDispatch::Invoke in reverse order. + n := len(params) - i - 1 + VariantInit(&vargs[n]) + switch v.(type) { + case bool: + if v.(bool) { + vargs[n] = VARIANT{VT_BOOL, 0, 0, 0, 0xffff} + } else { + vargs[n] = VARIANT{VT_BOOL, 0, 0, 0, 0} + } + case *bool: + vargs[n] = VARIANT{VT_BOOL | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*bool))))} + case byte: + vargs[n] = VARIANT{VT_I1, 0, 0, 0, int64(v.(byte))} + case *byte: + vargs[n] = VARIANT{VT_I1 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*byte))))} + case int16: + vargs[n] = VARIANT{VT_I2, 0, 0, 0, int64(v.(int16))} + case *int16: + vargs[n] = VARIANT{VT_I2 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*int16))))} + case uint16: + vargs[n] = VARIANT{VT_UI2, 0, 0, 0, int64(v.(uint16))} + case *uint16: + vargs[n] = VARIANT{VT_UI2 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*uint16))))} + case int: + vargs[n] = VARIANT{VT_I4, 0, 0, 0, int64(v.(int))} + case int32: + vargs[n] = VARIANT{VT_I4, 0, 0, 0, int64(v.(int32))} + case *int: + vargs[n] = VARIANT{VT_I4 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*int))))} + case *int32: + vargs[n] = VARIANT{VT_I4 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*int32))))} + case uint: + vargs[n] = VARIANT{VT_UI4, 0, 0, 0, int64(v.(uint))} + case uint32: + vargs[n] = VARIANT{VT_UI4, 0, 0, 0, int64(v.(uint32))} + case *uint: + vargs[n] =
VARIANT{VT_UI4 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*uint))))} + case int64: + vargs[n] = VARIANT{VT_I8, 0, 0, 0, v.(int64)} + case *int64: + vargs[n] = VARIANT{VT_I8 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*int64))))} + case uint64: + vargs[n] = VARIANT{VT_UI8, 0, 0, 0, int64(v.(uint64))} + case *uint64: + vargs[n] = VARIANT{VT_UI8 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*uint64))))} + case float32: + vargs[n] = VARIANT{VT_R4, 0, 0, 0, int64(v.(float32))} + case *float32: + vargs[n] = VARIANT{VT_R4 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*float32))))} + case float64: + vargs[n] = VARIANT{VT_R8, 0, 0, 0, int64(v.(float64))} + case *float64: + vargs[n] = VARIANT{VT_R8 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*float64))))} + case string: + vargs[n] = VARIANT{VT_BSTR, 0, 0, 0, int64(uintptr(unsafe.Pointer(SysAllocString(v.(string)))))} + case *string: + vargs[n] = VARIANT{VT_BSTR | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*string))))} + case *IDispatch: + vargs[n] = VARIANT{VT_DISPATCH, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*IDispatch))))} + case **IDispatch: + vargs[n] = VARIANT{VT_DISPATCH | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(**IDispatch))))} + case nil: + vargs[n] = VARIANT{VT_NULL, 0, 0, 0, 0} + case *VARIANT: + vargs[n] = VARIANT{VT_VARIANT | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*VARIANT))))} + default: + panic("unknown type") + } + } + dispparams.Rgvarg = uintptr(unsafe.Pointer(&vargs[0])) + dispparams.CArgs = uint32(len(params)) + } + + var ret VARIANT + var excepInfo EXCEPINFO + VariantInit(&ret) + hr, _, _ := syscall.Syscall9(disp.lpVtbl.pInvoke, 9, + uintptr(unsafe.Pointer(disp)), + uintptr(dispid), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(GetUserDefaultLCID()), + uintptr(dispatch), + uintptr(unsafe.Pointer(&dispparams)), + uintptr(unsafe.Pointer(&ret)), + uintptr(unsafe.Pointer(&excepInfo)), + 0) + if hr != 0 { + if excepInfo.BstrDescription != nil { + bs := UTF16PtrToString(excepInfo.BstrDescription) + panic(bs) + } + } + for _, varg := range vargs { + if varg.VT == VT_BSTR && varg.Val != 0 { + SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val))))) + } + } + result = &ret + return +} diff --git a/vendor/github.com/shirou/w32/vars.go b/vendor/github.com/shirou/w32/vars.go new file mode 100644 index 00000000..2dab2e39 --- /dev/null +++ b/vendor/github.com/shirou/w32/vars.go @@ -0,0 +1,13 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package w32 + +var ( + IID_NULL = &GUID{0x00000000, 0x0000, 0x0000, [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}} + IID_IUnknown = &GUID{0x00000000, 0x0000, 0x0000, [8]byte{0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}} + IID_IDispatch = &GUID{0x00020400, 0x0000, 0x0000, [8]byte{0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}} + IID_IConnectionPointContainer = &GUID{0xB196B284, 0xBAB4, 0x101A, [8]byte{0xB6, 0x9C, 0x00, 0xAA, 0x00, 0x34, 0x1D, 0x07}} + IID_IConnectionPoint = &GUID{0xB196B286, 0xBAB4, 0x101A, [8]byte{0xB6, 0x9C, 0x00, 0xAA, 0x00, 0x34, 0x1D, 0x07}} +) diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore new file mode 100644 index 00000000..3bcd8cba --- /dev/null +++ b/vendor/go.etcd.io/bbolt/.gitignore @@ -0,0 +1,5 @@ +*.prof +*.test +*.swp +/bin/ +cover.out diff --git a/vendor/go.etcd.io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml new file mode 100644 index 00000000..a60300c5 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/.travis.yml @@ -0,0 +1,17 @@ +language: go +go_import_path: go.etcd.io/bbolt + +sudo: false + +go: +- 1.11 + +before_install: +- go get -v honnef.co/go/tools/... +- go get -v github.com/kisielk/errcheck + +script: +- make fmt +- make test +- make race +# - make errcheck diff --git a/vendor/go.etcd.io/bbolt/LICENSE b/vendor/go.etcd.io/bbolt/LICENSE new file mode 100644 index 00000000..004e77fe --- /dev/null +++ b/vendor/go.etcd.io/bbolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile new file mode 100644 index 00000000..2968aaa6 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -0,0 +1,38 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)" + @echo "array freelist test" + @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)" + +fmt: + !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + +# go get honnef.co/go/tools/simple +gosimple: + gosimple ./... + +# go get honnef.co/go/tools/unused +unused: + unused ./... 
+ +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt + +test: + TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt + + @echo "array freelist test" + + @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt + +.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md new file mode 100644 index 00000000..e9989efc --- /dev/null +++ b/vendor/go.etcd.io/bbolt/README.md @@ -0,0 +1,968 @@ +bbolt +===== + +[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) +[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) +[![Build Status Travis](https://img.shields.io/travis/etcd-io/bbolt.svg?style=flat-square&branch=master)](https://travis-ci.com/etcd-io/bbolt) +[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) +[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) + +bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value +store. The purpose of this fork is to provide the Go community with an active +maintenance and development target for Bolt; the goal is improved reliability +and stability. bbolt includes bug fixes, performance enhancements, and features +not found in Bolt while preserving backwards compatibility with the Bolt API. + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[gh_ben]: https://github.com/benbjohnson +[bolt]: https://github.com/boltdb/bolt +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. + +## Project versioning + +bbolt uses [semantic versioning](http://semver.org). +API should not change between patch and minor releases. +New minor versions may add additional features to the API.
+ +## Table of Contents + + - [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) + - [Resources](#resources) + - [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) + - [Caveats & Limitations](#caveats--limitations) + - [Reading the Source](#reading-the-source) + - [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get go.etcd.io/bbolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Importing bbolt + +To use bbolt as an embedded key-value store, import as: + +```go +import bolt "go.etcd.io/bbolt" + +db, err := bolt.Open(path, 0666, nil) +if err != nil { + return err +} +defer db.Close() +``` + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is stored as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + bolt "go.etcd.io/bbolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. + db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating a transaction from the `DB` is thread safe. + +Read-only transactions and read-write transactions should not depend on one +another and generally shouldn't be opened simultaneously in the same goroutine.
+This can cause a deadlock as the read-write transaction needs to periodically +re-map the data file but it cannot do so while a read-only transaction is open. + + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also roll back the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for the disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times if parts of the transaction fail. The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function; instead, +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ... + id = newValue + return nil +}) +if err != nil { + return ... +} +fmt.Printf("Allocated ID %d\n", id) +``` + + +#### Managing transactions manually + +The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` +function. These helper functions will start the transaction, execute a function, +and then safely close your transaction if an error is returned. This is the +recommended way to use Bolt transactions. + +However, sometimes you may want to manually start and end your transactions. +You can use the `DB.Begin()` function directly but **please** be sure to close +the transaction. + +```go +// Start a writable transaction. +tx, err := db.Begin(true) +if err != nil { + return err +} +defer tx.Rollback() + +// Use the transaction... +_, err = tx.CreateBucket([]byte("MyBucket")) +if err != nil { + return err +} + +// Commit the transaction and check for error. +if err := tx.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.Begin()` is a boolean stating if the transaction +should be writable. + + +### Using buckets + +Buckets are collections of key/value pairs within the database. All keys in a +bucket must be unique.
You can create a bucket using the `Tx.CreateBucket()` +function: + +```go +db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("MyBucket")) + if err != nil { + return fmt.Errorf("create bucket: %s", err) + } + return nil +}) +``` + +You can also create a bucket only if it doesn't exist by using the +`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this +function for all your top-level buckets after you open your database so you can +guarantee that they exist for future transactions. + +To delete a bucket, simply call the `Tx.DeleteBucket()` function. + + +### Using key/value pairs + +To save a key/value pair to a bucket, use the `Bucket.Put()` function: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Put([]byte("answer"), []byte("42")) + return err +}) +``` + +This will set the value of the `"answer"` key to `"42"` in the `MyBucket` +bucket. To retrieve this value, we can use the `Bucket.Get()` function: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + v := b.Get([]byte("answer")) + fmt.Printf("The answer is: %s\n", v) + return nil +}) +``` + +The `Get()` function does not return an error because its operation is +guaranteed to work (unless there is some kind of system failure). If the key +exists then it will return its byte slice value. If it doesn't exist then it +will return `nil`. It's important to note that you can have a zero-length value +set to a key which is different from the key not existing. + +Use the `Bucket.Delete()` function to delete a key from the bucket. + +Please note that values returned from `Get()` are only valid while the +transaction is open. If you need to use a value outside of the transaction +then you must use `copy()` to copy it to another byte slice. + + +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. + b := tx.Bucket([]byte("users")) + + // Generate ID for the user. + // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call, so the error check is ignored here. + id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. +func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + +### Iterating over keys + +Bolt stores its keys in byte-sorted order within a bucket. This makes sequential +iteration over these keys extremely fast.
To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + +Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. + + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. + +### Nested buckets + +You can also store a bucket in a key to create nested buckets. The API is the +same as the bucket management API on the `DB` object: + +```go +func (*Bucket) CreateBucket(key []byte) (*Bucket, error) +func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) +func (*Bucket) DeleteBucket(key []byte) error +``` + +Say you had a multi-tenant application where the root level bucket was the account bucket. 
Inside this bucket is a sequence of accounts, which are themselves buckets. And inside each account bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings. + +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. + // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10))) + + // Set up the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. + if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + + +### Database backups + +Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()` +function to write a consistent view of the database to a writer. If you call +this from a read-only transaction, it will perform a hot backup and not block +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. + +One common use case is to back up over HTTP so you can use tools like `cURL` to +do database backups: + +```go +func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { + err := db.View(func(tx *bolt.Tx) error { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) + w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) + _, err := tx.WriteTo(w) + return err + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + +Then you can back up using this command: + +```sh +$ curl http://localhost/backup > my.db +``` + +Or you can open your browser to `http://localhost/backup` and it will download +automatically. + +If you want to back up to another file you can use the `Tx.CopyFile()` helper +function. + + +### Statistics + +The database keeps a running count of many of the internal operations it +performs so you can better understand what's going on. By grabbing a snapshot +of these stats at two points in time we can see what operations were performed +in that time range. + +For example, we could start a goroutine to log stats every 10 seconds: + +```go +go func() { + // Grab the initial stats. + prev := db.Stats() + + for { + // Wait for 10s. + time.Sleep(10 * time.Second) + + // Grab the current stats and diff them. + stats := db.Stats() + diff := stats.Sub(&prev) + + // Encode stats to JSON and print to STDERR. + json.NewEncoder(os.Stderr).Encode(diff) + + // Save stats for the next loop. + prev = stats + } +}() +``` + +It's also useful to pipe these stats to a service such as statsd for monitoring +or to provide an HTTP endpoint that will perform a fixed-length sample.
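+ +As a minimal sketch of that second idea, such an endpoint could encode a snapshot of `db.Stats()` as JSON (the handler name and wiring here are illustrative only, and `db` is assumed to be a package-level `*bolt.DB` as in the backup example above): + +```go +// StatsHandleFunc writes a point-in-time snapshot of the database +// statistics as JSON. The name is hypothetical, not part of bbolt. +func StatsHandleFunc(w http.ResponseWriter, req *http.Request) { + stats := db.Stats() + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(stats); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +```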
+ + +### Read-Only Mode + +Sometimes it is useful to create a shared, read-only Bolt database. To do this, +set the `Options.ReadOnly` flag when opening your database. Read-only mode +uses a shared lock to allow multiple processes to read from the database but +it will block any processes from opening the database in read-write mode. + +```go +db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +if err != nil { + log.Fatal(err) +} +``` + +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with an initializing +constructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS requires extra permissions or cleanup from using this method. + +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud; these snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` + +## Resources + +For more information on getting started with Bolt, check out the following articles: + +* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). +* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville + + +## Comparison with other databases + +### Postgres, MySQL, & other relational databases + +Relational databases structure data into rows and are only accessible through +the use of SQL. This approach provides flexibility in how you store and query +your data but also incurs overhead in parsing and planning SQL statements. Bolt +accesses all data by a byte slice key. This makes Bolt fast to read and write +data by key but provides no built-in support for joining values together.
+ +Most relational databases (with the exception of SQLite) are standalone servers +that run separately from your application. This gives your systems the +flexibility to connect multiple application servers to a single database +server but also adds overhead in serializing and transporting data over the +network. Bolt runs as a library included in your application so all data access +has to go through your application's process. This brings data closer to your +application but limits multi-process access to the data. + + +### LevelDB, RocksDB + +LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that +they are libraries bundled into the application; however, their underlying +structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes +random writes by using a write-ahead log and multi-tiered, sorted files called +SSTables. Bolt uses a B+tree internally and only a single file. Both approaches +have trade-offs. + +If you require a high random write throughput (>10,000 w/sec) or you need to use +spinning disks, then LevelDB could be a good choice. If your application is +read-heavy or does a lot of range scans, then Bolt could be a good choice. + +One other important consideration is that LevelDB does not have transactions. +It supports batch writing of key/value pairs and it supports read snapshots +but it will not give you the ability to do a compare-and-swap operation safely. +Bolt supports fully serializable ACID transactions. + + +### LMDB + +Bolt was originally a port of LMDB so it is architecturally similar. Both use +a B+tree, have ACID semantics with fully serializable transactions, and support +lock-free MVCC using a single writer and multiple readers. + +The two projects have somewhat diverged. LMDB heavily focuses on raw performance +while Bolt has focused on simplicity and ease of use. For example, LMDB allows +several unsafe actions such as direct writes for the sake of performance. Bolt +opts to disallow actions which can leave the database in a corrupted state. The +only exception to this in Bolt is `DB.NoSync`. + +There are also a few differences in API. LMDB requires a maximum mmap size when +opening an `mdb_env` whereas Bolt will handle incremental mmap resizing +automatically. LMDB overloads the getter and setter functions with multiple +flags whereas Bolt splits these specialized cases into their own functions. + + +## Caveats & Limitations + +It's important to pick the right tool for the job and Bolt is no exception. +Here are a few things to note when evaluating and using Bolt: + +* Bolt is good for read-intensive workloads. Sequential write performance is + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. + +* Bolt uses a B+tree internally so there can be a lot of random page access. + SSDs provide a significant performance boost over spinning disks. + +* Try to avoid long-running read transactions. Bolt uses copy-on-write so + old pages cannot be reclaimed while an old transaction is using them. + +* Byte slices returned from Bolt are only valid during a transaction. Once the + transaction has been committed or rolled back then the memory they point to + can be reused by a new page or can be unmapped from virtual memory and you'll + see an `unexpected fault address` panic when accessing it. + +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + +* Be careful when using `Bucket.FillPercent`.
Setting a high fill percent for + buckets that have random inserts will cause your database to have very poor + page utilization. + +* Use larger buckets in general. Smaller buckets cause poor page utilization + once they become larger than the page size (typically 4KB). + +* Bulk loading a lot of random writes into a new bucket can be slow as the + page will not split until the transaction is committed. Randomly inserting + more than 100,000 key/value pairs into a single new bucket in a single + transaction is not advised. + +* Bolt uses a memory-mapped file so the underlying operating system handles the + caching of the data. Typically, the OS will cache as much of the file as it + can in memory and will release memory as needed to other processes. This means + that Bolt can show very high memory usage when working with large databases. + However, this is expected and the OS will release memory as needed. Bolt can + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bit systems. + +* The data structures in the Bolt database are memory mapped so the data file + will be endian specific. This means that you cannot copy a Bolt file from a + little endian machine to a big endian machine and have it work. For most + users this is not a concern since most modern CPUs are little endian. + +* Because of the way pages are laid out on disk, Bolt cannot truncate data files + and return free pages back to the disk. Instead, Bolt maintains a free list + of unused pages within its data file. These free pages can be reused by later + transactions. This works well for many use cases as databases generally tend + to grow. However, it's important to note that deleting large chunks of data + will not allow you to reclaim that space on disk. + + For more information on page allocation, [see this comment][page-allocation]. + +[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 + + +## Reading the Source + +Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. + +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where the key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair.
During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request. + + +## Other Projects Using Bolt + +Below is a list of public, open source projects that use Bolt: + +* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. +* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing a BoltDB file in your terminal. +* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web-based GUI for BoltDB files. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as an optional backend. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as an optional storage engine and testing it against Basho-tuned leveldb. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. +* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Go that uses BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing. +* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. +* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and API for ipxed. +* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent and supports a JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding. +* [LedisDB](https://github.com/siddontang/ledisdb) - A high-performance NoSQL database, using Bolt as optional storage. +* [lru](https://github.com/crowdriff/lru) - Easy-to-use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi-level (nested) buckets. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. +* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. +* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award-winning, modular, DIY reef tank controller using easy-to-learn electronics based on a Raspberry Pi. +* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to the [RequestBin](http://requestb.in/) service +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. +* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [tentacool](https://github.com/optiflows/tentacool) - REST API server to manage system stuff (IP, DNS, Gateway...) on a Linux server. +* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. + +If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/go.etcd.io/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go new file mode 100644 index 00000000..4d35ee7c --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_386.go @@ -0,0 +1,10 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go new file mode 100644 index 00000000..60a52dad --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_amd64.go @@ -0,0 +1,10 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go new file mode 100644 index 00000000..105d27dd --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bbolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go new file mode 100644 index 00000000..f5aa2a5e --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_arm64.go @@ -0,0 +1,13 @@ +// +build arm64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch?
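+// No: ARMv8 handles unaligned accesses to normal memory in hardware, so this is always false.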
+var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_linux.go b/vendor/go.etcd.io/bbolt/bolt_linux.go new file mode 100644 index 00000000..7707bcac --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bbolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go new file mode 100644 index 00000000..baeb289f --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_mips64x.go @@ -0,0 +1,12 @@ +// +build mips64 mips64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x8000000000 // 512GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go new file mode 100644 index 00000000..2d9b1a91 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_mipsx.go @@ -0,0 +1,12 @@ +// +build mips mipsle + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x40000000 // 1GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_openbsd.go b/vendor/go.etcd.io/bbolt/bolt_openbsd.go new file mode 100644 index 00000000..d7f50358 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bbolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go new file mode 100644 index 00000000..69804714 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_ppc.go @@ -0,0 +1,12 @@ +// +build ppc + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go new file mode 100644 index 00000000..35659085 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64.go @@ -0,0 +1,12 @@ +// +build ppc64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go new file mode 100644 index 00000000..422c7c69 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go new file mode 100644 index 00000000..6d3fcb82 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go new file mode 100644 index 00000000..5f2bb514 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -0,0 +1,94 @@ +// +build !windows,!plan9,!solaris + +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + flag := syscall.LOCK_NB + if exclusive { + flag |= syscall.LOCK_EX + } else { + flag |= syscall.LOCK_SH + } + for { + // Attempt to obtain the lock (shared or exclusive). + err := syscall.Flock(int(fd), flag) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin.
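+// It invokes the madvise(2) system call directly via syscall.Syscall.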
+func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go new file mode 100644 index 00000000..babad657 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go @@ -0,0 +1,88 @@ +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain the lock (shared or exclusive). + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice.
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go new file mode 100644 index 00000000..fca178bd --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_windows.go @@ -0,0 +1,142 @@ +package bbolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + for { + // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range + // -1..0 as the lock on the database file. + var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 + err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{ + Offset: m1, + OffsetHigh: m1, + }) + + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 + err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{ + Offset: m1, + OffsetHigh: m1, + }) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizehi := uint32(sz >> 32) + sizelo := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map.
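+ // The two zero arguments are the high and low halves of the view offset, so the read-only mapping starts at the beginning of the file.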
+ addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go new file mode 100644 index 00000000..9587afef --- /dev/null +++ b/vendor/go.etcd.io/bbolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bbolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go new file mode 100644 index 00000000..84bfd4d6 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -0,0 +1,775 @@ +package bbolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. 
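+//
+// Because FillPercent above is not persisted, tuning it for append-heavy
+// loads must happen inside every transaction. A minimal caller-side sketch
+// (bucket name hypothetical; assumes the bucket already exists):
+//
+//	err := db.Update(func(tx *bbolt.Tx) error {
+//		b := tx.Bucket([]byte("events"))
+//		b.FillPercent = 0.9 // pack split pages tighter for sequential keys
+//		return b.Put([]byte("2020-03-05T12:00:00Z"), []byte("payload"))
+//	})
+//	if err != nil {
+//		return err
+//	}
+//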
+func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. + unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable && !unaligned { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } + return nil, ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. 
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+	child, err := b.CreateBucket(key)
+	if err == ErrBucketExists {
+		return b.Bucket(key), nil
+	} else if err != nil {
+		return nil, err
+	}
+	return child, nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if bucket doesn't exist or is not a bucket.
+	if !bytes.Equal(key, k) {
+		return ErrBucketNotFound
+	} else if (flags & bucketLeafFlag) == 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Recursively delete all child buckets.
+	child := b.Bucket(key)
+	err := child.ForEach(func(k, v []byte) error {
+		if v == nil {
+			if err := child.DeleteBucket(k); err != nil {
+				return fmt.Errorf("delete bucket: %s", err)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Remove cached copy.
+	delete(b.buckets, string(key))
+
+	// Release all bucket pages to freelist.
+	child.nodes = nil
+	child.rootNode = nil
+	child.free()
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (b *Bucket) Get(key []byte) []byte {
+	k, v, flags := b.Cursor().seek(key)
+
+	// Return nil if this is a bucket.
+	if (flags & bucketLeafFlag) != 0 {
+		return nil
+	}
+
+	// If our target node isn't the same key as what's passed in then return nil.
+	if !bytes.Equal(key, k) {
+		return nil
+	}
+	return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exists then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	} else if len(key) == 0 {
+		return ErrKeyRequired
+	} else if len(key) > MaxKeySize {
+		return ErrKeyTooLarge
+	} else if int64(len(value)) > MaxValueSize {
+		return ErrValueTooLarge
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if there is an existing key with a bucket value.
+	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Insert into node.
+	key = cloneBytes(key)
+	c.node().put(key, key, value, 0, 0)
+
+	return nil
+}
+
+// Delete removes a key from the bucket.
+// If the key does not exist then nothing is done and a nil error is returned.
+// Returns an error if the bucket was created from a read-only transaction.
+func (b *Bucket) Delete(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
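+//
+// The create/delete operations above compose for nested buckets. A sketch
+// of typical caller code (bucket names hypothetical):
+//
+//	return db.Update(func(tx *bbolt.Tx) error {
+//		users, err := tx.CreateBucketIfNotExists([]byte("users"))
+//		if err != nil {
+//			return err
+//		}
+//		if _, err := users.CreateBucketIfNotExists([]byte("alice")); err != nil {
+//			return err
+//		}
+//		// DeleteBucket recursively deletes children and frees their pages.
+//		return users.DeleteBucket([]byte("alice"))
+//	})
+//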
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return nil if the key doesn't exist.
+	if !bytes.Equal(key, k) {
+		return nil
+	}
+
+	// Return an error if there is an already existing bucket value.
+	if (flags & bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// Sequence returns the current integer for the bucket without incrementing it.
+func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
+
+// SetSequence updates the sequence number for the bucket.
+func (b *Bucket) SetSequence(v uint64) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Materialize the root node if it hasn't been already so that the
+	// bucket will be saved during commit.
+	if b.rootNode == nil {
+		_ = b.node(b.root, nil)
+	}
+
+	// Set the sequence.
+	b.bucket.sequence = v
+	return nil
+}
+
+// NextSequence returns an autoincrementing integer for the bucket.
+func (b *Bucket) NextSequence() (uint64, error) {
+	if b.tx.db == nil {
+		return 0, ErrTxClosed
+	} else if !b.Writable() {
+		return 0, ErrTxNotWritable
+	}
+
+	// Materialize the root node if it hasn't been already so that the
+	// bucket will be saved during commit.
+	if b.rootNode == nil {
+		_ = b.node(b.root, nil)
+	}
+
+	// Increment and return the sequence.
+	b.bucket.sequence++
+	return b.bucket.sequence, nil
+}
+
+// ForEach executes a function for each key/value pair in a bucket.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller. The provided function must not modify
+// the bucket; this will result in undefined behavior.
+func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	}
+	c := b.Cursor()
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		if err := fn(k, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Stats returns stats on a bucket.
+func (b *Bucket) Stats() BucketStats {
+	var s, subStats BucketStats
+	pageSize := b.tx.db.pageSize
+	s.BucketN += 1
+	if b.root == 0 {
+		s.InlineBucketN += 1
+	}
+	b.forEachPage(func(p *page, depth int) {
+		if (p.flags & leafPageFlag) != 0 {
+			s.KeyN += int(p.count)
+
+			// used totals the used bytes for the page
+			used := pageHeaderSize
+
+			if p.count != 0 {
+				// If page has any elements, add all element headers.
+				used += leafPageElementSize * int(p.count-1)
+
+				// Add all element key, value sizes.
+				// The computation takes advantage of the fact that the position
+				// of the last element's key/value equals to the total of the sizes
+				// of all previous elements' keys and values.
+				// It also includes the last element's header.
+				lastElement := p.leafPageElement(p.count - 1)
+				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
+			}
+
+			if b.root == 0 {
+				// For inlined bucket just update the inline stats
+				s.InlineBucketInuse += used
+			} else {
+				// For non-inlined bucket update all the leaf stats
+				s.LeafPageN++
+				s.LeafInuse += used
+				s.LeafOverflowN += int(p.overflow)
+
+				// Collect stats from sub-buckets.
+				// Do that by iterating over all element headers
+				// looking for the ones with the bucketLeafFlag.
+				for i := uint16(0); i < p.count; i++ {
+					e := p.leafPageElement(i)
+					if (e.flags & bucketLeafFlag) != 0 {
+						// For any bucket element, open the element value
+						// and recursively call Stats on the contained bucket.
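+//
+// As an aside, NextSequence above is the usual way to mint ordered integer
+// keys. Sketch (itob is a hypothetical helper that big-endian-encodes a
+// uint64 into 8 bytes so byte order matches numeric order):
+//
+//	id, err := b.NextSequence() // b must come from a writable tx
+//	if err != nil {
+//		return err
+//	}
+//	return b.Put(itob(id), payload)
+//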
+ subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += int(lastElement.pos + lastElement.ksize) + s.BranchInuse += used + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. 
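+//
+// The counters assembled above surface through Bucket.Stats. A read-only
+// caller-side sketch (bucket name hypothetical):
+//
+//	_ = db.View(func(tx *bbolt.Tx) error {
+//		s := tx.Bucket([]byte("events")).Stats()
+//		fmt.Printf("keys=%d depth=%d leaf-inuse=%dB\n", s.KeyN, s.Depth, s.LeafInuse)
+//		return nil
+//	})
+//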
+ if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. 
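+//
+// To make the inlining threshold concrete: with the common 4096-byte page
+// size, maxInlineBucketSize returns 1024, so a bucket stays inline only
+// while pageHeaderSize plus its leaf element headers and key/value bytes
+// remain within 1 KiB and none of its inodes carries bucketLeafFlag; the
+// first spill after it outgrows that writes it to its own root page.
+//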
+ if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. + BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go new file mode 100644 index 00000000..3000aced --- /dev/null +++ b/vendor/go.etcd.io/bbolt/cursor.go @@ -0,0 +1,396 @@ +package bbolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. 
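+//
+// A full scan drives First and Next together; the canonical caller-side
+// loop (assumes an open read transaction and an existing bucket b):
+//
+//	c := b.Cursor()
+//	for k, v := c.First(); k != nil; k, v = c.Next() {
+//		fmt.Printf("%s = %s\n", k, v)
+//	}
+//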
+func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. 
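+//
+// One common use is pruning entries while iterating inside a writable
+// transaction (sketch; expired is a hypothetical predicate):
+//
+//	c := b.Cursor()
+//	for k, v := c.First(); k != nil; k, v = c.Next() {
+//		if expired(k, v) {
+//			if err := c.Delete(); err != nil {
+//				return err
+//			}
+//		}
+//	}
+//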
+func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. 
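+//
+// The search descent here is what Seek builds on, and it is what makes
+// prefix scans cheap. Caller-side sketch (assumes import "bytes" and a
+// cursor c from an open transaction):
+//
+//	prefix := []byte("user:")
+//	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
+//		_ = v // process one entry with the "user:" prefix
+//	}
+//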
+ if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. 
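+//
+// A note on the sort.Search pattern used by searchNode and searchPage
+// above: sort.Search returns the smallest index whose key compares >= the
+// target, so the code tracks whether the hit was exact and otherwise steps
+// back one slot to the rightmost separator key <= the target. On a plain
+// slice:
+//
+//	keys := []string{"b", "d", "f"}
+//	i := sort.Search(len(keys), func(n int) bool { return keys[n] >= "e" })
+//	// i == 2 ("f"); no exact match, so step back to index 1 ("d"),
+//	// the child subtree that may contain "e".
+//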
+func (r *elemRef) isLeaf() bool {
+	if r.node != nil {
+		return r.node.isLeaf
+	}
+	return (r.page.flags & leafPageFlag) != 0
+}
+
+// count returns the number of inodes or page elements.
+func (r *elemRef) count() int {
+	if r.node != nil {
+		return len(r.node.inodes)
+	}
+	return int(r.page.count)
+}
diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go
new file mode 100644
index 00000000..962248c9
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/db.go
@@ -0,0 +1,1164 @@
+package bbolt
+
+import (
+	"errors"
+	"fmt"
+	"hash/fnv"
+	"log"
+	"os"
+	"runtime"
+	"sort"
+	"sync"
+	"time"
+	"unsafe"
+)
+
+// The largest step that can be taken when remapping the mmap.
+const maxMmapStep = 1 << 30 // 1GB
+
+// The data file format version.
+const version = 2
+
+// Represents a marker value to indicate that a file is a Bolt DB.
+const magic uint32 = 0xED0CDAED
+
+const pgidNoFreelist pgid = 0xffffffffffffffff
+
+// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
+// syncing changes to a file. This is required as some operating systems,
+// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
+// must be synchronized using the msync(2) syscall.
+const IgnoreNoSync = runtime.GOOS == "openbsd"
+
+// Default values if not set in a DB instance.
+const (
+	DefaultMaxBatchSize  int = 1000
+	DefaultMaxBatchDelay     = 10 * time.Millisecond
+	DefaultAllocSize         = 16 * 1024 * 1024
+)
+
+// default page size for db is set to the OS page size.
+var defaultPageSize = os.Getpagesize()
+
+// The time elapsed between consecutive file locking attempts.
+const flockRetryTimeout = 50 * time.Millisecond
+
+// FreelistType is the type of the freelist backend
+type FreelistType string
+
+const (
+	// FreelistArrayType indicates backend freelist type is array
+	FreelistArrayType = FreelistType("array")
+	// FreelistMapType indicates backend freelist type is hashmap
+	FreelistMapType = FreelistType("hashmap")
+)
+
+// DB represents a collection of buckets persisted to a file on disk.
+// All data access is performed through transactions which can be obtained through the DB.
+// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
+type DB struct {
+	// When enabled, the database will perform a Check() after every commit.
+	// A panic is issued if the database is in an inconsistent state. This
+	// flag has a large performance impact so it should only be used for
+	// debugging purposes.
+	StrictMode bool
+
+	// Setting the NoSync flag will cause the database to skip fsync()
+	// calls after each commit. This can be useful when bulk loading data
+	// into a database and you can restart the bulk load in the event of
+	// a system failure or database corruption. Do not set this flag for
+	// normal use.
+	//
+	// If the package global IgnoreNoSync constant is true, this value is
+	// ignored. See the comment on that constant for more details.
+	//
+	// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
+	NoSync bool
+
+	// When true, skips syncing freelist to disk. This improves the database
+	// write performance under normal operation, but requires a full database
+	// re-sync during recovery.
+	NoFreelistSync bool
+
+	// FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
+	// dramatic performance degradation if database is large and fragmentation in freelist is common.
+ // The alternative one is using hashmap, it is faster in almost all circumstances + // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. + // The default type is array + FreelistType FreelistType + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + file *os.File + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + stats Stats + + freelist *freelist + freelistLoad sync.Once + + pagePool sync.Pool + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + db := &DB{ + opened: true, + } + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoSync = options.NoSync + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + db.NoFreelistSync = options.NoFreelistSync + db.FreelistType = options.FreelistType + + // Set default values for later DB operations. 
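+//
+// The fields defaulted here are the main tuning surface. A bulk-load
+// sketch from the caller's side (path hypothetical; NoSync trades crash
+// safety for speed, so the load must be restartable from scratch):
+//
+//	db, err := bbolt.Open("bulk.db", 0600, &bbolt.Options{
+//		NoFreelistSync: true,
+//		FreelistType:   bbolt.FreelistMapType,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	db.NoSync = true
+//	// ... many db.Update calls ...
+//	db.NoSync = false
+//	return db.Sync() // one final fdatasync
+//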
+ db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + if db.pageSize = options.PageSize; db.pageSize == 0 { + // Set the default page size to the OS page size. + db.pageSize = defaultPageSize + } + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + _ = db.close() + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + // clean up file descriptor on initialization fail + _ = db.close() + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + // If we can't read the page size, but can read a page, assume + // it's the same as the OS or one given -- since that's how the + // page size was chosen in the first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + // + // TODO: scan for next page + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { + db.pageSize = int(m.pageSize) + } + } else { + _ = db.close() + return nil, ErrInvalid + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + if db.readOnly { + return db, nil + } + + db.loadFreelist() + + // Flush freelist when transitioning from no sync to sync so + // NoFreelistSync unaware boltdb can open the db later. + if !db.NoFreelistSync && !db.hasSyncedFreelist() { + tx, err := db.Begin(true) + if tx != nil { + err = tx.Commit() + } + if err != nil { + _ = db.close() + return nil, err + } + } + + // Mark the database as opened and return. + return db, nil +} + +// loadFreelist reads the freelist if it is synced, or reconstructs it +// by scanning the DB if it is not synced. It assumes there are no +// concurrent accesses being made to the freelist. +func (db *DB) loadFreelist() { + db.freelistLoad.Do(func() { + db.freelist = newFreelist(db.FreelistType) + if !db.hasSyncedFreelist() { + // Reconstruct free list by scanning the DB. + db.freelist.readIDs(db.freepages()) + } else { + // Read free list from freelist page. 
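+//
+// As the locking comment in Open notes, Options.ReadOnly switches flock to
+// shared mode, so several processes can read one file while any writer is
+// excluded. Caller-side sketch (path hypothetical):
+//
+//	db, err := bbolt.Open("app.db", 0600, &bbolt.Options{ReadOnly: true})
+//	if err != nil {
+//		return err
+//	}
+//	// db.Update and db.Begin(true) now return ErrDatabaseReadOnly.
+//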
+			db.freelist.read(db.page(db.meta().freelist))
+		}
+		db.stats.FreePageN = db.freelist.free_count()
+	})
+}
+
+func (db *DB) hasSyncedFreelist() bool {
+	return db.meta().freelist != pgidNoFreelist
+}
+
+// mmap opens the underlying memory-mapped file and initializes the meta references.
+// minsz is the minimum size that the new mmap can be.
+func (db *DB) mmap(minsz int) error {
+	db.mmaplock.Lock()
+	defer db.mmaplock.Unlock()
+
+	info, err := db.file.Stat()
+	if err != nil {
+		return fmt.Errorf("mmap stat error: %s", err)
+	} else if int(info.Size()) < db.pageSize*2 {
+		return fmt.Errorf("file size too small")
+	}
+
+	// Ensure the size is at least the minimum size.
+	var size = int(info.Size())
+	if size < minsz {
+		size = minsz
+	}
+	size, err = db.mmapSize(size)
+	if err != nil {
+		return err
+	}
+
+	// Dereference all mmap references before unmapping.
+	if db.rwtx != nil {
+		db.rwtx.root.dereference()
+	}
+
+	// Unmap existing data before continuing.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Memory-map the data file as a byte slice.
+	if err := mmap(db, size); err != nil {
+		return err
+	}
+
+	// Save references to the meta pages.
+	db.meta0 = db.page(0).meta()
+	db.meta1 = db.page(1).meta()
+
+	// Validate the meta pages. We only return an error if both meta pages fail
+	// validation, since meta0 failing validation means that it wasn't saved
+	// properly -- but we can recover using meta1. And vice-versa.
+	err0 := db.meta0.validate()
+	err1 := db.meta1.validate()
+	if err0 != nil && err1 != nil {
+		return err0
+	}
+
+	return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+	if err := munmap(db); err != nil {
+		return fmt.Errorf("unmap error: " + err.Error())
+	}
+	return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in MBs.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+	// Create two meta pages on a buffer.
+	buf := make([]byte, db.pageSize*4)
+	for i := 0; i < 2; i++ {
+		p := db.pageInBuffer(buf[:], pgid(i))
+		p.id = pgid(i)
+		p.flags = metaPageFlag
+
+		// Initialize the meta page.
+		m := p.meta()
+		m.magic = magic
+		m.version = version
+		m.pageSize = uint32(db.pageSize)
+		m.freelist = 2
+		m.root = bucket{root: 3}
+		m.pgid = 4
+		m.txid = txid(i)
+		m.checksum = m.sum64()
+	}
+
+	// Write an empty freelist at page 3.
+	p := db.pageInBuffer(buf[:], pgid(2))
+	p.id = pgid(2)
+	p.flags = freelistPageFlag
+	p.count = 0
+
+	// Write an empty leaf page at page 4.
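+//
+// The page written next completes a fixed four-page layout that every
+// freshly initialized file shares (ids are zero-based; the comments here
+// count pages ordinally):
+//
+//	pgid 0: meta0 (txid 0)
+//	pgid 1: meta1 (txid 1)
+//	pgid 2: empty freelist
+//	pgid 3: empty leaf backing the root bucket
+//
+// with meta.pgid, the page high-water mark, starting at 4.
+//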
+ p = db.pageInBuffer(buf[:], pgid(3)) + p.id = pgid(3) + p.flags = leafPageFlag + p.count = 0 + + // Write the buffer to our data file. + if _, err := db.ops.writeAt(buf, 0); err != nil { + return err + } + if err := fdatasync(db); err != nil { + return err + } + + return nil +} + +// Close releases all database resources. +// It will block waiting for any open transactions to finish +// before closing the database and returning. +func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + return db.close() +} + +func (db *DB) close() error { + if !db.opened { + return nil + } + + db.opened = false + + db.freelist = nil + + // Clear ops. + db.ops.writeAt = nil + + // Close the mmap. + if err := db.munmap(); err != nil { + return err + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + db.path = "" + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. 
This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + db.freePages() + return t, nil +} + +// freePages releases any pages associated with closed read-only transactions. +func (db *DB) freePages() { + // Free all pending pages prior to earliest open transaction. + sort.Sort(txsById(db.txs)) + minid := txid(0xFFFFFFFFFFFFFFFF) + if len(db.txs) > 0 { + minid = db.txs[0].meta.txid + } + if minid > 0 { + db.freelist.release(minid - 1) + } + // Release unused txid extents. + for _, t := range db.txs { + db.freelist.releaseRange(minid, t.meta.txid-1) + minid = t.meta.txid + 1 + } + db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + // Any page both allocated and freed in an extent is safe to release. +} + +type txsById []*Tx + +func (t txsById) Len() int { return len(t) } +func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. 
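+//
+// Update above is shorthand for the manual transaction pattern. An
+// equivalent caller-side sketch (bucket name hypothetical and assumed to
+// exist):
+//
+//	tx, err := db.Begin(true)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback() // effectively a no-op after a successful Commit
+//	if err := tx.Bucket([]byte("events")).Put([]byte("k"), []byte("v")); err != nil {
+//		return err
+//	}
+//	return tx.Commit()
+//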
+ defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Rollback() +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + c.err <- err + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. 
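+//
+// Putting the batch machinery together: Batch pays off when many goroutines
+// each commit a small write, since their functions share one transaction
+// and one fsync. Sketch (bucket name and itob key helper hypothetical):
+//
+//	var wg sync.WaitGroup
+//	for i := 0; i < 100; i++ {
+//		i := i
+//		wg.Add(1)
+//		go func() {
+//			defer wg.Done()
+//			_ = db.Batch(func(tx *bbolt.Tx) error {
+//				return tx.Bucket([]byte("events")).Put(itob(uint64(i)), []byte("x"))
+//			})
+//		}()
+//	}
+//	wg.Wait()
+//
+// Because a failing function is retried solo via trySolo below, fn must be
+// idempotent, as the Batch doc comment warns.
+//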
+var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 + } + + // Use higher meta page if valid. Otherwise fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(txid txid, count int) (*page, error) { + // Allocate a temporary buffer for the page. + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(txid, count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. 
+ // Once it goes over the allocation size then allocate in chunks.
+ if db.datasz < db.AllocSize {
+ sz = db.datasz
+ } else {
+ sz += db.AllocSize
+ }
+
+ // Truncate and fsync to ensure file size metadata is flushed.
+ // https://github.com/boltdb/bolt/issues/284
+ if !db.NoGrowSync && !db.readOnly {
+ if runtime.GOOS != "windows" {
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("file resize error: %s", err)
+ }
+ }
+ if err := db.file.Sync(); err != nil {
+ return fmt.Errorf("file sync error: %s", err)
+ }
+ }
+
+ db.filesz = sz
+ return nil
+}
+
+// IsReadOnly returns whether the database was opened in read-only mode.
+func (db *DB) IsReadOnly() bool {
+ return db.readOnly
+}
+
+// freepages returns every page id below the high water mark that is not
+// reachable from any bucket.
+func (db *DB) freepages() []pgid {
+ tx, err := db.beginTx()
+ if err != nil {
+ panic("freepages: failed to open read only tx")
+ }
+ defer func() {
+ if err := tx.Rollback(); err != nil {
+ panic("freepages: failed to rollback tx")
+ }
+ }()
+
+ reachable := make(map[pgid]*page)
+ nofreed := make(map[pgid]bool)
+ ech := make(chan error)
+ go func() {
+ for e := range ech {
+ panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
+ }
+ }()
+ tx.checkBucket(&tx.root, reachable, nofreed, ech)
+ close(ech)
+
+ var fids []pgid
+ for i := pgid(2); i < db.meta().pgid; i++ {
+ if _, ok := reachable[i]; !ok {
+ fids = append(fids, i)
+ }
+ }
+ return fids
+}
+
+// Options represents the options that can be set when opening a database.
+type Options struct {
+ // Timeout is the amount of time to wait to obtain a file lock.
+ // When set to zero it will wait indefinitely. This option is only
+ // available on Darwin and Linux.
+ Timeout time.Duration
+
+ // Sets the DB.NoGrowSync flag before memory mapping the file.
+ NoGrowSync bool
+
+ // Do not sync the freelist to disk. This improves database write performance
+ // under normal operation, but requires a full database re-sync during recovery.
+ NoFreelistSync bool
+
+ // FreelistType sets the backend freelist type. There are two options: array,
+ // which is simple but suffers dramatic performance degradation when the
+ // database is large and freelist fragmentation is common, and hashmap, which
+ // is faster in almost all circumstances but does not guarantee that it offers
+ // the smallest page id available. In the normal case it is safe.
+ // The default type is array.
+ FreelistType FreelistType
+
+ // Open database in read-only mode. Uses flock(..., LOCK_SH | LOCK_NB) to
+ // grab a shared lock (UNIX).
+ ReadOnly bool
+
+ // Sets the DB.MmapFlags flag before memory mapping the file.
+ MmapFlags int
+
+ // InitialMmapSize is the initial mmap size of the database
+ // in bytes. Read transactions won't block write transactions
+ // if the InitialMmapSize is large enough to hold the database mmap
+ // size. (See DB.Begin for more information.)
+ //
+ // If <=0, the initial map size is 0.
+ // If InitialMmapSize is smaller than the previous database size,
+ // it has no effect.
+ InitialMmapSize int
+
+ // PageSize overrides the default OS page size.
+ PageSize int
+
+ // NoSync sets the initial value of DB.NoSync. Normally this can just be
+ // set directly on the DB itself when returned from Open(), but this option
+ // is useful in APIs which expose Options but not the underlying DB.
+ NoSync bool
+}
+
+// DefaultOptions represents the options used if nil options are passed into Open().
+// No timeout is used, which will cause Bolt to wait indefinitely for a lock.
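As a usage sketch of the Options struct defined above (values illustrative, not recommendations), these options are typically set once at Open time:

```go
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Illustrative values only: a bounded wait on the file lock, the hashmap
	// freelist for large or fragmented databases, and a pre-sized mmap so read
	// transactions don't block a growing writer.
	db, err := bolt.Open("app.db", 0600, &bolt.Options{
		Timeout:         time.Second,
		FreelistType:    bolt.FreelistMapType,
		InitialMmapSize: 1 << 28, // 256 MiB
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```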
+var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, + FreelistType: FreelistArrayType, +} + +// Stats represents statistics about the database. +type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = s.TxN - other.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } else if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/vendor/go.etcd.io/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go new file mode 100644 index 00000000..95f25f01 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/doc.go @@ -0,0 +1,44 @@ +/* +package bbolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. 
This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. + + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bbolt diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go new file mode 100644 index 00000000..48758ca5 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/errors.go @@ -0,0 +1,71 @@ +package bbolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. 
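Since these are package-level sentinel values, callers can compare against them directly or with errors.Is. A small sketch, with an illustrative database path:

```go
package main

import (
	"errors"
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// If another process holds the file lock, Open fails with ErrTimeout
	// instead of blocking forever.
	db, err := bolt.Open("shared.db", 0600, &bolt.Options{Timeout: 500 * time.Millisecond})
	if errors.Is(err, bolt.ErrTimeout) {
		log.Fatal("database is locked by another process")
	} else if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Writing through a read-only transaction surfaces ErrTxNotWritable.
	err = db.View(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket([]byte("nope"))
		return err
	})
	if errors.Is(err, bolt.ErrTxNotWritable) {
		log.Print("as expected: a read-only tx cannot create buckets")
	}
}
```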
+ ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. + ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") +) diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go new file mode 100644 index 00000000..93fd85d5 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/freelist.go @@ -0,0 +1,370 @@ +package bbolt + +import ( + "fmt" + "sort" + "unsafe" +) + +// txPending holds a list of pgids and corresponding allocation txns +// that are pending to be freed. +type txPending struct { + ids []pgid + alloctx []txid // txids allocating the ids + lastReleaseBegin txid // beginning txid of last matching releaseRange +} + +// pidSet holds the set of starting pgids which have the same span size +type pidSet map[pgid]struct{} + +// freelist represents a list of all pages that are available for allocation. +// It also tracks pages that have been freed but are still in use by open transactions. +type freelist struct { + freelistType FreelistType // freelist type + ids []pgid // all free and available free page ids. + allocs map[pgid]txid // mapping of txid that allocated a pgid. + pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. + freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size + forwardMap map[pgid]uint64 // key is start pgid, value is its span size + backwardMap map[pgid]uint64 // key is end pgid, value is its span size + allocate func(txid txid, n int) pgid // the freelist allocate func + free_count func() int // the function which gives you free page number + mergeSpans func(ids pgids) // the mergeSpan func + getFreePageIDs func() []pgid // get free pgids func + readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist +} + +// newFreelist returns an empty, initialized freelist. 
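The freemaps/forwardMap/backwardMap trio in the struct above is what makes the hashmap variant fast. A toy model, using plain uint64s instead of the internal pgid type, shows how one free span is triple-indexed so that size-based allocation and neighbor merging are both map lookups:

```go
package main

import "fmt"

func main() {
	// A single free span of 3 pages starting at page 12 (pages 12, 13, 14),
	// recorded in the three indexes the freelist struct maintains.
	freemaps := map[uint64]map[uint64]struct{}{ // span size -> set of start pages
		3: {12: {}},
	}
	forwardMap := map[uint64]uint64{12: 3}  // start page -> span size
	backwardMap := map[uint64]uint64{14: 3} // end page -> span size

	// Allocation by size is a direct lookup in freemaps...
	if starts, ok := freemaps[3]; ok {
		for start := range starts {
			fmt.Println("allocate 3 pages at", start) // allocate 3 pages at 12
		}
	}

	// ...and when page 15 is freed, backwardMap/forwardMap answer instantly
	// whether a span ends at 14 or starts at 16, i.e. whether to merge.
	_, mergePrev := backwardMap[15-1]
	_, mergeNext := forwardMap[15+1]
	fmt.Println(mergePrev, mergeNext) // true false
}
```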
+func newFreelist(freelistType FreelistType) *freelist { + f := &freelist{ + freelistType: freelistType, + allocs: make(map[pgid]txid), + pending: make(map[txid]*txPending), + cache: make(map[pgid]bool), + freemaps: make(map[uint64]pidSet), + forwardMap: make(map[pgid]uint64), + backwardMap: make(map[pgid]uint64), + } + + if freelistType == FreelistMapType { + f.allocate = f.hashmapAllocate + f.free_count = f.hashmapFreeCount + f.mergeSpans = f.hashmapMergeSpans + f.getFreePageIDs = f.hashmapGetFreePageIDs + f.readIDs = f.hashmapReadIDs + } else { + f.allocate = f.arrayAllocate + f.free_count = f.arrayFreeCount + f.mergeSpans = f.arrayMergeSpans + f.getFreePageIDs = f.arrayGetFreePageIDs + f.readIDs = f.arrayReadIDs + } + + return f +} + +// size returns the size of the page after serialization. +func (f *freelist) size() int { + n := f.count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// arrayFreeCount returns count of free pages(array version) +func (f *freelist) arrayFreeCount() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, txp := range f.pending { + count += len(txp.ids) + } + return count +} + +// copyall copies into dst a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (f *freelist) copyall(dst []pgid) { + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) + } + sort.Sort(m) + mergepgids(dst, f.getFreePageIDs(), m) +} + +// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) arrayAllocate(txid txid, n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. +// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + txp := f.pending[txid] + if txp == nil { + txp = &txPending{} + f.pending[txid] = txp + } + allocTxid, ok := f.allocs[p.id] + if ok { + delete(f.allocs, p.id) + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. 
+ allocTxid = txid - 1 + } + + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + f.cache[id] = true + } +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, txp := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(f.pending, tid) + } + } + f.mergeSpans(m) +} + +// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. +func (f *freelist) releaseRange(begin, end txid) { + if begin > end { + return + } + var m pgids + for tid, txp := range f.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. + if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(f.pending, tid) + } + } + f.mergeSpans(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + txp := f.pending[txid] + if txp == nil { + return + } + var m pgids + for i, pgid := range txp.ids { + delete(f.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + f.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. + m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(f.pending, txid) + f.mergeSpans(m) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + if count == 0 { + f.ids = nil + } else { + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] + + // copy the ids, so we don't modify on the freelist page directly + idsCopy := make([]pgid, count) + copy(idsCopy, ids) + // Make sure they're sorted. + sort.Sort(pgids(idsCopy)) + + f.readIDs(idsCopy) + } +} + +// arrayReadIDs initializes the freelist from a given list of ids. +func (f *freelist) arrayReadIDs(ids []pgid) { + f.ids = ids + f.reindex() +} + +func (f *freelist) arrayGetFreePageIDs() []pgid { + return f.ids +} + +// write writes the page ids onto a freelist page. 
All free and pending ids are
+// saved to disk since in the event of a program crash, all pending ids will
+// become free.
+func (f *freelist) write(p *page) error {
+ // Combine the old free pgids and pgids waiting on an open transaction.
+
+ // Update the header flag.
+ p.flags |= freelistPageFlag
+
+ // The page.count can only hold up to 64k elements so if we overflow that
+ // number then we handle it by putting the size in the first element.
+ lenids := f.count()
+ if lenids == 0 {
+ p.count = uint16(lenids)
+ } else if lenids < 0xFFFF {
+ p.count = uint16(lenids)
+ f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
+ } else {
+ p.count = 0xFFFF
+ ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
+ f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
+ }
+
+ return nil
+}
+
+// reload reads the freelist from a page and filters out pending items.
+func (f *freelist) reload(p *page) {
+ f.read(p)
+
+ // Build a cache of only pending pages.
+ pcache := make(map[pgid]bool)
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ pcache[pendingID] = true
+ }
+ }
+
+ // Check each page in the freelist and build a new available freelist
+ // with any pages not in the pending lists.
+ var a []pgid
+ for _, id := range f.getFreePageIDs() {
+ if !pcache[id] {
+ a = append(a, id)
+ }
+ }
+
+ f.readIDs(a)
+}
+
+// reindex rebuilds the free cache based on available and pending free lists.
+func (f *freelist) reindex() {
+ ids := f.getFreePageIDs()
+ f.cache = make(map[pgid]bool, len(ids))
+ for _, id := range ids {
+ f.cache[id] = true
+ }
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ f.cache[pendingID] = true
+ }
+ }
+}
+
+// arrayMergeSpans merges a list of pages (represented by pgids) into the
+// existing spans (array version).
+func (f *freelist) arrayMergeSpans(ids pgids) {
+ sort.Sort(ids)
+ f.ids = pgids(f.ids).merge(ids)
+}
diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go
new file mode 100644
index 00000000..6a03a6c3
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/freelist_hmap.go
@@ -0,0 +1,178 @@
+package bbolt
+
+import "sort"
+
+// hashmapFreeCount returns the count of free pages (hashmap version).
+func (f *freelist) hashmapFreeCount() int {
+ // Use the forwardMap to get the total count.
+ count := 0
+ for _, size := range f.forwardMap {
+ count += int(size)
+ }
+ return count
+}
+
+// hashmapAllocate serves the same purpose as arrayAllocate, but uses the
+// hashmap as the backend.
+func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
+ if n == 0 {
+ return 0
+ }
+
+ // If we have an exact size match, take the short path.
+ if bm, ok := f.freemaps[uint64(n)]; ok {
+ for pid := range bm {
+ // Remove the span.
+ f.delSpan(pid, uint64(n))
+
+ f.allocs[pid] = txid
+
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, pid+pgid(i))
+ }
+ return pid
+ }
+ }
+
+ // Look up the map to find a larger span.
+ for size, bm := range f.freemaps {
+ if size < uint64(n) {
+ continue
+ }
+
+ for pid := range bm {
+ // Remove the initial span.
+ f.delSpan(pid, uint64(size))
+
+ f.allocs[pid] = txid
+
+ remain := size - uint64(n)
+
+ // Add the remaining span.
+ f.addSpan(pid+pgid(n), remain)
+
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, pid+pgid(i))
+ }
+ return pid
+ }
+ }
+
+ return 0
+}
+
+// hashmapReadIDs reads pgids as input and initializes the freelist (hashmap version).
+func (f *freelist) hashmapReadIDs(pgids []pgid) {
+ f.init(pgids)
+
+ // Rebuild the page cache.
+ f.reindex()
+}
+
+// hashmapGetFreePageIDs returns the sorted free page ids.
+func (f *freelist) hashmapGetFreePageIDs() []pgid {
+ count := f.free_count()
+ if count == 0 {
+ return nil
+ }
+
+ m := make([]pgid, 0, count)
+ for start, size := range f.forwardMap {
+ for i := 0; i < int(size); i++ {
+ m = append(m, start+pgid(i))
+ }
+ }
+ sort.Sort(pgids(m))
+
+ return m
+}
+
+// hashmapMergeSpans tries to merge a list of pages (represented by pgids)
+// with the existing spans.
+func (f *freelist) hashmapMergeSpans(ids pgids) {
+ for _, id := range ids {
+ // Try to see if we can merge and update.
+ f.mergeWithExistingSpan(id)
+ }
+}
+
+// mergeWithExistingSpan merges pid into the existing free spans, trying to
+// merge it backward and forward.
+func (f *freelist) mergeWithExistingSpan(pid pgid) {
+ prev := pid - 1
+ next := pid + 1
+
+ preSize, mergeWithPrev := f.backwardMap[prev]
+ nextSize, mergeWithNext := f.forwardMap[next]
+ newStart := pid
+ newSize := uint64(1)
+
+ if mergeWithPrev {
+ // Merge with the previous span.
+ start := prev + 1 - pgid(preSize)
+ f.delSpan(start, preSize)
+
+ newStart -= pgid(preSize)
+ newSize += preSize
+ }
+
+ if mergeWithNext {
+ // Merge with the next span.
+ f.delSpan(next, nextSize)
+ newSize += nextSize
+ }
+
+ f.addSpan(newStart, newSize)
+}
+
+func (f *freelist) addSpan(start pgid, size uint64) {
+ f.backwardMap[start-1+pgid(size)] = size
+ f.forwardMap[start] = size
+ if _, ok := f.freemaps[size]; !ok {
+ f.freemaps[size] = make(map[pgid]struct{})
+ }
+
+ f.freemaps[size][start] = struct{}{}
+}
+
+func (f *freelist) delSpan(start pgid, size uint64) {
+ delete(f.forwardMap, start)
+ delete(f.backwardMap, start+pgid(size-1))
+ delete(f.freemaps[size], start)
+ if len(f.freemaps[size]) == 0 {
+ delete(f.freemaps, size)
+ }
+}
+
+// init initializes the freelist from a list of pgids (hashmap version).
+// pgids must be sorted.
+func (f *freelist) init(pgids []pgid) {
+ if len(pgids) == 0 {
+ return
+ }
+
+ size := uint64(1)
+ start := pgids[0]
+
+ if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
+ panic("pgids not sorted")
+ }
+
+ f.freemaps = make(map[uint64]pidSet)
+ f.forwardMap = make(map[pgid]uint64)
+ f.backwardMap = make(map[pgid]uint64)
+
+ for i := 1; i < len(pgids); i++ {
+ // Continuous page.
+ if pgids[i] == pgids[i-1]+1 {
+ size++
+ } else {
+ f.addSpan(start, size)
+
+ size = 1
+ start = pgids[i]
+ }
+ }
+
+ // Init the tail.
+ if size != 0 && start != 0 {
+ f.addSpan(start, size)
+ }
+}
diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go
new file mode 100644
index 00000000..6c3fa553
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/node.go
@@ -0,0 +1,604 @@
+package bbolt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+// node represents an in-memory, deserialized page.
+type node struct {
+ bucket *Bucket
+ isLeaf bool
+ unbalanced bool
+ spilled bool
+ key []byte
+ pgid pgid
+ parent *node
+ children nodes
+ inodes inodes
+}
+
+// root returns the top-level node this node is attached to.
+func (n *node) root() *node {
+ if n.parent == nil {
+ return n
+ }
+ return n.parent.root()
+}
+
+// minKeys returns the minimum number of inodes this node should have.
+func (n *node) minKeys() int {
+ if n.isLeaf {
+ return 1
+ }
+ return 2
+}
+
+// size returns the size of the node after serialization.
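Before moving on to node.go, the span grouping that init performs above is easy to verify in isolation. A small re-implementation over plain uint64 ids (a sketch, not the vendored code itself):

```go
package main

import "fmt"

// spans collapses a sorted list of page ids into (start, size) runs, the same
// grouping freelist.init performs before calling addSpan for each run.
func spans(ids []uint64) [][2]uint64 {
	if len(ids) == 0 {
		return nil
	}
	var out [][2]uint64
	start, size := ids[0], uint64(1)
	for i := 1; i < len(ids); i++ {
		if ids[i] == ids[i-1]+1 {
			size++
			continue
		}
		out = append(out, [2]uint64{start, size})
		start, size = ids[i], 1
	}
	return append(out, [2]uint64{start, size})
}

func main() {
	fmt.Println(spans([]uint64{3, 4, 5, 9, 10, 13}))
	// [[3 3] [9 2] [13 1]] -> three spans: pages 3-5, 9-10, and 13
}
```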
+func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. 
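The put and del methods above share a sort.Search idiom for binary-searching the sorted inode slice. Isolated, with illustrative keys:

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

func main() {
	keys := [][]byte{[]byte("apple"), []byte("cherry"), []byte("grape")}
	target := []byte("cherry")

	// Index of the first key >= target; bytes.Compare != -1 means "not less".
	index := sort.Search(len(keys), func(i int) bool {
		return bytes.Compare(keys[i], target) != -1
	})

	exact := index < len(keys) && bytes.Equal(keys[index], target)
	fmt.Println(index, exact) // 1 true -> put would overwrite in place

	// A miss lands at the insertion point instead.
	index = sort.Search(len(keys), func(i int) bool {
		return bytes.Compare(keys[i], []byte("banana")) != -1
	})
	fmt.Println(index) // 1 -> "banana" would be inserted before "cherry"
}
```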
+func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. + if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. + // + // See: https://github.com/boltdb/bolt/pull/335 + klen, vlen := len(item.key), len(item.value) + if len(b) < klen+vlen { + b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] + } + + // Write data for the element to the end of the page. + copy(b[0:], item.key) + b = b[klen:] + copy(b[0:], item.value) + b = b[vlen:] + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize int) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize int) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. 
+ var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. + // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. + next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.Split++ + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. +// It returns the index as well as the size of the first page. +// This is only be called from split(). +func (n *node) splitIndex(threshold int) (index, sz int) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = i + inode := n.inodes[i] + elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if i >= minKeysPerPage && sz+elsize > threshold { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. 
+ if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. 
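The trigger condition in rebalance above reads inverted, since it returns early on healthy nodes. Restated positively as a toy predicate with the same thresholds:

```go
package main

import "fmt"

// needsRebalance restates node.rebalance's early-return condition: a node is
// merged with a sibling when its serialized size is at or below a quarter of
// the page size, or it holds no more than the minimum key count (1 for
// leaves, 2 for branches).
func needsRebalance(size, keys, minKeys, pageSize int) bool {
	threshold := pageSize / 4
	return size <= threshold || keys <= minKeys
}

func main() {
	const pageSize = 4096
	fmt.Println(needsRebalance(900, 10, 2, pageSize))  // true: under 1024 bytes
	fmt.Println(needsRebalance(2000, 2, 2, pageSize))  // true: too few keys
	fmt.Println(needsRebalance(2000, 10, 2, pageSize)) // false: left alone
}
```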
+func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. + for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go new file mode 100644 index 00000000..bca9615f --- /dev/null +++ b/vendor/go.etcd.io/bbolt/page.go @@ -0,0 +1,197 @@ +package bbolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. 
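The header/data split in the page struct above is worth making concrete. A standalone replica (assuming a typical 64-bit platform) shows where pageHeaderSize comes from:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Replica of the vendored page struct, for illustration only. The header is
// everything up to ptr; page payloads (meta, elements, freelist ids) are all
// addressed relative to &ptr, which is why pageHeaderSize is Offsetof(ptr).
type page struct {
	id       uint64 // pgid
	flags    uint16
	count    uint16
	overflow uint32
	ptr      uintptr
}

func main() {
	var p page
	fmt.Println(unsafe.Offsetof(p.ptr)) // 16 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(p))       // 24: 16-byte header plus the ptr word
}
```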
+func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. 
+ lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go new file mode 100644 index 00000000..f5086414 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -0,0 +1,707 @@ +package bbolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. +type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. + tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. 
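When the managed Update/View wrappers don't fit, the manual pattern the Tx doc comment above warns about looks like this sketch (database path, bucket, and key names illustrative):

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

// putWidget shows the manual Begin/Commit/Rollback discipline: every Begin
// must be paired with a Commit or Rollback, or the pages the transaction
// references can never be reclaimed.
func putWidget(db *bolt.DB, key, value []byte) error {
	tx, err := db.Begin(true) // writable transaction
	if err != nil {
		return err
	}
	// After a successful Commit this returns ErrTxClosed, which is safe to
	// ignore; on any early return it rolls the transaction back.
	defer tx.Rollback()

	b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
	if err != nil {
		return err
	}
	if err := b.Put(key, value); err != nil {
		return err
	}
	return tx.Commit()
}

func main() {
	db, err := bolt.Open("manual.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := putWidget(db, []byte("k"), []byte("v")); err != nil {
		log.Fatal(err)
	}
}
```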
+func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + return fn(k, tx.root.Bucket(k)) + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. +func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + // Free the old freelist because commit writes out a fresh freelist. + if tx.meta.freelist != pgidNoFreelist { + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + } + + if !tx.db.NoFreelistSync { + err := tx.commitFreelist() + if err != nil { + return err + } + } else { + tx.meta.freelist = pgidNoFreelist + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. 
+ for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +func (tx *Tx) commitFreelist() error { + // Allocate new pages for the new free list. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + opgid := tx.meta.pgid + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. +// +// Deprecated; Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. 
+ wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, nil +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + + // Check if any pages are double freed. + freed := make(map[pgid]bool) + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. 
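The Copy/CopyFile/WriteTo trio above exists for hot backups: the read transaction pins the pages being copied, so writers keep running while the snapshot is taken. A hedged sketch (the destination path is illustrative):

package main

import bbolt "go.etcd.io/bbolt"

func backup(db *bbolt.DB, path string) error {
	return db.View(func(tx *bbolt.Tx) error {
		// CopyFile streams the whole database, including both meta
		// pages, to a new file while this read transaction stays open.
		return tx.CopyFile(path, 0600)
	})
}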
+func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(tx.meta.txid, count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount += count + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + // Clear out page cache early. + tx.pages = make(map[pgid]*page) + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) + } + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. + tx.stats.Write++ + + return nil +} + +// page returns a reference to the page with a given id. +// If page has been written to then a temporary buffered page is returned. +func (tx *Tx) page(id pgid) *page { + // Check the dirty pages first. + if tx.pages != nil { + if p, ok := tx.pages[id]; ok { + return p + } + } + + // Otherwise return directly from the mmap. + return tx.db.page(id) +} + +// forEachPage iterates over every page within a given page and executes a function. +func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { + p := tx.page(pgid) + + // Execute function. + fn(p, depth) + + // Recursively loop over children. + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + tx.forEachPage(elem.pgid, depth+1, fn) + } + } +} + +// Page returns page information for a given page number. +// This is only safe for concurrent use when used by a writable transaction. 
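A hedged sketch of page introspection with Tx.Page, whose implementation follows; per the safety note above it runs inside a writable transaction, and the loop stops once Page reports nil for an id past the high water mark:

package main

import (
	"fmt"

	bbolt "go.etcd.io/bbolt"
)

func dumpPages(db *bbolt.DB) error {
	return db.Update(func(tx *bbolt.Tx) error {
		for id := 0; ; id++ {
			info, err := tx.Page(id)
			if err != nil {
				return err
			}
			if info == nil {
				return nil // past the last allocated page
			}
			fmt.Printf("page %d: %s overflow=%d\n", info.ID, info.Type, info.OverflowCount)
		}
	})
}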
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+	if tx.db == nil {
+		return nil, ErrTxClosed
+	} else if pgid(id) >= tx.meta.pgid {
+		return nil, nil
+	}
+
+	// Build the page info.
+	p := tx.db.page(pgid(id))
+	info := &PageInfo{
+		ID:            id,
+		Count:         int(p.count),
+		OverflowCount: int(p.overflow),
+	}
+
+	// Determine the type (or if it's free).
+	if tx.db.freelist.freed(pgid(id)) {
+		info.Type = "free"
+	} else {
+		info.Type = p.typ()
+	}
+
+	return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+	// Page statistics.
+	PageCount int // number of page allocations
+	PageAlloc int // total bytes allocated
+
+	// Cursor statistics.
+	CursorCount int // number of cursors created
+
+	// Node statistics.
+	NodeCount int // number of node allocations
+	NodeDeref int // number of node dereferences
+
+	// Rebalance statistics.
+	Rebalance     int           // number of node rebalances
+	RebalanceTime time.Duration // total time spent rebalancing
+
+	// Split/Spill statistics.
+	Split     int           // number of nodes split
+	Spill     int           // number of nodes spilled
+	SpillTime time.Duration // total time spent spilling
+
+	// Write statistics.
+	Write     int           // number of writes performed
+	WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+	s.PageCount += other.PageCount
+	s.PageAlloc += other.PageAlloc
+	s.CursorCount += other.CursorCount
+	s.NodeCount += other.NodeCount
+	s.NodeDeref += other.NodeDeref
+	s.Rebalance += other.Rebalance
+	s.RebalanceTime += other.RebalanceTime
+	s.Split += other.Split
+	s.Spill += other.Spill
+	s.SpillTime += other.SpillTime
+	s.Write += other.Write
+	s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
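Sub, implemented next, supports the two-snapshot pattern its comment describes: capture stats before and after a window and diff them. A hedged sketch using the TxStats embedded in DB.Stats (the window length is arbitrary):

package main

import (
	"fmt"
	"time"

	bbolt "go.etcd.io/bbolt"
)

func reportWindow(db *bbolt.DB) {
	prev := db.Stats()
	time.Sleep(10 * time.Second) // measurement window; the workload runs meanwhile
	cur := db.Stats()
	diff := cur.TxStats.Sub(&prev.TxStats)
	fmt.Printf("writes: %d, write time: %v\n", diff.Write, diff.WriteTime)
}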
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 00000000..6d4d1be7 --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 00000000..0a4504f1 --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,11 @@ +.DS_Store +/vendor +/cover +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml new file mode 100644 index 00000000..0f3769e5 --- /dev/null +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -0,0 +1,27 @@ +sudo: false +language: go +go_import_path: go.uber.org/atomic + +go: + - 1.11.x + - 1.12.x + +matrix: + include: + - go: 1.12.x + env: NO_TEST=yes LINT=yes + +cache: + directories: + - vendor + +install: + - make install_ci + +script: + - test -n "$NO_TEST" || make test_ci + - test -n "$NO_TEST" || scripts/test-ubergo.sh + - test -z "$LINT" || make install_lint lint + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 00000000..8765c9fb --- /dev/null +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
new file mode 100644
index 00000000..1ef26307
--- /dev/null
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -0,0 +1,51 @@
+# Many Go tools take file globs or directories as arguments instead of packages.
+PACKAGE_FILES ?= *.go
+
+# For pre go1.6
+export GO15VENDOREXPERIMENT=1
+
+
+.PHONY: build
+build:
+	go build -i ./...
+
+
+.PHONY: install
+install:
+	glide --version || go get github.com/Masterminds/glide
+	glide install
+
+
+.PHONY: test
+test:
+	go test -cover -race ./...
+
+
+.PHONY: install_ci
+install_ci: install
+	go get github.com/wadey/gocovmerge
+	go get github.com/mattn/goveralls
+	go get golang.org/x/tools/cmd/cover
+
+.PHONY: install_lint
+install_lint:
+	go get golang.org/x/lint/golint
+
+
+.PHONY: lint
+lint:
+	@rm -rf lint.log
+	@echo "Checking formatting..."
+	@gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log
+	@echo "Checking vet..."
+	@go vet ./... 2>&1 | tee -a lint.log
+	@echo "Checking lint..."
+	@golint $$(go list ./...) 2>&1 | tee -a lint.log
+	@echo "Checking for unresolved FIXMEs..."
+	@git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log
+	@[ ! -s lint.log ]
+
+
+.PHONY: test_ci
+test_ci: install_ci build
+	./scripts/cover.sh $(shell go list $(PACKAGES))
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 00000000..62eb8e57
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,36 @@
+# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+`go get -u go.uber.org/atomic`
+
+## Usage
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically. `go.uber.org/atomic` preserves all the
+functionality of the standard library, but wraps the primitive types to
+provide a safer, more convenient API.
+
+```go
+var atom atomic.Uint32
+atom.Store(42)
+atom.Sub(2)
+atom.CAS(40, 11)
+```
+
+See the [documentation][doc] for a complete API specification.
+
+## Development Status
+Stable.
+
+___
+Released under the [MIT License](LICENSE.txt).
+
+[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
+[doc]: https://godoc.org/go.uber.org/atomic
+[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
+[ci]: https://travis-ci.com/uber-go/atomic
+[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/atomic
+[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
+[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go
new file mode 100644
index 00000000..1db6849f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/atomic.go
@@ -0,0 +1,351 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic + +import ( + "math" + "sync/atomic" + "time" +) + +// Int32 is an atomic wrapper around an int32. +type Int32 struct{ v int32 } + +// NewInt32 creates an Int32. +func NewInt32(i int32) *Int32 { + return &Int32{i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// Int64 is an atomic wrapper around an int64. +type Int64 struct{ v int64 } + +// NewInt64 creates an Int64. +func NewInt64(i int64) *Int64 { + return &Int64{i} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. 
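A hedged usage sketch for the wrappers defined in this file: a shared counter where every access goes through the atomic type, so there is no plain int64 left to read unsafely.

package main

import (
	"fmt"
	"sync"

	"go.uber.org/atomic"
)

func main() {
	var (
		count atomic.Int64 // zero value is ready to use
		wg    sync.WaitGroup
	)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			count.Inc()
		}()
	}
	wg.Wait()
	fmt.Println(count.Load()) // always 10
}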
+func (i *Int64) Store(n int64) {
+	atomic.StoreInt64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int64 and returns the old value.
+func (i *Int64) Swap(n int64) int64 {
+	return atomic.SwapInt64(&i.v, n)
+}
+
+// Uint32 is an atomic wrapper around a uint32.
+type Uint32 struct{ v uint32 }
+
+// NewUint32 creates a Uint32.
+func NewUint32(i uint32) *Uint32 {
+	return &Uint32{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+	return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(n uint32) uint32 {
+	return atomic.AddUint32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(n uint32) uint32 {
+	return atomic.AddUint32(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+	return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+	return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint32) CAS(old, new uint32) bool {
+	return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(n uint32) {
+	atomic.StoreUint32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(n uint32) uint32 {
+	return atomic.SwapUint32(&i.v, n)
+}
+
+// Uint64 is an atomic wrapper around a uint64.
+type Uint64 struct{ v uint64 }
+
+// NewUint64 creates a Uint64.
+func NewUint64(i uint64) *Uint64 {
+	return &Uint64{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+	return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(n uint64) uint64 {
+	return atomic.AddUint64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(n uint64) uint64 {
+	return atomic.AddUint64(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+	return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+	return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint64) CAS(old, new uint64) bool {
+	return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(n uint64) {
+	atomic.StoreUint64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(n uint64) uint64 {
+	return atomic.SwapUint64(&i.v, n)
+}
+
+// Bool is an atomic Boolean.
+type Bool struct{ v uint32 }
+
+// NewBool creates a Bool.
+func NewBool(initial bool) *Bool {
+	return &Bool{boolToInt(initial)}
+}
+
+// Load atomically loads the Boolean.
+func (b *Bool) Load() bool {
+	return truthy(atomic.LoadUint32(&b.v))
+}
+
+// CAS is an atomic compare-and-swap.
+func (b *Bool) CAS(old, new bool) bool {
+	return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new))
+}
+
+// Store atomically stores the passed value.
+func (b *Bool) Store(new bool) {
+	atomic.StoreUint32(&b.v, boolToInt(new))
+}
+
+// Swap sets the given value and returns the previous value.
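Uint32.Sub and Uint64.Sub above lean on two's-complement arithmetic: there is no Add variant that takes a negative unsigned argument, but ^(n-1) equals -n modulo 2^32, so adding it subtracts n. A hedged standalone check of the identity:

package main

import "fmt"

func main() {
	var v uint32 = 10
	n := uint32(3)
	v += ^(n - 1) // same bits as v -= n, modulo 2^32
	fmt.Println(v) // 7
}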
+func (b *Bool) Swap(new bool) bool { + return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + return truthy(atomic.AddUint32(&b.v, 1) - 1) +} + +func truthy(n uint32) bool { + return n&1 == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Float64 is an atomic wrapper around float64. +type Float64 struct { + v uint64 +} + +// NewFloat64 creates a Float64. +func NewFloat64(f float64) *Float64 { + return &Float64{math.Float64bits(f)} +} + +// Load atomically loads the wrapped value. +func (f *Float64) Load() float64 { + return math.Float64frombits(atomic.LoadUint64(&f.v)) +} + +// Store atomically stores the passed value. +func (f *Float64) Store(s float64) { + atomic.StoreUint64(&f.v, math.Float64bits(s)) +} + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// CAS is an atomic compare-and-swap. +func (f *Float64) CAS(old, new float64) bool { + return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) +} + +// Duration is an atomic wrapper around time.Duration +// https://godoc.org/time#Duration +type Duration struct { + v Int64 +} + +// NewDuration creates a Duration. +func NewDuration(d time.Duration) *Duration { + return &Duration{v: *NewInt64(int64(d))} +} + +// Load atomically loads the wrapped value. +func (d *Duration) Load() time.Duration { + return time.Duration(d.v.Load()) +} + +// Store atomically stores the passed value. +func (d *Duration) Store(n time.Duration) { + d.v.Store(int64(n)) +} + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// Swap atomically swaps the wrapped time.Duration and returns the old value. +func (d *Duration) Swap(n time.Duration) time.Duration { + return time.Duration(d.v.Swap(int64(n))) +} + +// CAS is an atomic compare-and-swap. +func (d *Duration) CAS(old, new time.Duration) bool { + return d.v.CAS(int64(old), int64(new)) +} + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 00000000..0489d19b --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,55 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper around Value for errors +type Error struct{ v Value } + +// errorHolder is non-nil holder for error object. +// atomic.Value panics on saving nil object, so err object needs to be +// wrapped with valid object first. +type errorHolder struct{ err error } + +// NewError creates new atomic error object +func NewError(err error) *Error { + e := &Error{} + if err != nil { + e.Store(err) + } + return e +} + +// Load atomically loads the wrapped error +func (e *Error) Load() error { + v := e.v.Load() + if v == nil { + return nil + } + + eh := v.(errorHolder) + return eh.err +} + +// Store atomically stores error. +// NOTE: a holder object is allocated on each Store call. +func (e *Error) Store(err error) { + e.v.Store(errorHolder{err: err}) +} diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock new file mode 100644 index 00000000..3c72c599 --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.lock @@ -0,0 +1,17 @@ +hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 +updated: 2016-10-27T00:10:51.16960137-07:00 +imports: [] +testImports: +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: d77da356e56a7428ad25149ca77381849a6a5232 + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml new file mode 100644 index 00000000..4cf608ec --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.yaml @@ -0,0 +1,6 @@ +package: go.uber.org/atomic +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 00000000..ede8136f --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper around Value for strings. +type String struct{ v Value } + +// NewString creates a String. +func NewString(str string) *String { + s := &String{} + if str != "" { + s.Store(str) + } + return s +} + +// Load atomically loads the wrapped string. +func (s *String) Load() string { + v := s.v.Load() + if v == nil { + return "" + } + return v.(string) +} + +// Store atomically stores the passed string. +// Note: Converting the string to an interface{} to store in the Value +// requires an allocation. +func (s *String) Store(str string) { + s.v.Store(str) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 00000000..c160e1a4 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,289 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. 
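A hedged usage sketch for the package API documented above: Sum256 for one-shot hashing, and New512 with a non-nil key for a keyed MAC (the key and message here are illustrative; a real key must be secret and random).

package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	sum := blake2b.Sum256([]byte("hello"))
	fmt.Printf("%x\n", sum)

	key := make([]byte, 32) // a 1- to 64-byte key turns the hash into a MAC
	mac, err := blake2b.New512(key)
	if err != nil {
		panic(err)
	}
	mac.Write([]byte("message"))
	fmt.Printf("%x\n", mac.Sum(nil))
}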
+func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. +func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) 
+ for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) + b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) +} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 00000000..4d31dd0f --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.7,amd64,!gccgo,!appengine + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 00000000..5593b1b3 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,750 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; 
BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 
+#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + 
VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + MOVQ SP, R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ CX, 16(SP) + XORQ CX, CX + MOVQ CX, 24(SP) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(SP) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(SP) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(SP) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(SP), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(SP) + VMOVDQA Y13, 64(SP) + VMOVDQA Y14, 96(SP) + VMOVDQA Y15, 128(SP) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(SP) + VMOVDQA Y13, 192(SP) + VMOVDQA Y14, 224(SP) + VMOVDQA Y15, 256(SP) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) + ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + MOVQ DX, SP + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; 
BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + 
VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(SP), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(SP) + VMOVDQA X13, 32(SP) + VMOVDQA X14, 48(SP) + VMOVDQA X15, 64(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(SP) + VMOVDQA X13, 96(SP) + VMOVDQA X14, 112(SP) + VMOVDQA X15, 128(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(SP) + VMOVDQA X13, 160(SP) + VMOVDQA X14, 176(SP) + VMOVDQA X15, 192(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, 
X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(SP) + VMOVDQA X13, 224(SP) + VMOVDQA X14, 240(SP) + VMOVDQA X15, 256(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + MOVQ BP, SP + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 00000000..30e2fcd5 --- /dev/null +++ 
b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7,amd64,!gccgo,!appengine + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 00000000..578e947b --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,281 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), 
m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(SP), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(SP) + MOVO X9, 32(SP) + MOVO X10, 48(SP) + MOVO X11, 64(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(SP) + MOVO X9, 96(SP) + MOVO X10, 112(SP) + MOVO X11, 128(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(SP) + MOVO X9, 160(SP) + MOVO X10, 176(SP) + MOVO X11, 192(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(SP) + MOVO X9, 224(SP) + MOVO X10, 240(SP) + MOVO X11, 256(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, 
X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + MOVQ BP, SP + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 00000000..3168a8aa --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
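+// each row regroups the even- and odd-indexed entries of the matching +// sigma permutation: s[0..3] and s[4..7] hold the first and second message +// words of the four column G calls, s[8..11] and s[12..15] the same for the +// diagonal G calls, matching the order in which the round body below +// consumes m.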
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ 
v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 00000000..da156a1b --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 00000000..52c414db --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produces a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. +// +// A non-nil key turns the hash into a MAC. The key must be between +// zero and 32 bytes long. +func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length.
+ return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 00000000..efd689af --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
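+ +// The crypto.BLAKE2b_256, crypto.BLAKE2b_384 and crypto.BLAKE2b_512 hash +// constants were only added to package crypto in Go 1.9, hence the build +// constraint below.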
+ +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go new file mode 100644 index 00000000..bbb86efe --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD as specified in RFC 7539, +// and its extended nonce variant XChaCha20-Poly1305. +package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" + +import ( + "crypto/cipher" + "encoding/binary" + "errors" +) + +const ( + // KeySize is the size of the key used by this AEAD, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // AEAD, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305 + // variant of this AEAD, in bytes. + NonceSizeX = 24 +) + +type chacha20poly1305 struct { + key [8]uint32 +} + +// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. +func New(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(chacha20poly1305) + ret.key[0] = binary.LittleEndian.Uint32(key[0:4]) + ret.key[1] = binary.LittleEndian.Uint32(key[4:8]) + ret.key[2] = binary.LittleEndian.Uint32(key[8:12]) + ret.key[3] = binary.LittleEndian.Uint32(key[12:16]) + ret.key[4] = binary.LittleEndian.Uint32(key[16:20]) + ret.key[5] = binary.LittleEndian.Uint32(key[20:24]) + ret.key[6] = binary.LittleEndian.Uint32(key[24:28]) + ret.key[7] = binary.LittleEndian.Uint32(key[28:32]) + return ret, nil +} + +func (c *chacha20poly1305) NonceSize() int { + return NonceSize +} + +func (c *chacha20poly1305) Overhead() int { + return 16 +} + +func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + return c.seal(dst, nonce, plaintext, additionalData) +} + +var errOpen = errors.New("chacha20poly1305: message authentication failed") + +func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + return c.open(dst, nonce, ciphertext, additionalData) +} + +// sliceForAppend takes a slice and a requested number of bytes. 
It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go new file mode 100644 index 00000000..2aa4fd89 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -0,0 +1,86 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/internal/subtle" + "golang.org/x/sys/cpu" +) + +//go:noescape +func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool + +//go:noescape +func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) + +var ( + useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2 +) + +// setupState writes a ChaCha20 input matrix to state. See +// https://tools.ietf.org/html/rfc7539#section-2.3. +func setupState(state *[16]uint32, key *[8]uint32, nonce []byte) { + state[0] = 0x61707865 + state[1] = 0x3320646e + state[2] = 0x79622d32 + state[3] = 0x6b206574 + + state[4] = key[0] + state[5] = key[1] + state[6] = key[2] + state[7] = key[3] + state[8] = key[4] + state[9] = key[5] + state[10] = key[6] + state[11] = key[7] + + state[12] = 0 + state[13] = binary.LittleEndian.Uint32(nonce[:4]) + state[14] = binary.LittleEndian.Uint32(nonce[4:8]) + state[15] = binary.LittleEndian.Uint32(nonce[8:12]) +} + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + if !cpu.X86.HasSSSE3 { + return c.sealGeneric(dst, nonce, plaintext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ret, out := sliceForAppend(dst, len(plaintext)+16) + if subtle.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) + return ret +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if !cpu.X86.HasSSSE3 { + return c.openGeneric(dst, nonce, ciphertext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ciphertext = ciphertext[:len(ciphertext)-16] + ret, out := sliceForAppend(dst, len(ciphertext)) + if subtle.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s new file mode 100644 index 00000000..9dd5d7a9 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -0,0 +1,2695 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. + +// +build go1.7,amd64,!gccgo,!appengine + +#include "textflag.h" +// General register allocation +#define oup DI +#define inp SI +#define inl BX +#define adp CX // free to reuse, after we hash the additional data +#define keyp R8 // free to reuse, when we copy the key to stack +#define itr2 R9 // general iterator +#define itr1 CX // general iterator +#define acc0 R10 +#define acc1 R11 +#define acc2 R12 +#define t0 R13 +#define t1 R14 +#define t2 R15 +#define t3 R8 +// Register and stack allocation for the SSE code +#define rStore (0*16)(BP) +#define sStore (1*16)(BP) +#define state1Store (2*16)(BP) +#define state2Store (3*16)(BP) +#define tmpStore (4*16)(BP) +#define ctr0Store (5*16)(BP) +#define ctr1Store (6*16)(BP) +#define ctr2Store (7*16)(BP) +#define ctr3Store (8*16)(BP) +#define A0 X0 +#define A1 X1 +#define A2 X2 +#define B0 X3 +#define B1 X4 +#define B2 X5 +#define C0 X6 +#define C1 X7 +#define C2 X8 +#define D0 X9 +#define D1 X10 +#define D2 X11 +#define T0 X12 +#define T1 X13 +#define T2 X14 +#define T3 X15 +#define A3 T0 +#define B3 T1 +#define C3 T2 +#define D3 T3 +// Register and stack allocation for the AVX2 code +#define rsStoreAVX2 (0*32)(BP) +#define state1StoreAVX2 (1*32)(BP) +#define state2StoreAVX2 (2*32)(BP) +#define ctr0StoreAVX2 (3*32)(BP) +#define ctr1StoreAVX2 (4*32)(BP) +#define ctr2StoreAVX2 (5*32)(BP) +#define ctr3StoreAVX2 (6*32)(BP) +#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack +#define AA0 Y0 +#define AA1 Y5 +#define AA2 Y6 +#define AA3 Y7 +#define BB0 Y14 +#define BB1 Y9 +#define BB2 Y10 +#define BB3 Y11 +#define CC0 Y12 +#define CC1 Y13 +#define CC2 Y8 +#define CC3 Y15 +#define DD0 Y4 +#define DD1 Y1 +#define DD2 Y2 +#define DD3 Y3 +#define TT0 DD3 +#define TT1 AA3 +#define TT2 BB3 +#define TT3 CC3 +// ChaCha20 constants +DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 +// <<< 16 with PSHUFB +DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A +DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A +// <<< 8 with PSHUFB +DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B +DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B + +DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 +DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 + +DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 +DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 +// Poly1305 key clamp +DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF + +DATA ·sseIncMask<>+0x00(SB)/8, $0x1 +DATA ·sseIncMask<>+0x08(SB)/8, $0x0 +// To load/store the last < 16 bytes in a buffer +DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff +DATA 
·andMask<>+0x18(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+0x28(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff +DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff + +GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 +GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 +GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 +// No PALIGNR in Go ASM yet (but VPALIGNR is present). +#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 +#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 +#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 +#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 +#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 +#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 +#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 +#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 +#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 +#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 +#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 +#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 +#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 +#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 +#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, 
X5 +#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X13, X13 +#define shiftC0Right shiftC0Left +#define shiftC1Right shiftC1Left +#define shiftC2Right shiftC2Left +#define shiftC3Right shiftC3Left +#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 +#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 +#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 +#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 +// Some macros +#define chachaQR(A, B, C, D, T) \ + PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ + PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B + +#define chachaQR_AVX2(A, B, C, D, T) \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B + +#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 +#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX +#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 +#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 + +#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 +#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 + +#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage +#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage +// ---------------------------------------------------------------------------- +TEXT polyHashADInternal<>(SB), NOSPLIT, $0 + // adp points to beginning of additional data + // itr2 holds ad length + XORQ acc0, acc0 + XORQ acc1, acc1 + XORQ acc2, acc2 + CMPQ itr2, $13 + JNE hashADLoop + +openFastTLSAD: + // Special treatment for the TLS case of 13 bytes + MOVQ (adp), acc0 + MOVQ 5(adp), acc1 + SHRQ $24, acc1 + MOVQ $1, acc2 + polyMul + RET + +hashADLoop: + // Hash in 16 byte chunks + CMPQ itr2, $16 + JB hashADTail + polyAdd(0(adp)) + LEAQ (1*16)(adp), adp + SUBQ $16, itr2 + polyMul + JMP hashADLoop + +hashADTail: + CMPQ itr2, $0 + JE hashADDone + + // Hash last < 16 byte tail + XORQ t0, t0 + XORQ t1, t1 + XORQ t2, t2 + ADDQ itr2, adp + +hashADTailLoop: + SHLQ $8, t0, t1 + SHLQ $8, t0 + MOVB -1(adp), t2 + XORQ t2, t0 + DECQ adp + DECQ itr2 + JNE hashADTailLoop + +hashADTailFinish: + ADDQ 
t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Finished AD +hashADDone: + RET + +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Open(dst, key, src, ad []byte) bool +TEXT ·chacha20Poly1305Open(SB), 0, $288-97 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + // Check for AVX2 support + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Open_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE openSSE128 // About 16% faster + + // For long buffers, prepare the poly key first + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + MOVO D0, T1 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + MOVO D0, ctr3Store + MOVQ $10, itr2 + +openSSEPreparePolyKey: + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + DECQ itr2 + JNE openSSEPreparePolyKey + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore; MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSEMainLoop: + CMPQ inl, $256 + JB openSSEMainLoopDone + + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $4, itr1 + MOVQ inp, itr2 + +openSSEInternalLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(itr2)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(itr2), itr2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr1 + JGE openSSEInternalLoop + + polyAdd(0(itr2)) + polyMul + LEAQ (2*8)(itr2), itr2 + + CMPQ itr1, $-6 + JG openSSEInternalLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, 
C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Load - xor - store + MOVO D3, tmpStore + MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) + MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) + MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) + MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) + MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) + MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) + MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) + MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) + MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) + MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) + MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) + MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) + MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) + MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) + LEAQ 256(inp), inp + LEAQ 256(oup), oup + SUBQ $256, inl + JMP openSSEMainLoop + +openSSEMainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $64 + JBE openSSETail64 + CMPQ inl, $128 + JBE openSSETail128 + CMPQ inl, $192 + JBE openSSETail192 + JMP openSSETail256 + +openSSEFinalize: + // Hash in the PT, AAD lengths + ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally, constant-time compare to the tag at the end of the message + XORQ AX, AX + MOVQ $1, DX + XORQ (0*8)(inp), acc0 + XORQ (1*8)(inp), acc1 + ORQ acc1, acc0 + CMOVQEQ DX, AX + + // Return true iff tags are equal + MOVB AX, ret+96(FP) + RET + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 129 bytes +openSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we need to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +openSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right; shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE openSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + + // Clamp and store the key
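+ // (Poly1305 clamping per RFC 7539, section 2.5: r, in A0, is ANDed with + // 0x0ffffffc0ffffffc0ffffffc0fffffff; s, in B0, is stored unmodified.)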
+ PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore; MOVOU B0, sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSE128Open: + CMPQ inl, $16 + JB openSSETail16 + SUBQ $16, inl + + // Load for hashing + polyAdd(0(inp)) + + // Load for decryption + MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP openSSE128Open + +openSSETail16: + TESTQ inl, inl + JE openSSEFinalize + + // We can safely load the CT from the end, because it is padded with the MAC + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVOU (inp), T0 + ADDQ inl, inp + PAND -16(t0)(itr2*1), T0 + MOVO T0, 0+tmpStore + MOVQ T0, t0 + MOVQ 8+tmpStore, t1 + PXOR A1, T0 + + // We can only store one byte at a time, since plaintext can be shorter than 16 bytes +openSSETail16Store: + MOVQ T0, t3 + MOVB t3, (oup) + PSRLDQ $1, T0 + INCQ oup + DECQ inl + JNE openSSETail16Store + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + JMP openSSEFinalize + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of ciphertext +openSSETail64: + // Need to decrypt up to 64 bytes - prepare single block + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + XORQ itr2, itr2 + MOVQ inl, itr1 + CMPQ itr1, $16 + JB openSSETail64LoopB + +openSSETail64LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + SUBQ $16, itr1 + +openSSETail64LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + + CMPQ itr1, $16 + JAE openSSETail64LoopA + + CMPQ itr2, $160 + JNE openSSETail64LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 + +openSSETail64DecLoop: + CMPQ inl, $16 + JB openSSETail64DecLoopDone + SUBQ $16, inl + MOVOU (inp), T0 + PXOR T0, A0 + MOVOU A0, (oup) + LEAQ 16(inp), inp + LEAQ 16(oup), oup + MOVO B0, A0 + MOVO C0, B0 + MOVO D0, C0 + JMP openSSETail64DecLoop + +openSSETail64DecLoopDone: + MOVO A0, A1 + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openSSETail128: + // Need to decrypt up to 128 bytes - prepare two blocks + MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSETail128LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + CMPQ itr2, itr1 + JB openSSETail128LoopA + + CMPQ itr2, $160 + JNE openSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL 
·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr1Store, D0; PADDL ctr0Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + + SUBQ $64, inl + LEAQ 64(inp), inp + LEAQ 64(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of ciphertext +openSSETail192: + // Need to decrypt up to 192 bytes - prepare three blocks + MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store + MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store + + MOVQ inl, itr1 + MOVQ $160, itr2 + CMPQ itr1, $160 + CMOVQGT itr2, itr1 + ANDQ $-16, itr1 + XORQ itr2, itr2 + +openSSLTail192LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSLTail192LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + CMPQ itr2, itr1 + JB openSSLTail192LoopA + + CMPQ itr2, $160 + JNE openSSLTail192LoopB + + CMPQ inl, $176 + JB openSSLTail192Store + + polyAdd(160(inp)) + polyMul + + CMPQ inl, $192 + JB openSSLTail192Store + + polyAdd(176(inp)) + polyMul + +openSSLTail192Store: + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 + MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) + + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + SUBQ $128, inl + LEAQ 128(inp), inp + LEAQ 128(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openSSETail256: + // Need to decrypt up to 256 bytes - prepare four blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, 
ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + XORQ itr2, itr2 + +openSSETail256Loop: + // This loop interleaves 8 ChaCha quarter rounds with 1 poly multiplication + polyAdd(0(inp)(itr2*1)) + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulStage3 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + ADDQ $2*8, itr2 + CMPQ itr2, $160 + JB openSSETail256Loop + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail256HashLoop: + polyAdd(0(inp)(itr2*1)) + polyMul + ADDQ $2*8, itr2 + CMPQ itr2, itr1 + JB openSSETail256HashLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + LEAQ 192(inp), inp + LEAQ 192(oup), oup + SUBQ $192, inl + MOVO A3, A0 + MOVO B3, B0 + MOVO C3, C0 + MOVO tmpStore, D0 + + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Open_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimization, for very short buffers + CMPQ inl, $192 + JBE openAVX2192 + CMPQ inl, $320 + JBE openAVX2320 + + // For the general case, prepare the poly key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, state2StoreAVX2 + VMOVDQA DD0, ctr3StoreAVX2 + MOVQ $10, itr2 + +openAVX2PreparePolyKey: + 
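// Each iteration below runs one column round and one diagonal round - ten iterations for the full 20 ChaCha20 rounds +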
chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + DECQ itr2 + JNE openAVX2PreparePolyKey + + VPADDD ·chacha20Constants<>(SB), AA0, AA0 + VPADDD state1StoreAVX2, BB0, BB0 + VPADDD state2StoreAVX2, CC0, CC0 + VPADDD ctr3StoreAVX2, DD0, DD0 + + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for the first 64 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + + // Hash AD + first 64 bytes + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +openAVX2InitialHash64: + polyAdd(0(inp)(itr1*1)) + polyMulAVX2 + ADDQ $16, itr1 + CMPQ itr1, $64 + JNE openAVX2InitialHash64 + + // Decrypt the first 64 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), BB0, BB0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU BB0, (1*32)(oup) + LEAQ (2*32)(inp), inp + LEAQ (2*32)(oup), oup + SUBQ $64, inl + +openAVX2MainLoop: + CMPQ inl, $512 + JB openAVX2MainLoopDone + + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + +openAVX2InternalLoop: + // Let's just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications + // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext + polyAdd(0*8(inp)(itr1*1)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(inp)(itr1*1)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, 
BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(inp)(itr1*1)) + LEAQ (6*8)(itr1), itr1 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + CMPQ itr1, $480 + JNE openAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(480(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 
$0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(496(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + LEAQ (32*16)(oup), oup + SUBQ $(32*16), inl + JMP openAVX2MainLoop + +openAVX2MainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $128 + JBE openAVX2Tail128 + CMPQ inl, $256 + JBE openAVX2Tail256 + CMPQ inl, $384 + JBE openAVX2Tail384 + JMP openAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +openAVX2192: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +openAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE openAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +openAVX2ShortOpen: + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openAVX2ShortOpenLoop: + CMPQ inl, $32 + JB openAVX2ShortTail32 + SUBQ $32, inl + + // Load for hashing + 
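// (each 32-byte chunk is hashed as two 16-byte Poly1305 blocks before it is decrypted) +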
polyAdd(0*8(inp)) + polyMulAVX2 + polyAdd(2*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP openAVX2ShortOpenLoop + +openAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2ShortDone + + SUBQ $16, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2ShortDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +openAVX2320: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +openAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE openAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP openAVX2ShortOpen + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openAVX2Tail128: + // Need to decrypt up to 128 bytes - prepare two blocks + VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD1 + VMOVDQA DD1, DD0 + + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + TESTQ itr1, itr1 + JE 
openAVX2Tail128LoopB + +openAVX2Tail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMulAVX2 + +openAVX2Tail128LoopB: + ADDQ $16, itr2 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail128LoopA + CMPQ itr2, $160 + JNE openAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC1, CC1 + VPADDD DD0, DD1, DD1 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + +openAVX2TailLoop: + CMPQ inl, $32 + JB openAVX2Tail + SUBQ $32, inl + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + JMP openAVX2TailLoop + +openAVX2Tail: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2TailDone + SUBQ $16, inl + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2TailDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openAVX2Tail256: + // Need to decrypt up to 256 bytes - prepare four blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + + // Compute the number of iterations that will hash data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $128, itr1 + SHRQ $4, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + +openAVX2Tail256LoopA: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail256LoopA + + CMPQ itr2, $10 + JNE openAVX2Tail256LoopB + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + + // Hash the remainder of data (if any) +openAVX2Tail256Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail256HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail256Hash + +// Store 128 bytes safely, then go to store loop +openAVX2Tail256HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + 
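// Each ymm register holds matching rows of two interleaved blocks - VPERM2I128 $0x02 gathers the low 128-bit lanes (first block), $0x13 the high lanes (second block), yielding sequential key stream +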
VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + + VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 + VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) + LEAQ (4*32)(inp), inp + LEAQ (4*32)(oup), oup + SUBQ $4*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +openAVX2Tail384: + // Need to decrypt up to 384 bytes - prepare six blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, ctr0StoreAVX2 + VMOVDQA DD1, ctr1StoreAVX2 + VMOVDQA DD2, ctr2StoreAVX2 + + // Compute the number of iterations that will hash two blocks of data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $256, itr1 + SHRQ $4, itr1 + ADDQ $6, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail384LoopB: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + +openAVX2Tail384LoopA: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + + CMPQ itr2, itr1 + JB openAVX2Tail384LoopB + + CMPQ itr2, $10 + JNE openAVX2Tail384LoopA + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + +openAVX2Tail384Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail384HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail384Hash + +// Store 256 bytes safely, then go to store loop +openAVX2Tail384HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; 
VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + LEAQ (8*32)(inp), inp + LEAQ (8*32)(oup), oup + SUBQ $8*32, inl + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +openAVX2Tail512: + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + MOVQ inp, itr2 + +openAVX2Tail512LoopB: + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ (2*8)(itr2), itr2 + +openAVX2Tail512LoopA: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(itr2)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, 
AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(itr2)) + polyMulAVX2 + LEAQ (4*8)(itr2), itr2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + INCQ itr1 + CMPQ itr1, $4 + JLT openAVX2Tail512LoopB + + CMPQ itr1, $10 + JNE openAVX2Tail512LoopA + + MOVQ inl, itr1 + SUBQ $384, itr1 + ANDQ $-16, itr1 + +openAVX2Tail512HashLoop: + TESTQ itr1, itr1 + JE openAVX2Tail512HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + SUBQ $16, itr1 + JMP openAVX2Tail512HashLoop + +openAVX2Tail512HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, 
(4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + LEAQ (12*32)(inp), inp + LEAQ (12*32)(oup), oup + SUBQ $12*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Seal(dst, key, src, ad []byte) +TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Seal_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE sealSSE128 // About 15% faster + + // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + + // Load state, increment counter blocks + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVQ $10, itr2 + +sealSSEIntroLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JNE sealSSEIntroLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore + MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (0*16)(oup); 
MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) + + MOVQ $128, itr1 + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 + + CMPQ inl, $64 + JBE sealSSE128SealHash + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) + + ADDQ $64, itr1 + SUBQ $64, inl + LEAQ 64(inp), inp + + MOVQ $2, itr1 + MOVQ $8, itr2 + + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + CMPQ inl, $192 + JBE sealSSETail192 + +sealSSEMainLoop: + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + +sealSSEInnerLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(oup)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(oup), oup + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JGE sealSSEInnerLoop + polyAdd(0(oup)) + polyMul + LEAQ (2*8)(oup), oup + DECQ itr1 + JG sealSSEInnerLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVO tmpStore, D3 + + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR 
C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + ADDQ $192, inp + MOVQ $192, itr1 + SUBQ $192, inl + MOVO A3, A1 + MOVO B3, B1 + MOVO C3, C1 + MOVO D3, D1 + CMPQ inl, $64 + JBE sealSSE128SealHash + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) + LEAQ 64(inp), inp + SUBQ $64, inl + MOVQ $6, itr1 + MOVQ $4, itr2 + CMPQ inl, $192 + JG sealSSEMainLoop + + MOVQ inl, itr1 + TESTQ inl, inl + JE sealSSE128SealHash + MOVQ $6, itr1 + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + JMP sealSSETail192 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of plaintext +sealSSETail64: + // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A1 + MOVO state1Store, B1 + MOVO state2Store, C1 + MOVO ctr3Store, D1 + PADDL ·sseIncMask<>(SB), D1 + MOVO D1, ctr0Store + +sealSSETail64LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail64LoopB: + chachaQR(A1, B1, C1, D1, T1) + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A1, B1, C1, D1, T1) + shiftB1Right; shiftC1Right; shiftD1Right + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + DECQ itr1 + JG sealSSETail64LoopA + + DECQ itr2 + JGE sealSSETail64LoopB + PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B1 + PADDL state2Store, C1 + PADDL ctr0Store, D1 + + JMP sealSSE128Seal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of plaintext +sealSSETail128: + // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + +sealSSETail128LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail128LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + DECQ itr1 + JG sealSSETail128LoopA + + DECQ itr2 + JGE sealSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr0Store, D0; PADDL ctr1Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + + MOVQ $64, itr1 + LEAQ 64(inp), inp + SUBQ $64, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of plaintext +sealSSETail192: + // Need to encrypt up 
to 192 bytes - prepare three blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store + +sealSSETail192LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail192LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + DECQ itr1 + JG sealSSETail192LoopA + + DECQ itr2 + JGE sealSSETail192LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + MOVO A2, A1 + MOVO B2, B1 + MOVO C2, C1 + MOVO D2, D1 + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special seal optimization for buffers smaller than 129 bytes +sealSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we need to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +sealSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right; shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE sealSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore + MOVOU B0, 
sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +sealSSE128SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealSSE128Seal + polyAdd(0(oup)) + polyMul + + SUBQ $16, itr1 + ADDQ $16, oup + + JMP sealSSE128SealHash + +sealSSE128Seal: + CMPQ inl, $16 + JB sealSSETail + SUBQ $16, inl + + // Load for encryption + MOVOU (inp), T0 + PXOR T0, A1 + MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + + // Extract for hashing + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP sealSSE128Seal + +sealSSETail: + TESTQ inl, inl + JE sealSSEFinalize + + // We can only load the plaintext one byte at a time, to avoid reading past the end of the buffer + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVQ inl, itr1 + LEAQ -1(inp)(inl*1), inp + XORQ t2, t2 + XORQ t3, t3 + XORQ AX, AX + +sealSSETailLoadLoop: + SHLQ $8, t2, t3 + SHLQ $8, t2 + MOVB (inp), AX + XORQ AX, t2 + LEAQ -1(inp), inp + DECQ itr1 + JNE sealSSETailLoadLoop + MOVQ t2, 0+tmpStore + MOVQ t3, 8+tmpStore + PXOR 0+tmpStore, A1 + MOVOU A1, (oup) + MOVOU -16(t0)(itr2*1), T0 + PAND T0, A1 + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + ADDQ inl, oup + +sealSSEFinalize: + // Hash in the buffer lengths + ADDQ ad_len+80(FP), acc0 + ADCQ src_len+56(FP), acc1 + ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally store the tag at the end of the message + MOVQ acc0, (0*8)(oup) + MOVQ acc1, (1*8)(oup) + RET + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Seal_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimizations, for very short buffers + CMPQ inl, $192 + JBE seal192AVX2 // 33% faster + CMPQ inl, $320 + JBE seal320AVX2 // 17% faster + + // For the general case, prepare the poly key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 + VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr2 + +sealAVX2IntroLoop: + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA 
tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr2 + JNE sealAVX2IntroLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + + VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 + VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key + VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), DD0, DD0 + VMOVDQA DD0, rsStoreAVX2 + + // Hash AD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + // Can store at least 320 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), CC0, CC0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU CC0, (1*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 + VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 + VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) + + MOVQ $320, itr1 + SUBQ $320, inl + LEAQ 320(inp), inp + + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 + CMPQ inl, $128 + JBE sealAVX2SealHash + + VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 + VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVQ $8, itr1 + MOVQ $2, itr2 + + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + CMPQ inl, $512 + JBE sealAVX2Tail512 + + // We have 448 bytes to hash, but main loop hashes 
512 bytes at a time - perform some rounds, before the main loop + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + + SUBQ $16, oup // Adjust the pointer + MOVQ $9, itr1 + JMP sealAVX2InternalLoopStart + +sealAVX2MainLoop: + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr1 + +sealAVX2InternalLoop: + polyAdd(0*8(oup)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + 
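// The Poly1305 multiply is split into stages and spread between the vector instructions, so the scalar hashing overlaps the ChaCha rounds +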
polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + +sealAVX2InternalLoopStart: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(oup)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(oup)) + LEAQ (6*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD 
$25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr1 + JNE sealAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(-2*8(oup)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + SUBQ $(32*16), inl + CMPQ inl, $512 + JG sealAVX2MainLoop + + // Tail can only hash 480 bytes + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ 32(oup), oup + + MOVQ $10, itr1 + MOVQ $0, itr2 + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +seal192AVX2: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD 
·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +sealAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE sealAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +sealAVX2ShortSeal: + // Hash aad + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +sealAVX2SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealAVX2ShortSealLoop + polyAdd(0(oup)) + polyMul + SUBQ $16, itr1 + ADDQ $16, oup + JMP sealAVX2SealHash + +sealAVX2ShortSealLoop: + CMPQ inl, $32 + JB sealAVX2ShortTail32 + SUBQ $32, inl + + // Load for encryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + + // Now can hash + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP sealAVX2ShortSealLoop + +sealAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB sealAVX2ShortDone + + SUBQ $16, inl + + // Load for encryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + + // Hash + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +sealAVX2ShortDone: + VZEROUPPER + JMP sealSSETail + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +seal320AVX2: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +sealAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; 
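+ // The VPALIGNR $12/$8/$4 byte-rotates here move rows BB*, CC* and DD* back
+ // from the diagonal layout used by the second set of quarter-rounds to the
+ // column layout; the matching $4/$8/$12 rotates above set the diagonals up.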
VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE sealAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP sealAVX2ShortSeal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +sealAVX2Tail128: + // Need to encrypt up to 128 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0 + VMOVDQA state1StoreAVX2, BB0 + VMOVDQA state2StoreAVX2, CC0 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VMOVDQA DD0, DD1 + +sealAVX2Tail128LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail128LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $4, DD0, DD0, DD0 + DECQ itr1 + JG sealAVX2Tail128LoopA + DECQ itr2 + JGE sealAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA1 + VPADDD state1StoreAVX2, BB0, BB1 + VPADDD state2StoreAVX2, CC0, CC1 + VPADDD DD1, DD0, DD1 + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + JMP sealAVX2ShortSealLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +sealAVX2Tail256: + // Need to encrypt up to 256 bytes - prepare four blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + +sealAVX2Tail256LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(0(oup)) +
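+ // The seal tails interleave Poly1305 with the ChaCha rounds: each pass
+ // through the loop folds 16-byte blocks of already-written ciphertext into
+ // the accumulator (polyAdd, then the polyMul reduction just below), hiding
+ // the multiply latency behind the vector arithmetic.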
polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr1 + JG sealAVX2Tail256LoopA + DECQ itr2 + JGE sealAVX2Tail256LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +sealAVX2Tail384: + // Need to encrypt up to 384 bytes - prepare six blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 + +sealAVX2Tail384LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail384LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr1 + JG sealAVX2Tail384LoopA + DECQ itr2 + JGE sealAVX2Tail384LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD
state2StoreAVX2, CC2, CC2 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0 + VPERM2I128 $0x02, CC1, DD1, TT1 + VPERM2I128 $0x13, AA1, BB1, TT2 + VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + MOVQ $256, itr1 + LEAQ 256(inp), inp + SUBQ $256, inl + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +sealAVX2Tail512: + // Need to encrypt up to 512 bytes - prepare eight blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + +sealAVX2Tail512LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail512LoopB: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(oup)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 +
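+ // AVX2 has no 32-bit rotate instruction, so the rol-7 (and the rol-12
+ // above) is built from VPSLLD/VPSRLD plus VPXOR - the two shifted halves do
+ // not overlap, so XOR acts as OR here. CC3 is parked in tmpStoreAVX2 to
+ // free it up as the scratch register for the shifts.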
VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + + DECQ itr1 + JG sealAVX2Tail512LoopA + DECQ itr2 + JGE sealAVX2Tail512LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3 + VPXOR (0*32)(inp), CC3, CC3 + VMOVDQU CC3, (0*32)(oup) + VPERM2I128 $0x02, CC0, DD0, CC3 + VPXOR (1*32)(inp), CC3, CC3 + VMOVDQU CC3, (1*32)(oup) + VPERM2I128 $0x13, AA0, BB0, CC3 + VPXOR (2*32)(inp), CC3, CC3 + VMOVDQU CC3, (2*32)(oup) + VPERM2I128 $0x13, CC0, DD0, CC3 + VPXOR (3*32)(inp), CC3, CC3 + VMOVDQU CC3, (3*32)(oup) + + VPERM2I128 
$0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + + MOVQ $384, itr1 + LEAQ 384(inp), inp + SUBQ $384, inl + VPERM2I128 $0x02, AA3, BB3, AA0 + VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 + VPERM2I128 $0x13, AA3, BB3, CC0 + VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + JMP sealAVX2SealHash diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go new file mode 100644 index 00000000..c2797121 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go @@ -0,0 +1,81 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/internal/chacha20" + "golang.org/x/crypto/internal/subtle" + "golang.org/x/crypto/poly1305" +) + +func roundTo16(n int) int { + return 16 * ((n + 15) / 16) +} + +func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { + ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) + if subtle.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + + var polyKey [32]byte + s := chacha20.New(c.key, [3]uint32{ + binary.LittleEndian.Uint32(nonce[0:4]), + binary.LittleEndian.Uint32(nonce[4:8]), + binary.LittleEndian.Uint32(nonce[8:12]), + }) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.Advance() // skip the next 32 bytes + s.XORKeyStream(out, plaintext) + + polyInput := make([]byte, roundTo16(len(additionalData))+roundTo16(len(plaintext))+8+8) + copy(polyInput, additionalData) + copy(polyInput[roundTo16(len(additionalData)):], out[:len(plaintext)]) + binary.LittleEndian.PutUint64(polyInput[len(polyInput)-16:], uint64(len(additionalData))) + binary.LittleEndian.PutUint64(polyInput[len(polyInput)-8:], uint64(len(plaintext))) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, polyInput, &polyKey) + copy(out[len(plaintext):], tag[:]) + + return ret +} + +func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + var tag [poly1305.TagSize]byte + copy(tag[:], ciphertext[len(ciphertext)-16:]) + ciphertext = ciphertext[:len(ciphertext)-16] + + var polyKey [32]byte + s := chacha20.New(c.key, [3]uint32{ + binary.LittleEndian.Uint32(nonce[0:4]), + binary.LittleEndian.Uint32(nonce[4:8]), + binary.LittleEndian.Uint32(nonce[8:12]), + }) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.Advance() // skip the next 32 bytes + + polyInput := make([]byte, roundTo16(len(additionalData))+roundTo16(len(ciphertext))+8+8) + copy(polyInput, additionalData) + copy(polyInput[roundTo16(len(additionalData)):], ciphertext) + binary.LittleEndian.PutUint64(polyInput[len(polyInput)-16:], uint64(len(additionalData))) + 
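+	// polyInput now has the RFC 7539 MAC layout:
+	//   pad16(additionalData) || pad16(ciphertext) || len(additionalData) || len(ciphertext)
+	// with both lengths as little-endian uint64s; the write above fills in
+	// the additional-data length and the write below fills in the ciphertext
+	// length.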
binary.LittleEndian.PutUint64(polyInput[len(polyInput)-8:], uint64(len(ciphertext))) + + ret, out := sliceForAppend(dst, len(ciphertext)) + if subtle.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !poly1305.Verify(&tag, polyInput, &polyKey) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + s.XORKeyStream(out, ciphertext) + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go new file mode 100644 index 00000000..4c2eb703 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 !go1.7 gccgo appengine + +package chacha20poly1305 + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + return c.sealGeneric(dst, nonce, plaintext, additionalData) +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + return c.openGeneric(dst, nonce, ciphertext, additionalData) +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go new file mode 100644 index 00000000..a02fa571 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go @@ -0,0 +1,104 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20poly1305 + +import ( + "crypto/cipher" + "encoding/binary" + "errors" + + "golang.org/x/crypto/internal/chacha20" +) + +type xchacha20poly1305 struct { + key [8]uint32 +} + +// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key. +// +// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce, +// suitable to be generated randomly without risk of collisions. It should be +// preferred when nonce uniqueness cannot be trivially ensured, or whenever +// nonces are randomly generated. +func NewX(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(xchacha20poly1305) + ret.key[0] = binary.LittleEndian.Uint32(key[0:4]) + ret.key[1] = binary.LittleEndian.Uint32(key[4:8]) + ret.key[2] = binary.LittleEndian.Uint32(key[8:12]) + ret.key[3] = binary.LittleEndian.Uint32(key[12:16]) + ret.key[4] = binary.LittleEndian.Uint32(key[16:20]) + ret.key[5] = binary.LittleEndian.Uint32(key[20:24]) + ret.key[6] = binary.LittleEndian.Uint32(key[24:28]) + ret.key[7] = binary.LittleEndian.Uint32(key[28:32]) + return ret, nil +} + +func (*xchacha20poly1305) NonceSize() int { + return NonceSizeX +} + +func (*xchacha20poly1305) Overhead() int { + return 16 +} + +func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no + // size limit. However, since we reuse the ChaCha20-Poly1305 implementation, + // the second half of the counter is not available. 
This is unlikely to be + // an issue because the cipher.AEAD API requires the entire message to be in + // memory, and the counter overflows at 256 GB. + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + hNonce := [4]uint32{ + binary.LittleEndian.Uint32(nonce[0:4]), + binary.LittleEndian.Uint32(nonce[4:8]), + binary.LittleEndian.Uint32(nonce[8:12]), + binary.LittleEndian.Uint32(nonce[12:16]), + } + c := &chacha20poly1305{ + key: chacha20.HChaCha20(&x.key, &hNonce), + } + // The first 4 bytes of the final nonce are unused counter space. + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.seal(dst, cNonce[:], plaintext, additionalData) +} + +func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + hNonce := [4]uint32{ + binary.LittleEndian.Uint32(nonce[0:4]), + binary.LittleEndian.Uint32(nonce[4:8]), + binary.LittleEndian.Uint32(nonce[8:12]), + binary.LittleEndian.Uint32(nonce[12:16]), + } + c := &chacha20poly1305{ + key: chacha20.HChaCha20(&x.key, &hNonce), + } + // The first 4 bytes of the final nonce are unused counter space. + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.open(dst, cNonce[:], ciphertext, additionalData) +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 00000000..528b9bff --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,751 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. 
If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. +func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. 
+func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag-number identifier octets not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 INTEGER, converts it to a boolean, and +// stores the result in out, advancing over it. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer or to a big.Int, it panics. It reports whether the +// read was successful. +func (s *String) ReadASN1Integer(out interface{}) bool { + if reflect.TypeOf(out).Kind() != reflect.Ptr { + panic("out is not a pointer") + } + switch reflect.ValueOf(out).Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case reflect.Struct: + if reflect.TypeOf(out).Elem() == bigIntType { + return s.readASN1BigInt(out.(*big.Int)) + } + } + panic("out does not point to an integer type") +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result.
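+	// For example, the one-byte INTEGER 0xfe is accumulated as
+	// 0x00000000000000fe; shifting left by 56 and arithmetically back right
+	// by 56 yields -2.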
+ *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. +func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 4 { + return false + } + ret <<= 7 + b := s.read(1)[0] + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. 
+func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 { + return false + } + + paddingBits := uint8(bytes[0]) + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 { + return false + } + out.BitLength = len(bytes)*8 - int(paddingBits) + out.Bytes = bytes + return true +} + + if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. + return false + } + length = headerLen + len32 + } + + if uint32(int(length)) != length || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 00000000..cda8e3ed --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 00000000..ca7b1db5 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,337 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values.
+// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. 
users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) + + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. 
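+	// For example, a 300-byte child needs the long form 82 01 2c: the byte
+	// reserved above becomes the initial 0x82, one extra length byte is
+	// inserted, and the loop below fills in 0x01 0x2c.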
+ child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back n bytes written directly to the Builder. An attempt by a +// child builder passed to a continuation to unwrite bytes from its parent will +// panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 00000000..39bf98ae --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,166 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It provides helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started.
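+//
+// For instance, given some hypothetical input []byte (the variable names here
+// are only a sketch), pulling apart a 16-bit length-prefixed field might look
+// like:
+//
+//	var s cryptobyte.String = input
+//	var body cryptobyte.String
+//	if !s.ReadUint16LengthPrefixed(&body) {
+//		// handle malformed input
+//	}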
+package cryptobyte // import "golang.org/x/crypto/cryptobyte" + +// String represents a string of bytes. It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n bytes and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + if int(length) < 0 { + // This currently cannot overflow because we read uint24 at most, but check + // anyway in case that changes in the future. + return false + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful.
+func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h new file mode 100644 index 00000000..b3f74162 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.h @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s new file mode 100644 index 00000000..ee7b4bd5 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. + +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s new file mode 100644 index 00000000..cd793a5b --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s @@ -0,0 +1,65 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
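With string.go complete just above, here is the matching parse of the message produced in the earlier Builder sketch. Again a hypothetical usage note rather than part of the diff; every method used is exported by the vendored string.go:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	input := cryptobyte.String([]byte{0x01, 0x00, 0x05, 'h', 'e', 'l', 'l', 'o'})

	var msgType uint8
	var body cryptobyte.String
	// Each Read* consumes its bytes and reports success; Empty confirms
	// the whole input was accounted for.
	if !input.ReadUint8(&msgType) ||
		!input.ReadUint16LengthPrefixed(&body) ||
		!input.Empty() {
		panic("malformed message")
	}
	fmt.Println(msgType, string(body)) // 1 hello
}
```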
+ +// +build amd64,!gccgo,!appengine + +// func cswap(inout *[4][5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + SUBQ $1, SI + NOTQ SI + MOVQ SI, X15 + PSHUFD $0x44, X15, X15 + + MOVOU 0(DI), X0 + MOVOU 16(DI), X2 + MOVOU 32(DI), X4 + MOVOU 48(DI), X6 + MOVOU 64(DI), X8 + MOVOU 80(DI), X1 + MOVOU 96(DI), X3 + MOVOU 112(DI), X5 + MOVOU 128(DI), X7 + MOVOU 144(DI), X9 + + MOVO X1, X10 + MOVO X3, X11 + MOVO X5, X12 + MOVO X7, X13 + MOVO X9, X14 + + PXOR X0, X10 + PXOR X2, X11 + PXOR X4, X12 + PXOR X6, X13 + PXOR X8, X14 + PAND X15, X10 + PAND X15, X11 + PAND X15, X12 + PAND X15, X13 + PAND X15, X14 + PXOR X10, X0 + PXOR X10, X1 + PXOR X11, X2 + PXOR X11, X3 + PXOR X12, X4 + PXOR X12, X5 + PXOR X13, X6 + PXOR X13, X7 + PXOR X14, X8 + PXOR X14, X9 + + MOVOU X0, 0(DI) + MOVOU X2, 16(DI) + MOVOU X4, 32(DI) + MOVOU X6, 48(DI) + MOVOU X8, 64(DI) + MOVOU X1, 80(DI) + MOVOU X3, 96(DI) + MOVOU X5, 112(DI) + MOVOU X7, 128(DI) + MOVOU X9, 144(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 00000000..75f24bab --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,834 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We have an implementation in amd64 assembly so this code is only run on +// non-amd64 platforms. The amd64 assembly does not support gccgo. +// +build !amd64 gccgo appengine + +package curve25519 + +import ( + "encoding/binary" +) + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + b = -b + for i := range f { + t := b & (f[i] ^ g[i]) + f[i] ^= t + g[i] ^= t + } +} + +// load3 reads a 24-bit, little-endian value from in. +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +// load4 reads a 32-bit, little-endian value from in. 
+func load4(in []byte) int64 {
+	return int64(binary.LittleEndian.Uint32(in))
+}
+
+func feFromBytes(dst *fieldElement, src *[32]byte) {
+	h0 := load4(src[:])
+	h1 := load3(src[4:]) << 6
+	h2 := load3(src[7:]) << 5
+	h3 := load3(src[10:]) << 3
+	h4 := load3(src[13:]) << 2
+	h5 := load4(src[16:])
+	h6 := load3(src[20:]) << 7
+	h7 := load3(src[23:]) << 5
+	h8 := load3(src[26:]) << 4
+	h9 := (load3(src[29:]) & 0x7fffff) << 2
+
+	var carry [10]int64
+	carry[9] = (h9 + 1<<24) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	carry[1] = (h1 + 1<<24) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[3] = (h3 + 1<<24) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[5] = (h5 + 1<<24) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	carry[7] = (h7 + 1<<24) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[0] = (h0 + 1<<25) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[2] = (h2 + 1<<25) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[4] = (h4 + 1<<25) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[6] = (h6 + 1<<25) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	carry[8] = (h8 + 1<<25) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	dst[0] = int32(h0)
+	dst[1] = int32(h1)
+	dst[2] = int32(h2)
+	dst[3] = int32(h3)
+	dst[4] = int32(h4)
+	dst[5] = int32(h5)
+	dst[6] = int32(h6)
+	dst[7] = int32(h7)
+	dst[8] = int32(h8)
+	dst[9] = int32(h9)
+}
+
+// feToBytes marshals h to s.
+// Preconditions:
+//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+//   Then 0<y<1.
+//
+//   Write r=h-pq.
+//   Have 0<=r<=p-1=2^255-20.
+//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+//   Write x=r+19(2^-255)r+y.
+//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func feToBytes(s *[32]byte, h *fieldElement) {
+	var carry [10]int32
+
+	q := (19*h[9] + (1 << 24)) >> 25
+	q = (h[0] + q) >> 26
+	q = (h[1] + q) >> 25
+	q = (h[2] + q) >> 26
+	q = (h[3] + q) >> 25
+	q = (h[4] + q) >> 26
+	q = (h[5] + q) >> 25
+	q = (h[6] + q) >> 26
+	q = (h[7] + q) >> 25
+	q = (h[8] + q) >> 26
+	q = (h[9] + q) >> 25
+
+	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+	h[0] += 19 * q
+	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+	carry[0] = h[0] >> 26
+	h[1] += carry[0]
+	h[0] -= carry[0] << 26
+	carry[1] = h[1] >> 25
+	h[2] += carry[1]
+	h[1] -= carry[1] << 25
+	carry[2] = h[2] >> 26
+	h[3] += carry[2]
+	h[2] -= carry[2] << 26
+	carry[3] = h[3] >> 25
+	h[4] += carry[3]
+	h[3] -= carry[3] << 25
+	carry[4] = h[4] >> 26
+	h[5] += carry[4]
+	h[4] -= carry[4] << 26
+	carry[5] = h[5] >> 25
+	h[6] += carry[5]
+	h[5] -= carry[5] << 25
+	carry[6] = h[6] >> 26
+	h[7] += carry[6]
+	h[6] -= carry[6] << 26
+	carry[7] = h[7] >> 25
+	h[8] += carry[7]
+	h[7] -= carry[7] << 25
+	carry[8] = h[8] >> 26
+	h[9] += carry[8]
+	h[8] -= carry[8] << 26
+	carry[9] = h[9] >> 25
+	h[9] -= carry[9] << 25
+	// h10 = carry9
+
+	// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+	// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+	// evidently 2^255 h10-2^255 q = 0.
+	// Goal: Output h[0]+...+2^230 h[9].
+ + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go new file mode 100644 index 00000000..da9b10d9 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html +package curve25519 // import "golang.org/x/crypto/curve25519" + +// basePoint is the x coordinate of the generator of the curve. +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. 
+func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s new file mode 100644 index 00000000..39081610 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 00000000..e0ac30c7 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1377 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
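With doc.go's ScalarMult and ScalarBaseMult in place, the public API of this vendored curve25519 is complete. A hedged Diffie-Hellman sketch against exactly this version (the curve25519.X25519 helper comes from later x/crypto releases and is not available in this vendored copy; scalar clamping happens inside scalarMult, so raw random bytes are acceptable here):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var aPriv, bPriv, aPub, bPub, s1, s2 [32]byte
	if _, err := rand.Read(aPriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bPriv[:]); err != nil {
		panic(err)
	}

	curve25519.ScalarBaseMult(&aPub, &aPriv) // public = priv * basepoint
	curve25519.ScalarBaseMult(&bPub, &bPriv)

	curve25519.ScalarMult(&s1, &aPriv, &bPub) // both sides derive the
	curve25519.ScalarMult(&s2, &bPriv, &aPub) // same shared secret
	fmt.Println(bytes.Equal(s1[:], s2[:]))    // true
}
```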
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 
24(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + 
SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ 
DX,R15 + MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + 
MOVQ DX,R13 + MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + 
ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go new file mode 100644 index 
00000000..5822bd53 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package curve25519 + +// These functions are implemented in the .s files. The names of the functions +// in the rest of the file are also taken from the SUPERCOP sources to help +// people following along. + +//go:noescape + +func cswap(inout *[5]uint64, v uint64) + +//go:noescape + +func ladderstep(inout *[5][5]uint64) + +//go:noescape + +func freeze(inout *[5]uint64) + +//go:noescape + +func mul(dest, a, b *[5]uint64) + +//go:noescape + +func square(out, in *[5]uint64) + +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. +func mladder(xr, zr *[5]uint64, s *[32]byte) { + var work [5][5]uint64 + + work[0] = *xr + setint(&work[1], 1) + setint(&work[2], 0) + work[3] = *xr + setint(&work[4], 1) + + j := uint(6) + var prevbit byte + + for i := 31; i >= 0; i-- { + for j < 8 { + bit := ((*s)[i] >> j) & 1 + swap := bit ^ prevbit + prevbit = bit + cswap(&work[1], uint64(swap)) + ladderstep(&work) + j-- + } + j = 7 + } + + *xr = work[1] + *zr = work[2] +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + copy(e[:], (*in)[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var t, z [5]uint64 + unpack(&t, base) + mladder(&t, &z, &e) + invert(&z, &z) + mul(&t, &t, &z) + pack(out, &t) +} + +func setint(r *[5]uint64, v uint64) { + r[0] = v + r[1] = 0 + r[2] = 0 + r[3] = 0 + r[4] = 0 +} + +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian +// order. +func unpack(r *[5]uint64, x *[32]byte) { + r[0] = uint64(x[0]) | + uint64(x[1])<<8 | + uint64(x[2])<<16 | + uint64(x[3])<<24 | + uint64(x[4])<<32 | + uint64(x[5])<<40 | + uint64(x[6]&7)<<48 + + r[1] = uint64(x[6])>>3 | + uint64(x[7])<<5 | + uint64(x[8])<<13 | + uint64(x[9])<<21 | + uint64(x[10])<<29 | + uint64(x[11])<<37 | + uint64(x[12]&63)<<45 + + r[2] = uint64(x[12])>>6 | + uint64(x[13])<<2 | + uint64(x[14])<<10 | + uint64(x[15])<<18 | + uint64(x[16])<<26 | + uint64(x[17])<<34 | + uint64(x[18])<<42 | + uint64(x[19]&1)<<50 + + r[3] = uint64(x[19])>>1 | + uint64(x[20])<<7 | + uint64(x[21])<<15 | + uint64(x[22])<<23 | + uint64(x[23])<<31 | + uint64(x[24])<<39 | + uint64(x[25]&15)<<47 + + r[4] = uint64(x[25])>>4 | + uint64(x[26])<<4 | + uint64(x[27])<<12 | + uint64(x[28])<<20 | + uint64(x[29])<<28 | + uint64(x[30])<<36 | + uint64(x[31]&127)<<44 +} + +// pack sets out = x where out is the usual, little-endian form of the 5, +// 51-bit limbs in x. 
+func pack(out *[32]byte, x *[5]uint64) { + t := *x + freeze(&t) + + out[0] = byte(t[0]) + out[1] = byte(t[0] >> 8) + out[2] = byte(t[0] >> 16) + out[3] = byte(t[0] >> 24) + out[4] = byte(t[0] >> 32) + out[5] = byte(t[0] >> 40) + out[6] = byte(t[0] >> 48) + + out[6] ^= byte(t[1]<<3) & 0xf8 + out[7] = byte(t[1] >> 5) + out[8] = byte(t[1] >> 13) + out[9] = byte(t[1] >> 21) + out[10] = byte(t[1] >> 29) + out[11] = byte(t[1] >> 37) + out[12] = byte(t[1] >> 45) + + out[12] ^= byte(t[2]<<6) & 0xc0 + out[13] = byte(t[2] >> 2) + out[14] = byte(t[2] >> 10) + out[15] = byte(t[2] >> 18) + out[16] = byte(t[2] >> 26) + out[17] = byte(t[2] >> 34) + out[18] = byte(t[2] >> 42) + out[19] = byte(t[2] >> 50) + + out[19] ^= byte(t[3]<<1) & 0xfe + out[20] = byte(t[3] >> 7) + out[21] = byte(t[3] >> 15) + out[22] = byte(t[3] >> 23) + out[23] = byte(t[3] >> 31) + out[24] = byte(t[3] >> 39) + out[25] = byte(t[3] >> 47) + + out[25] ^= byte(t[4]<<4) & 0xf0 + out[26] = byte(t[4] >> 4) + out[27] = byte(t[4] >> 12) + out[28] = byte(t[4] >> 20) + out[29] = byte(t[4] >> 28) + out[30] = byte(t[4] >> 36) + out[31] = byte(t[4] >> 44) +} + +// invert calculates r = x^-1 mod p using Fermat's little theorem. +func invert(r *[5]uint64, x *[5]uint64) { + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 + + square(&z2, x) /* 2 */ + square(&t, &z2) /* 4 */ + square(&t, &t) /* 8 */ + mul(&z9, &t, x) /* 9 */ + mul(&z11, &z9, &z2) /* 11 */ + square(&t, &z11) /* 22 */ + mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ + + square(&t, &z2_5_0) /* 2^6 - 2^1 */ + for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ + + square(&t, &z2_10_0) /* 2^11 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ + + square(&t, &z2_20_0) /* 2^21 - 2^1 */ + for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ + square(&t, &t) + } + mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ + + square(&t, &t) /* 2^41 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ + square(&t, &t) + } + mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ + + square(&t, &z2_50_0) /* 2^51 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ + square(&t, &t) + } + mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ + + square(&t, &z2_100_0) /* 2^101 - 2^1 */ + for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ + square(&t, &t) + } + mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ + + square(&t, &t) /* 2^201 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ + square(&t, &t) + } + mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ + + square(&t, &t) /* 2^251 - 2^1 */ + square(&t, &t) /* 2^252 - 2^2 */ + square(&t, &t) /* 2^253 - 2^3 */ + + square(&t, &t) /* 2^254 - 2^4 */ + + square(&t, &t) /* 2^255 - 2^5 */ + mul(r, &t, &z11) /* 2^255 - 21 */ +} diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s new file mode 100644 index 00000000..1f76d1a3 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s @@ -0,0 +1,169 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
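The unpack/pack pair above converts between 32 little-endian bytes and five 51-bit limbs, with limb boundaries falling mid-byte. A hypothetical cross-check of that representation using math/big; unpackRef is an illustrative restatement, not code from the diff:

```go
package main

import (
	"fmt"
	"math/big"
)

// unpackRef splits a little-endian 255-bit value into five 51-bit limbs,
// the same result the bit-twiddling unpack above produces without math/big.
func unpackRef(r *[5]uint64, x *[32]byte) {
	le := make([]byte, 32)
	for i := range le {
		le[i] = x[31-i] // big.Int.SetBytes expects big-endian
	}
	n := new(big.Int).SetBytes(le)
	mask := big.NewInt((1 << 51) - 1)
	for i := 0; i < 5; i++ {
		r[i] = new(big.Int).And(n, mask).Uint64()
		n.Rsh(n, 51)
	}
}

func main() {
	var x [32]byte
	x[0] = 9 // the curve25519 base point's x-coordinate

	var r [5]uint64
	unpackRef(&r, &x)
	fmt.Println(r) // [9 0 0 0 0]

	// Reassemble sum(r[i] * 2^(51*i)) to confirm the round trip.
	sum := new(big.Int)
	for i := 4; i >= 0; i-- {
		sum.Lsh(sum, 51)
		sum.Or(sum, new(big.Int).SetUint64(r[i]))
	}
	fmt.Println(sum) // 9
}
```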
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R8,R9 + ANDQ SI,R8 + SHLQ $13,R10,R11 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BX,BP + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s new file mode 100644 index 00000000..07511a45 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
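The mul routine above is a schoolbook 5x5 limb product: the IMUL3Q $19 precomputations fold columns at or above 2^255 back down (2^255 is congruent to 19 mod p), and the SHLQ $13 double-shifts extract the top of each 128-bit column (13 + 51 = 64). A rough pure-Go rendering of the same arithmetic, offered as a sketch only; the name feMul51 and the loop structure are mine, and the assembly's exact bounds tracking and register scheduling are not reproduced:

```go
package main

import (
	"fmt"
	"math/bits"
)

const mask51 = (1 << 51) - 1

// feMul51 multiplies two elements of GF(2^255-19) held as five 51-bit
// limbs. Inputs are assumed to have limbs below 2^52; output limbs are
// nearly reduced (below 2^52), matching the assembly's convention.
func feMul51(h, f, g *[5]uint64) {
	var lo, hi [5]uint64 // 128-bit accumulators, one per output column

	mulAdd := func(k int, a, b uint64) {
		ph, pl := bits.Mul64(a, b)
		var c uint64
		lo[k], c = bits.Add64(lo[k], pl, 0)
		hi[k] += ph + c
	}

	for i := 0; i < 5; i++ {
		for j := 0; j < 5; j++ {
			a, k := f[i], i+j
			if k >= 5 {
				k -= 5
				a *= 19 // 2^255 ≡ 19 (mod p): fold high columns down
			}
			mulAdd(k, a, g[j])
		}
	}

	// Carry chain: each 128-bit column keeps its low 51 bits and passes
	// the rest up; hi<<13 | lo>>51 is the assembly's double-shift.
	var r [5]uint64
	var c uint64
	for k := 0; k < 5; k++ {
		l, cc := bits.Add64(lo[k], c, 0)
		hcol := hi[k] + cc
		r[k] = l & mask51
		c = hcol<<13 | l>>51
	}
	r[0] += 19 * c // fold the final carry back around
	r[1] += r[0] >> 51
	r[0] &= mask51
	*h = r
}

func main() {
	f := [5]uint64{3}
	g := [5]uint64{5}
	var h [5]uint64
	feMul51(&h, &f, &g)
	fmt.Println(h) // [15 0 0 0 0]
}
```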
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,CX,R8 + ANDQ SI,CX + SHLQ $13,R9,R10 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R11,R12 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R13,R14 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,R15,BX + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go new file mode 100644 index 00000000..dda3f143 --- /dev/null +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation +// Function (HKDF) as defined in RFC 5869. +// +// HKDF is a cryptographic key derivation function (KDF) with the goal of +// expanding limited input keying material into one or more cryptographically +// strong secret keys. +package hkdf // import "golang.org/x/crypto/hkdf" + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +// Extract generates a pseudorandom key for use with Expand from an input secret +// and an optional independent salt. +// +// Only use this function if you need to reuse the extracted key with multiple +// Expand invocations and different context values. Most common scenarios, +// including the generation of multiple keys, should use New instead. 
+func Extract(hash func() hash.Hash, secret, salt []byte) []byte { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + return extractor.Sum(nil) +} + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + buf []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.buf) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read any leftover from the buffer + n := copy(p, f.buf) + p = p[n:] + + // Fill the rest of the buffer + for len(p) > 0 { + f.expander.Reset() + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.buf = f.prev + n = copy(p, f.buf) + p = p[n:] + } + // Save leftovers for next run + f.buf = f.buf[n:] + + return need, nil +} + +// Expand returns a Reader, from which keys can be read, using the given +// pseudorandom key and optional context info, skipping the extraction step. +// +// The pseudorandomKey should have been generated by Extract, or be a uniformly +// random or pseudorandom cryptographically strong key. See RFC 5869, Section +// 3.3. Most common scenarios will want to use New instead. +func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { + expander := hmac.New(hash, pseudorandomKey) + return &hkdf{expander, expander.Size(), info, 1, nil, nil} +} + +// New returns a Reader, from which keys can be read, using the given hash, +// secret, salt and context info. Salt and info can be nil. +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + prk := Extract(hash, secret, salt) + return Expand(hash, prk, info) +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s b/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s new file mode 100644 index 00000000..b3a16ef7 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s @@ -0,0 +1,308 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
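A minimal usage sketch for the hkdf package above: derive two independent 32-byte keys from one secret with SHA-256. The secret, salt, and info values are illustrative placeholders only:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := []byte("input keying material") // illustrative only
	salt := []byte("optional salt")
	info := []byte("application context")

	r := hkdf.New(sha256.New, secret, salt, info)
	k1 := make([]byte, 32)
	k2 := make([]byte, 32)
	if _, err := io.ReadFull(r, k1); err != nil {
		panic(err)
	}
	if _, err := io.ReadFull(r, k2); err != nil {
		panic(err)
	}
	fmt.Printf("k1 = %x\nk2 = %x\n", k1, k2)
}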
+ +// +build go1.11 +// +build !gccgo,!appengine + +#include "textflag.h" + +#define NUM_ROUNDS 10 + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD dst+0(FP), R1 + MOVD src+24(FP), R2 + MOVD src_len+32(FP), R3 + MOVD key+48(FP), R4 + MOVD nonce+56(FP), R6 + MOVD counter+64(FP), R7 + + MOVD $·constants(SB), R10 + MOVD $·incRotMatrix(SB), R11 + + MOVW (R7), R20 + + AND $~255, R3, R13 + ADD R2, R13, R12 // R12 for block end + AND $255, R3, R13 +loop: + MOVD $NUM_ROUNDS, R21 + VLD1 (R11), [V30.S4, V31.S4] + + // load contants + // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] + WORD $0x4D60E940 + + // load keys + // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] + WORD $0x4DFFE884 + // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] + WORD $0x4DFFE888 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V12.S4] + WORD $0x4D40C8EC + + // VLD3R (R6), [V13.S4, V14.S4, V15.S4] + WORD $0x4D40E8CD + + // update counter + VADD V30.S4, V12.S4, V12.S4 + +chacha: + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) + VADD V8.S4, V12.S4, V8.S4 + VADD V9.S4, V13.S4, V9.S4 + VADD V10.S4, V14.S4, V10.S4 + VADD V11.S4, V15.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $12, V16.S4, V4.S4 + VSHL $12, V17.S4, V5.S4 + VSHL $12, V18.S4, V6.S4 + VSHL $12, V19.S4, V7.S4 + VSRI $20, V16.S4, V4.S4 + VSRI $20, V17.S4, V5.S4 + VSRI $20, V18.S4, V6.S4 + VSRI $20, V19.S4, V7.S4 + + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) + VADD V12.S4, V8.S4, V8.S4 + VADD V13.S4, V9.S4, V9.S4 + VADD V14.S4, V10.S4, V10.S4 + VADD V15.S4, V11.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $7, V16.S4, V4.S4 + VSHL $7, V17.S4, V5.S4 + VSHL $7, V18.S4, V6.S4 + VSHL $7, V19.S4, V7.S4 + VSRI $25, V16.S4, V4.S4 + VSRI $25, V17.S4, V5.S4 + VSRI $25, V18.S4, V6.S4 + VSRI $25, V19.S4, V7.S4 + + // V0..V3 += V5..V7, V4 + // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) + VADD V0.S4, V5.S4, V0.S4 + VADD V1.S4, V6.S4, V1.S4 + VADD V2.S4, V7.S4, V2.S4 + VADD V3.S4, V4.S4, V3.S4 + VEOR V15.B16, V0.B16, V15.B16 + VEOR V12.B16, V1.B16, V12.B16 + VEOR V13.B16, V2.B16, V13.B16 + VEOR V14.B16, V3.B16, V14.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 12) + // ... 
+ VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $12, V16.S4, V5.S4 + VSHL $12, V17.S4, V6.S4 + VSHL $12, V18.S4, V7.S4 + VSHL $12, V19.S4, V4.S4 + VSRI $20, V16.S4, V5.S4 + VSRI $20, V17.S4, V6.S4 + VSRI $20, V18.S4, V7.S4 + VSRI $20, V19.S4, V4.S4 + + // V0 += V5; V15 <<<= ((V0 XOR V15), 8) + // ... + VADD V5.S4, V0.S4, V0.S4 + VADD V6.S4, V1.S4, V1.S4 + VADD V7.S4, V2.S4, V2.S4 + VADD V4.S4, V3.S4, V3.S4 + VEOR V0.B16, V15.B16, V15.B16 + VEOR V1.B16, V12.B16, V12.B16 + VEOR V2.B16, V13.B16, V13.B16 + VEOR V3.B16, V14.B16, V14.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 7) + // ... + VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $7, V16.S4, V5.S4 + VSHL $7, V17.S4, V6.S4 + VSHL $7, V18.S4, V7.S4 + VSHL $7, V19.S4, V4.S4 + VSRI $25, V16.S4, V5.S4 + VSRI $25, V17.S4, V6.S4 + VSRI $25, V18.S4, V7.S4 + VSRI $25, V19.S4, V4.S4 + + SUB $1, R21 + CBNZ R21, chacha + + // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] + WORD $0x4D60E950 + + // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] + WORD $0x4DFFE894 + VADD V30.S4, V12.S4, V12.S4 + VADD V16.S4, V0.S4, V0.S4 + VADD V17.S4, V1.S4, V1.S4 + VADD V18.S4, V2.S4, V2.S4 + VADD V19.S4, V3.S4, V3.S4 + // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] + WORD $0x4DFFE898 + // restore R4 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V28.S4] + WORD $0x4D40C8FC + // VLD3R (R6), [V29.S4, V30.S4, V31.S4] + WORD $0x4D40E8DD + + VADD V20.S4, V4.S4, V4.S4 + VADD V21.S4, V5.S4, V5.S4 + VADD V22.S4, V6.S4, V6.S4 + VADD V23.S4, V7.S4, V7.S4 + VADD V24.S4, V8.S4, V8.S4 + VADD V25.S4, V9.S4, V9.S4 + VADD V26.S4, V10.S4, V10.S4 + VADD V27.S4, V11.S4, V11.S4 + VADD V28.S4, V12.S4, V12.S4 + VADD V29.S4, V13.S4, V13.S4 + VADD V30.S4, V14.S4, V14.S4 + VADD V31.S4, V15.S4, V15.S4 + + VZIP1 V1.S4, V0.S4, V16.S4 + VZIP2 V1.S4, V0.S4, V17.S4 + VZIP1 V3.S4, V2.S4, V18.S4 + VZIP2 V3.S4, V2.S4, V19.S4 + VZIP1 V5.S4, V4.S4, V20.S4 + VZIP2 V5.S4, V4.S4, V21.S4 + VZIP1 V7.S4, V6.S4, V22.S4 + VZIP2 V7.S4, V6.S4, V23.S4 + VZIP1 V9.S4, V8.S4, V24.S4 + VZIP2 V9.S4, V8.S4, V25.S4 + VZIP1 V11.S4, V10.S4, V26.S4 + VZIP2 V11.S4, V10.S4, V27.S4 + VZIP1 V13.S4, V12.S4, V28.S4 + VZIP2 V13.S4, V12.S4, V29.S4 + VZIP1 V15.S4, V14.S4, V30.S4 + VZIP2 V15.S4, V14.S4, V31.S4 + VZIP1 V18.D2, V16.D2, V0.D2 + VZIP2 V18.D2, V16.D2, V4.D2 + VZIP1 V19.D2, V17.D2, V8.D2 + VZIP2 V19.D2, V17.D2, V12.D2 + VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] + + VZIP1 V22.D2, V20.D2, V1.D2 + VZIP2 V22.D2, V20.D2, V5.D2 + VZIP1 V23.D2, V21.D2, V9.D2 + VZIP2 V23.D2, V21.D2, V13.D2 + VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] + VZIP1 V26.D2, V24.D2, V2.D2 + VZIP2 V26.D2, V24.D2, V6.D2 + VZIP1 V27.D2, V25.D2, V10.D2 + VZIP2 V27.D2, V25.D2, V14.D2 + VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] + VZIP1 V30.D2, V28.D2, V3.D2 + VZIP2 V30.D2, V28.D2, V7.D2 + VZIP1 V31.D2, V29.D2, V11.D2 + VZIP2 V31.D2, V29.D2, V15.D2 + VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] + VEOR V0.B16, V16.B16, V16.B16 + VEOR V1.B16, V17.B16, V17.B16 + VEOR V2.B16, 
V18.B16, V18.B16 + VEOR V3.B16, V19.B16, V19.B16 + VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) + VEOR V4.B16, V20.B16, V20.B16 + VEOR V5.B16, V21.B16, V21.B16 + VEOR V6.B16, V22.B16, V22.B16 + VEOR V7.B16, V23.B16, V23.B16 + VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) + VEOR V8.B16, V24.B16, V24.B16 + VEOR V9.B16, V25.B16, V25.B16 + VEOR V10.B16, V26.B16, V26.B16 + VEOR V11.B16, V27.B16, V27.B16 + VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) + VEOR V12.B16, V28.B16, V28.B16 + VEOR V13.B16, V29.B16, V29.B16 + VEOR V14.B16, V30.B16, V30.B16 + VEOR V15.B16, V31.B16, V31.B16 + VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) + + ADD $4, R20 + MOVW R20, (R7) // update counter + + CMP R2, R12 + BGT loop + + RET + + +DATA ·constants+0x00(SB)/4, $0x61707865 +DATA ·constants+0x04(SB)/4, $0x3320646e +DATA ·constants+0x08(SB)/4, $0x79622d32 +DATA ·constants+0x0c(SB)/4, $0x6b206574 +GLOBL ·constants(SB), NOPTR|RODATA, $32 + +DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 +DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 +DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 +DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 +DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 +DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 +DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B +DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F +GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s b/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s new file mode 100644 index 00000000..cde3fc98 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s @@ -0,0 +1,668 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on CRYPTOGAMS code with the following comment: +// # ==================================================================== +// # Written by Andy Polyakov for the OpenSSL +// # project. The module is, however, dual licensed under OpenSSL and +// # CRYPTOGAMS licenses depending on where you obtain it. For further +// # details see http://www.openssl.org/~appro/cryptogams/. +// # ==================================================================== + +// Original code can be found at the link below: +// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91e5c39ca79126a4a876d5d8ff + +// There are some differences between CRYPTOGAMS code and this one. The round +// loop for "_int" isn't the same as the original. Some adjustments were +// necessary because there are less vector registers available. For example, some +// X variables (r12, r13, r14, and r15) share the same register used by the +// counter. The original code uses ctr to name the counter. Here we use CNT +// because golang uses CTR as the counter register name. 
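As an aid for reading the VMX code below (and the other ChaCha20 assembly in this diff): every keystream block starts from the same 16-word state of four constants, eight key words, a 32-bit block counter, and three nonce words (RFC 7539, Section 2.3). A small illustrative sketch of that layout, not part of the vendored package:

package main

import "fmt"

// initialState builds the 16-word ChaCha20 input block per RFC 7539, Section 2.3.
func initialState(key [8]uint32, counter uint32, nonce [3]uint32) [16]uint32 {
	var s [16]uint32
	// "expand 32-byte k"
	s[0], s[1], s[2], s[3] = 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
	copy(s[4:12], key[:])    // eight key words
	s[12] = counter          // block counter, incremented per 64-byte block
	copy(s[13:16], nonce[:]) // three nonce words
	return s
}

func main() {
	var key [8]uint32
	var nonce [3]uint32
	fmt.Println(initialState(key, 1, nonce))
}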
+ +// +build ppc64le,!gccgo,!appengine + +#include "textflag.h" + +#define OUT R3 +#define INP R4 +#define LEN R5 +#define KEY R6 +#define CNT R7 + +#define TEMP R8 + +#define X0 R11 +#define X1 R12 +#define X2 R14 +#define X3 R15 +#define X4 R16 +#define X5 R17 +#define X6 R18 +#define X7 R19 +#define X8 R20 +#define X9 R21 +#define X10 R22 +#define X11 R23 +#define X12 R24 +#define X13 R25 +#define X14 R26 +#define X15 R27 + +#define CON0 X0 +#define CON1 X1 +#define CON2 X2 +#define CON3 X3 + +#define KEY0 X4 +#define KEY1 X5 +#define KEY2 X6 +#define KEY3 X7 +#define KEY4 X8 +#define KEY5 X9 +#define KEY6 X10 +#define KEY7 X11 + +#define CNT0 X12 +#define CNT1 X13 +#define CNT2 X14 +#define CNT3 X15 + +#define TMP0 R9 +#define TMP1 R10 +#define TMP2 R28 +#define TMP3 R29 + +#define CONSTS R8 + +#define A0 V0 +#define B0 V1 +#define C0 V2 +#define D0 V3 +#define A1 V4 +#define B1 V5 +#define C1 V6 +#define D1 V7 +#define A2 V8 +#define B2 V9 +#define C2 V10 +#define D2 V11 +#define T0 V12 +#define T1 V13 +#define T2 V14 + +#define K0 V15 +#define K1 V16 +#define K2 V17 +#define K3 V18 +#define K4 V19 +#define K5 V20 + +#define FOUR V21 +#define SIXTEEN V22 +#define TWENTY4 V23 +#define TWENTY V24 +#define TWELVE V25 +#define TWENTY5 V26 +#define SEVEN V27 + +#define INPPERM V28 +#define OUTPERM V29 +#define OUTMASK V30 + +#define DD0 V31 +#define DD1 SEVEN +#define DD2 T0 +#define DD3 T1 +#define DD4 T2 + +DATA ·consts+0x00(SB)/8, $0x3320646e61707865 +DATA ·consts+0x08(SB)/8, $0x6b20657479622d32 +DATA ·consts+0x10(SB)/8, $0x0000000000000001 +DATA ·consts+0x18(SB)/8, $0x0000000000000000 +DATA ·consts+0x20(SB)/8, $0x0000000000000004 +DATA ·consts+0x28(SB)/8, $0x0000000000000000 +DATA ·consts+0x30(SB)/8, $0x0a0b08090e0f0c0d +DATA ·consts+0x38(SB)/8, $0x0203000106070405 +DATA ·consts+0x40(SB)/8, $0x090a0b080d0e0f0c +DATA ·consts+0x48(SB)/8, $0x0102030005060704 +GLOBL ·consts(SB), RODATA, $80 + +//func chaCha20_ctr32_vmx(out, inp *byte, len int, key *[32]byte, counter *[16]byte) +TEXT ·chaCha20_ctr32_vmx(SB),NOSPLIT|NOFRAME,$0 + // Load the arguments inside the registers + MOVD out+0(FP), OUT + MOVD inp+8(FP), INP + MOVD len+16(FP), LEN + MOVD key+24(FP), KEY + MOVD counter+32(FP), CNT + + MOVD $·consts(SB), CONSTS // point to consts addr + + MOVD $16, X0 + MOVD $32, X1 + MOVD $48, X2 + MOVD $64, X3 + MOVD $31, X4 + MOVD $15, X5 + + // Load key + LVX (KEY)(R0), K1 + LVSR (KEY)(R0), T0 + LVX (KEY)(X0), K2 + LVX (KEY)(X4), DD0 + + // Load counter + LVX (CNT)(R0), K3 + LVSR (CNT)(R0), T1 + LVX (CNT)(X5), DD1 + + // Load constants + LVX (CONSTS)(R0), K0 + LVX (CONSTS)(X0), K5 + LVX (CONSTS)(X1), FOUR + LVX (CONSTS)(X2), SIXTEEN + LVX (CONSTS)(X3), TWENTY4 + + // Align key and counter + VPERM K2, K1, T0, K1 + VPERM DD0, K2, T0, K2 + VPERM DD1, K3, T1, K3 + + // Load counter to GPR + MOVWZ 0(CNT), CNT0 + MOVWZ 4(CNT), CNT1 + MOVWZ 8(CNT), CNT2 + MOVWZ 12(CNT), CNT3 + + // Adjust vectors for the initial state + VADDUWM K3, K5, K3 + VADDUWM K3, K5, K4 + VADDUWM K4, K5, K5 + + // Synthesized constants + VSPLTISW $-12, TWENTY + VSPLTISW $12, TWELVE + VSPLTISW $-7, TWENTY5 + + VXOR T0, T0, T0 + VSPLTISW $-1, OUTMASK + LVSR (INP)(R0), INPPERM + LVSL (OUT)(R0), OUTPERM + VPERM OUTMASK, T0, OUTPERM, OUTMASK + +loop_outer_vmx: + // Load constant + MOVD $0x61707865, CON0 + MOVD $0x3320646e, CON1 + MOVD $0x79622d32, CON2 + MOVD $0x6b206574, CON3 + + VOR K0, K0, A0 + VOR K0, K0, A1 + VOR K0, K0, A2 + VOR K1, K1, B0 + + MOVD $10, TEMP + + // Load key to GPR + MOVWZ 0(KEY), X4 + MOVWZ 4(KEY), X5 + MOVWZ 8(KEY), 
X6 + MOVWZ 12(KEY), X7 + VOR K1, K1, B1 + VOR K1, K1, B2 + MOVWZ 16(KEY), X8 + MOVWZ 0(CNT), X12 + MOVWZ 20(KEY), X9 + MOVWZ 4(CNT), X13 + VOR K2, K2, C0 + VOR K2, K2, C1 + MOVWZ 24(KEY), X10 + MOVWZ 8(CNT), X14 + VOR K2, K2, C2 + VOR K3, K3, D0 + MOVWZ 28(KEY), X11 + MOVWZ 12(CNT), X15 + VOR K4, K4, D1 + VOR K5, K5, D2 + + MOVD X4, TMP0 + MOVD X5, TMP1 + MOVD X6, TMP2 + MOVD X7, TMP3 + VSPLTISW $7, SEVEN + + MOVD TEMP, CTR + +loop_vmx: + // CRYPTOGAMS uses a macro to create a loop using perl. This isn't possible + // using assembly macros. Therefore, the macro expansion result was used + // in order to maintain the algorithm efficiency. + // This loop generates three keystream blocks using VMX instructions and, + // in parallel, one keystream block using scalar instructions. + ADD X4, X0, X0 + ADD X5, X1, X1 + VADDUWM A0, B0, A0 + VADDUWM A1, B1, A1 + ADD X6, X2, X2 + ADD X7, X3, X3 + VADDUWM A2, B2, A2 + VXOR D0, A0, D0 + XOR X0, X12, X12 + XOR X1, X13, X13 + VXOR D1, A1, D1 + VXOR D2, A2, D2 + XOR X2, X14, X14 + XOR X3, X15, X15 + VPERM D0, D0, SIXTEEN, D0 + VPERM D1, D1, SIXTEEN, D1 + ROTLW $16, X12, X12 + ROTLW $16, X13, X13 + VPERM D2, D2, SIXTEEN, D2 + VADDUWM C0, D0, C0 + ROTLW $16, X14, X14 + ROTLW $16, X15, X15 + VADDUWM C1, D1, C1 + VADDUWM C2, D2, C2 + ADD X12, X8, X8 + ADD X13, X9, X9 + VXOR B0, C0, T0 + VXOR B1, C1, T1 + ADD X14, X10, X10 + ADD X15, X11, X11 + VXOR B2, C2, T2 + VRLW T0, TWELVE, B0 + XOR X8, X4, X4 + XOR X9, X5, X5 + VRLW T1, TWELVE, B1 + VRLW T2, TWELVE, B2 + XOR X10, X6, X6 + XOR X11, X7, X7 + VADDUWM A0, B0, A0 + VADDUWM A1, B1, A1 + ROTLW $12, X4, X4 + ROTLW $12, X5, X5 + VADDUWM A2, B2, A2 + VXOR D0, A0, D0 + ROTLW $12, X6, X6 + ROTLW $12, X7, X7 + VXOR D1, A1, D1 + VXOR D2, A2, D2 + ADD X4, X0, X0 + ADD X5, X1, X1 + VPERM D0, D0, TWENTY4, D0 + VPERM D1, D1, TWENTY4, D1 + ADD X6, X2, X2 + ADD X7, X3, X3 + VPERM D2, D2, TWENTY4, D2 + VADDUWM C0, D0, C0 + XOR X0, X12, X12 + XOR X1, X13, X13 + VADDUWM C1, D1, C1 + VADDUWM C2, D2, C2 + XOR X2, X14, X14 + XOR X3, X15, X15 + VXOR B0, C0, T0 + VXOR B1, C1, T1 + ROTLW $8, X12, X12 + ROTLW $8, X13, X13 + VXOR B2, C2, T2 + VRLW T0, SEVEN, B0 + ROTLW $8, X14, X14 + ROTLW $8, X15, X15 + VRLW T1, SEVEN, B1 + VRLW T2, SEVEN, B2 + ADD X12, X8, X8 + ADD X13, X9, X9 + VSLDOI $8, C0, C0, C0 + VSLDOI $8, C1, C1, C1 + ADD X14, X10, X10 + ADD X15, X11, X11 + VSLDOI $8, C2, C2, C2 + VSLDOI $12, B0, B0, B0 + XOR X8, X4, X4 + XOR X9, X5, X5 + VSLDOI $12, B1, B1, B1 + VSLDOI $12, B2, B2, B2 + XOR X10, X6, X6 + XOR X11, X7, X7 + VSLDOI $4, D0, D0, D0 + VSLDOI $4, D1, D1, D1 + ROTLW $7, X4, X4 + ROTLW $7, X5, X5 + VSLDOI $4, D2, D2, D2 + VADDUWM A0, B0, A0 + ROTLW $7, X6, X6 + ROTLW $7, X7, X7 + VADDUWM A1, B1, A1 + VADDUWM A2, B2, A2 + ADD X5, X0, X0 + ADD X6, X1, X1 + VXOR D0, A0, D0 + VXOR D1, A1, D1 + ADD X7, X2, X2 + ADD X4, X3, X3 + VXOR D2, A2, D2 + VPERM D0, D0, SIXTEEN, D0 + XOR X0, X15, X15 + XOR X1, X12, X12 + VPERM D1, D1, SIXTEEN, D1 + VPERM D2, D2, SIXTEEN, D2 + XOR X2, X13, X13 + XOR X3, X14, X14 + VADDUWM C0, D0, C0 + VADDUWM C1, D1, C1 + ROTLW $16, X15, X15 + ROTLW $16, X12, X12 + VADDUWM C2, D2, C2 + VXOR B0, C0, T0 + ROTLW $16, X13, X13 + ROTLW $16, X14, X14 + VXOR B1, C1, T1 + VXOR B2, C2, T2 + ADD X15, X10, X10 + ADD X12, X11, X11 + VRLW T0, TWELVE, B0 + VRLW T1, TWELVE, B1 + ADD X13, X8, X8 + ADD X14, X9, X9 + VRLW T2, TWELVE, B2 + VADDUWM A0, B0, A0 + XOR X10, X5, X5 + XOR X11, X6, X6 + VADDUWM A1, B1, A1 + VADDUWM A2, B2, A2 + XOR X8, X7, X7 + XOR X9, X4, X4 + VXOR D0, A0, D0 + VXOR D1, A1, D1 + ROTLW $12, 
X5, X5 + ROTLW $12, X6, X6 + VXOR D2, A2, D2 + VPERM D0, D0, TWENTY4, D0 + ROTLW $12, X7, X7 + ROTLW $12, X4, X4 + VPERM D1, D1, TWENTY4, D1 + VPERM D2, D2, TWENTY4, D2 + ADD X5, X0, X0 + ADD X6, X1, X1 + VADDUWM C0, D0, C0 + VADDUWM C1, D1, C1 + ADD X7, X2, X2 + ADD X4, X3, X3 + VADDUWM C2, D2, C2 + VXOR B0, C0, T0 + XOR X0, X15, X15 + XOR X1, X12, X12 + VXOR B1, C1, T1 + VXOR B2, C2, T2 + XOR X2, X13, X13 + XOR X3, X14, X14 + VRLW T0, SEVEN, B0 + VRLW T1, SEVEN, B1 + ROTLW $8, X15, X15 + ROTLW $8, X12, X12 + VRLW T2, SEVEN, B2 + VSLDOI $8, C0, C0, C0 + ROTLW $8, X13, X13 + ROTLW $8, X14, X14 + VSLDOI $8, C1, C1, C1 + VSLDOI $8, C2, C2, C2 + ADD X15, X10, X10 + ADD X12, X11, X11 + VSLDOI $4, B0, B0, B0 + VSLDOI $4, B1, B1, B1 + ADD X13, X8, X8 + ADD X14, X9, X9 + VSLDOI $4, B2, B2, B2 + VSLDOI $12, D0, D0, D0 + XOR X10, X5, X5 + XOR X11, X6, X6 + VSLDOI $12, D1, D1, D1 + VSLDOI $12, D2, D2, D2 + XOR X8, X7, X7 + XOR X9, X4, X4 + ROTLW $7, X5, X5 + ROTLW $7, X6, X6 + ROTLW $7, X7, X7 + ROTLW $7, X4, X4 + BC 0x10, 0, loop_vmx + + SUB $256, LEN, LEN + + // Accumulate key block + ADD $0x61707865, X0, X0 + ADD $0x3320646e, X1, X1 + ADD $0x79622d32, X2, X2 + ADD $0x6b206574, X3, X3 + ADD TMP0, X4, X4 + ADD TMP1, X5, X5 + ADD TMP2, X6, X6 + ADD TMP3, X7, X7 + MOVWZ 16(KEY), TMP0 + MOVWZ 20(KEY), TMP1 + MOVWZ 24(KEY), TMP2 + MOVWZ 28(KEY), TMP3 + ADD TMP0, X8, X8 + ADD TMP1, X9, X9 + ADD TMP2, X10, X10 + ADD TMP3, X11, X11 + + MOVWZ 12(CNT), TMP0 + MOVWZ 8(CNT), TMP1 + MOVWZ 4(CNT), TMP2 + MOVWZ 0(CNT), TEMP + ADD TMP0, X15, X15 + ADD TMP1, X14, X14 + ADD TMP2, X13, X13 + ADD TEMP, X12, X12 + + // Accumulate key block + VADDUWM A0, K0, A0 + VADDUWM A1, K0, A1 + VADDUWM A2, K0, A2 + VADDUWM B0, K1, B0 + VADDUWM B1, K1, B1 + VADDUWM B2, K1, B2 + VADDUWM C0, K2, C0 + VADDUWM C1, K2, C1 + VADDUWM C2, K2, C2 + VADDUWM D0, K3, D0 + VADDUWM D1, K4, D1 + VADDUWM D2, K5, D2 + + // Increment counter + ADD $4, TEMP, TEMP + MOVW TEMP, 0(CNT) + + VADDUWM K3, FOUR, K3 + VADDUWM K4, FOUR, K4 + VADDUWM K5, FOUR, K5 + + // XOR the input slice (INP) with the keystream, which is stored in GPRs (X0-X3). 
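+	// Note: LVX ignores the low four bits of the effective address, so the
+	// vector path below assembles unaligned input from adjacent aligned
+	// quadwords with VPERM via INPPERM, and realigns output via
+	// OUTPERM/OUTMASK before storing.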
+ + // Load input (aligned or not) + MOVWZ 0(INP), TMP0 + MOVWZ 4(INP), TMP1 + MOVWZ 8(INP), TMP2 + MOVWZ 12(INP), TMP3 + + // XOR with input + XOR TMP0, X0, X0 + XOR TMP1, X1, X1 + XOR TMP2, X2, X2 + XOR TMP3, X3, X3 + MOVWZ 16(INP), TMP0 + MOVWZ 20(INP), TMP1 + MOVWZ 24(INP), TMP2 + MOVWZ 28(INP), TMP3 + XOR TMP0, X4, X4 + XOR TMP1, X5, X5 + XOR TMP2, X6, X6 + XOR TMP3, X7, X7 + MOVWZ 32(INP), TMP0 + MOVWZ 36(INP), TMP1 + MOVWZ 40(INP), TMP2 + MOVWZ 44(INP), TMP3 + XOR TMP0, X8, X8 + XOR TMP1, X9, X9 + XOR TMP2, X10, X10 + XOR TMP3, X11, X11 + MOVWZ 48(INP), TMP0 + MOVWZ 52(INP), TMP1 + MOVWZ 56(INP), TMP2 + MOVWZ 60(INP), TMP3 + XOR TMP0, X12, X12 + XOR TMP1, X13, X13 + XOR TMP2, X14, X14 + XOR TMP3, X15, X15 + + // Store output (aligned or not) + MOVW X0, 0(OUT) + MOVW X1, 4(OUT) + MOVW X2, 8(OUT) + MOVW X3, 12(OUT) + + ADD $64, INP, INP // INP points to the end of the slice for the alignment code below + + MOVW X4, 16(OUT) + MOVD $16, TMP0 + MOVW X5, 20(OUT) + MOVD $32, TMP1 + MOVW X6, 24(OUT) + MOVD $48, TMP2 + MOVW X7, 28(OUT) + MOVD $64, TMP3 + MOVW X8, 32(OUT) + MOVW X9, 36(OUT) + MOVW X10, 40(OUT) + MOVW X11, 44(OUT) + MOVW X12, 48(OUT) + MOVW X13, 52(OUT) + MOVW X14, 56(OUT) + MOVW X15, 60(OUT) + ADD $64, OUT, OUT + + // Load input + LVX (INP)(R0), DD0 + LVX (INP)(TMP0), DD1 + LVX (INP)(TMP1), DD2 + LVX (INP)(TMP2), DD3 + LVX (INP)(TMP3), DD4 + ADD $64, INP, INP + + VPERM DD1, DD0, INPPERM, DD0 // Align input + VPERM DD2, DD1, INPPERM, DD1 + VPERM DD3, DD2, INPPERM, DD2 + VPERM DD4, DD3, INPPERM, DD3 + VXOR A0, DD0, A0 // XOR with input + VXOR B0, DD1, B0 + LVX (INP)(TMP0), DD1 // Keep loading input + VXOR C0, DD2, C0 + LVX (INP)(TMP1), DD2 + VXOR D0, DD3, D0 + LVX (INP)(TMP2), DD3 + LVX (INP)(TMP3), DD0 + ADD $64, INP, INP + MOVD $63, TMP3 // 63 is not a typo + VPERM A0, A0, OUTPERM, A0 + VPERM B0, B0, OUTPERM, B0 + VPERM C0, C0, OUTPERM, C0 + VPERM D0, D0, OUTPERM, D0 + + VPERM DD1, DD4, INPPERM, DD4 // Align input + VPERM DD2, DD1, INPPERM, DD1 + VPERM DD3, DD2, INPPERM, DD2 + VPERM DD0, DD3, INPPERM, DD3 + VXOR A1, DD4, A1 + VXOR B1, DD1, B1 + LVX (INP)(TMP0), DD1 // Keep loading + VXOR C1, DD2, C1 + LVX (INP)(TMP1), DD2 + VXOR D1, DD3, D1 + LVX (INP)(TMP2), DD3 + + // Note that the LVX address is always rounded down to the nearest 16-byte + // boundary, and that it always points to at most 15 bytes beyond the end of + // the slice, so we cannot cross a page boundary. + LVX (INP)(TMP3), DD4 // Redundant in aligned case. + ADD $64, INP, INP + VPERM A1, A1, OUTPERM, A1 // Pre-misalign output + VPERM B1, B1, OUTPERM, B1 + VPERM C1, C1, OUTPERM, C1 + VPERM D1, D1, OUTPERM, D1 + + VPERM DD1, DD0, INPPERM, DD0 // Align Input + VPERM DD2, DD1, INPPERM, DD1 + VPERM DD3, DD2, INPPERM, DD2 + VPERM DD4, DD3, INPPERM, DD3 + VXOR A2, DD0, A2 + VXOR B2, DD1, B2 + VXOR C2, DD2, C2 + VXOR D2, DD3, D2 + VPERM A2, A2, OUTPERM, A2 + VPERM B2, B2, OUTPERM, B2 + VPERM C2, C2, OUTPERM, C2 + VPERM D2, D2, OUTPERM, D2 + + ANDCC $15, OUT, X1 // Is out aligned? 
+ MOVD OUT, X0 + + VSEL A0, B0, OUTMASK, DD0 // Collect pre-misaligned output + VSEL B0, C0, OUTMASK, DD1 + VSEL C0, D0, OUTMASK, DD2 + VSEL D0, A1, OUTMASK, DD3 + VSEL A1, B1, OUTMASK, B0 + VSEL B1, C1, OUTMASK, C0 + VSEL C1, D1, OUTMASK, D0 + VSEL D1, A2, OUTMASK, A1 + VSEL A2, B2, OUTMASK, B1 + VSEL B2, C2, OUTMASK, C1 + VSEL C2, D2, OUTMASK, D1 + + STVX DD0, (OUT+TMP0) + STVX DD1, (OUT+TMP1) + STVX DD2, (OUT+TMP2) + ADD $64, OUT, OUT + STVX DD3, (OUT+R0) + STVX B0, (OUT+TMP0) + STVX C0, (OUT+TMP1) + STVX D0, (OUT+TMP2) + ADD $64, OUT, OUT + STVX A1, (OUT+R0) + STVX B1, (OUT+TMP0) + STVX C1, (OUT+TMP1) + STVX D1, (OUT+TMP2) + ADD $64, OUT, OUT + + BEQ aligned_vmx + + SUB X1, OUT, X2 // in misaligned case edges + MOVD $0, X3 // are written byte-by-byte + +unaligned_tail_vmx: + STVEBX D2, (X2+X3) + ADD $1, X3, X3 + CMPW X3, X1 + BNE unaligned_tail_vmx + SUB X1, X0, X2 + +unaligned_head_vmx: + STVEBX A0, (X2+X1) + CMPW X1, $15 + ADD $1, X1, X1 + BNE unaligned_head_vmx + + CMPU LEN, $255 // done with 256-byte block yet? + BGT loop_outer_vmx + + JMP done_vmx + +aligned_vmx: + STVX A0, (X0+R0) + CMPU LEN, $255 // done with 256-byte block yet? + BGT loop_outer_vmx + +done_vmx: + RET diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go new file mode 100644 index 00000000..ad74e23a --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 +// +build !gccgo + +package chacha20 + +const ( + haveAsm = true + bufSize = 256 +) + +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { + + if len(src) >= bufSize { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) + } + + if len(src)%bufSize != 0 { + i := len(src) - len(src)%bufSize + c.buf = [bufSize]byte{} + copy(c.buf[:], src[i:]) + xorKeyStreamVX(c.buf[:], c.buf[:], &c.key, &c.nonce, &c.counter) + c.len = bufSize - copy(dst[i:], c.buf[:len(src)%bufSize]) + } +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go new file mode 100644 index 00000000..6570847f --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go @@ -0,0 +1,264 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ChaCha20 implements the core ChaCha20 function as specified +// in https://tools.ietf.org/html/rfc7539#section-2.3. +package chacha20 + +import ( + "crypto/cipher" + "encoding/binary" + + "golang.org/x/crypto/internal/subtle" +) + +// assert that *Cipher implements cipher.Stream +var _ cipher.Stream = (*Cipher)(nil) + +// Cipher is a stateful instance of ChaCha20 using a particular key +// and nonce. A *Cipher implements the cipher.Stream interface. +type Cipher struct { + key [8]uint32 + counter uint32 // incremented after each block + nonce [3]uint32 + buf [bufSize]byte // buffer for unused keystream bytes + len int // number of unused keystream bytes at end of buf +} + +// New creates a new ChaCha20 stream cipher with the given key and nonce. +// The initial counter value is set to 0. 
+func New(key [8]uint32, nonce [3]uint32) *Cipher { + return &Cipher{key: key, nonce: nonce} +} + +// ChaCha20 constants spelling "expand 32-byte k" +const ( + j0 uint32 = 0x61707865 + j1 uint32 = 0x3320646e + j2 uint32 = 0x79622d32 + j3 uint32 = 0x6b206574 +) + +func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { + a += b + d ^= a + d = (d << 16) | (d >> 16) + c += d + b ^= c + b = (b << 12) | (b >> 20) + a += b + d ^= a + d = (d << 8) | (d >> 24) + c += d + b ^= c + b = (b << 7) | (b >> 25) + return a, b, c, d +} + +// XORKeyStream XORs each byte in the given slice with a byte from the +// cipher's key stream. Dst and src must overlap entirely or not at all. +// +// If len(dst) < len(src), XORKeyStream will panic. It is acceptable +// to pass a dst bigger than src, and in that case, XORKeyStream will +// only update dst[:len(src)] and will not touch the rest of dst. +// +// Multiple calls to XORKeyStream behave as if the concatenation of +// the src buffers was passed in a single run. That is, Cipher +// maintains state and does not reset at each XORKeyStream call. +func (s *Cipher) XORKeyStream(dst, src []byte) { + if len(dst) < len(src) { + panic("chacha20: output smaller than input") + } + if subtle.InexactOverlap(dst[:len(src)], src) { + panic("chacha20: invalid buffer overlap") + } + + // xor src with buffered keystream first + if s.len != 0 { + buf := s.buf[len(s.buf)-s.len:] + if len(src) < len(buf) { + buf = buf[:len(src)] + } + td, ts := dst[:len(buf)], src[:len(buf)] // BCE hint + for i, b := range buf { + td[i] = ts[i] ^ b + } + s.len -= len(buf) + if s.len != 0 { + return + } + s.buf = [len(s.buf)]byte{} // zero the empty buffer + src = src[len(buf):] + dst = dst[len(buf):] + } + + if len(src) == 0 { + return + } + if haveAsm { + if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 { + panic("chacha20: counter overflow") + } + s.xorKeyStreamAsm(dst, src) + return + } + + // set up a 64-byte buffer to pad out the final block if needed + // (hoisted out of the main loop to avoid spills) + rem := len(src) % 64 // length of final block + fin := len(src) - rem // index of final block + if rem > 0 { + copy(s.buf[len(s.buf)-64:], src[fin:]) + } + + // pre-calculate most of the first round + s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0]) + s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1]) + s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2]) + + n := len(src) + src, dst = src[:n:n], dst[:n:n] // BCE hint + for i := 0; i < n; i += 64 { + // calculate the remainder of the first round + s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter) + + // execute the second round + x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15) + x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12) + x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13) + x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14) + + // execute the remaining 18 rounds + for i := 0; i < 9; i++ { + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + + x4 += s.key[0] + x5 += s.key[1] + x6 += s.key[2] + x7 += s.key[3] + x8 += s.key[4] + x9 += s.key[5] + 
x10 += s.key[6] + x11 += s.key[7] + + x12 += s.counter + x13 += s.nonce[0] + x14 += s.nonce[1] + x15 += s.nonce[2] + + // increment the counter + s.counter += 1 + if s.counter == 0 { + panic("chacha20: counter overflow") + } + + // pad to 64 bytes if needed + in, out := src[i:], dst[i:] + if i == fin { + // src[fin:] has already been copied into s.buf before + // the main loop + in, out = s.buf[len(s.buf)-64:], s.buf[len(s.buf)-64:] + } + in, out = in[:64], out[:64] // BCE hint + + // XOR the key stream with the source and write out the result + xor(out[0:], in[0:], x0) + xor(out[4:], in[4:], x1) + xor(out[8:], in[8:], x2) + xor(out[12:], in[12:], x3) + xor(out[16:], in[16:], x4) + xor(out[20:], in[20:], x5) + xor(out[24:], in[24:], x6) + xor(out[28:], in[28:], x7) + xor(out[32:], in[32:], x8) + xor(out[36:], in[36:], x9) + xor(out[40:], in[40:], x10) + xor(out[44:], in[44:], x11) + xor(out[48:], in[48:], x12) + xor(out[52:], in[52:], x13) + xor(out[56:], in[56:], x14) + xor(out[60:], in[60:], x15) + } + // copy any trailing bytes out of the buffer and into dst + if rem != 0 { + s.len = 64 - rem + copy(dst[fin:], s.buf[len(s.buf)-64:]) + } +} + +// Advance discards bytes in the key stream until the next 64 byte block +// boundary is reached and updates the counter accordingly. If the key +// stream is already at a block boundary no bytes will be discarded and +// the counter will be unchanged. +func (s *Cipher) Advance() { + s.len -= s.len % 64 + if s.len == 0 { + s.buf = [len(s.buf)]byte{} + } +} + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter contains the raw +// ChaCha20 counter bytes (i.e. block counter followed by nonce). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + s := Cipher{ + key: [8]uint32{ + binary.LittleEndian.Uint32(key[0:4]), + binary.LittleEndian.Uint32(key[4:8]), + binary.LittleEndian.Uint32(key[8:12]), + binary.LittleEndian.Uint32(key[12:16]), + binary.LittleEndian.Uint32(key[16:20]), + binary.LittleEndian.Uint32(key[20:24]), + binary.LittleEndian.Uint32(key[24:28]), + binary.LittleEndian.Uint32(key[28:32]), + }, + nonce: [3]uint32{ + binary.LittleEndian.Uint32(counter[4:8]), + binary.LittleEndian.Uint32(counter[8:12]), + binary.LittleEndian.Uint32(counter[12:16]), + }, + counter: binary.LittleEndian.Uint32(counter[0:4]), + } + s.XORKeyStream(out, in) +} + +// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a +// nonce. It should only be used as part of the XChaCha20 construction. 
+func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 { + x0, x1, x2, x3 := j0, j1, j2, j3 + x4, x5, x6, x7 := key[0], key[1], key[2], key[3] + x8, x9, x10, x11 := key[4], key[5], key[6], key[7] + x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3] + + for i := 0; i < 10; i++ { + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + var out [8]uint32 + out[0], out[1], out[2], out[3] = x0, x1, x2, x3 + out[4], out[5], out[6], out[7] = x12, x13, x14, x15 + return out +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go new file mode 100644 index 00000000..bf8beba6 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !ppc64le,!arm64,!s390x arm64,!go1.11 gccgo appengine + +package chacha20 + +const ( + bufSize = 64 + haveAsm = false +) + +func (*Cipher) xorKeyStreamAsm(dst, src []byte) { + panic("not implemented") +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go new file mode 100644 index 00000000..638cb5e5 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go @@ -0,0 +1,52 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64le,!gccgo,!appengine + +package chacha20 + +import "encoding/binary" + +const ( + bufSize = 256 + haveAsm = true +) + +//go:noescape +func chaCha20_ctr32_vmx(out, inp *byte, len int, key *[8]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { + if len(src) >= bufSize { + chaCha20_ctr32_vmx(&dst[0], &src[0], len(src)-len(src)%bufSize, &c.key, &c.counter) + } + if len(src)%bufSize != 0 { + chaCha20_ctr32_vmx(&c.buf[0], &c.buf[0], bufSize, &c.key, &c.counter) + start := len(src) - len(src)%bufSize + ts, td, tb := src[start:], dst[start:], c.buf[:] + // Unroll loop to XOR 32 bytes per iteration. 
+ for i := 0; i < len(ts)-32; i += 32 { + td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination + s0 := binary.LittleEndian.Uint64(ts[0:8]) + s1 := binary.LittleEndian.Uint64(ts[8:16]) + s2 := binary.LittleEndian.Uint64(ts[16:24]) + s3 := binary.LittleEndian.Uint64(ts[24:32]) + b0 := binary.LittleEndian.Uint64(tb[0:8]) + b1 := binary.LittleEndian.Uint64(tb[8:16]) + b2 := binary.LittleEndian.Uint64(tb[16:24]) + b3 := binary.LittleEndian.Uint64(tb[24:32]) + binary.LittleEndian.PutUint64(td[0:8], s0^b0) + binary.LittleEndian.PutUint64(td[8:16], s1^b1) + binary.LittleEndian.PutUint64(td[16:24], s2^b2) + binary.LittleEndian.PutUint64(td[24:32], s3^b3) + ts, td, tb = ts[32:], td[32:], tb[32:] + } + td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination + for i, v := range ts { + td[i] = tb[i] ^ v + } + c.len = bufSize - (len(src) % bufSize) + + } + +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go new file mode 100644 index 00000000..aad645b4 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,!gccgo,!appengine + +package chacha20 + +import ( + "golang.org/x/sys/cpu" +) + +var haveAsm = cpu.S390X.HasVX + +const bufSize = 256 + +// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only +// be called when the vector facility is available. +// Implementation in asm_s390x.s. +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int) + +func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter, &c.buf, &c.len) +} + +// EXRL targets, DO NOT CALL! +func mvcSrcToBuf() +func mvcBufToDst() diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s new file mode 100644 index 00000000..57df4044 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s @@ -0,0 +1,260 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,!gccgo,!appengine + +#include "go_asm.h" +#include "textflag.h" + +// This is an implementation of the ChaCha20 encryption algorithm as +// specified in RFC 7539. It uses vector instructions to compute +// 4 keystream blocks in parallel (256 bytes) which are then XORed +// with the bytes in the input slice. 
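For reference against the vectorized ROUND4 macro below, this is the scalar ChaCha20 quarter round it runs four-wide, checked against the test vector in RFC 7539, Section 2.1.1 (a standalone sketch, not vendored code):

package main

import (
	"fmt"
	"math/bits"
)

// quarterRound is the ChaCha20 quarter round (RFC 7539, Section 2.1).
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16)
	c += d
	b = bits.RotateLeft32(b^c, 12)
	a += b
	d = bits.RotateLeft32(d^a, 8)
	c += d
	b = bits.RotateLeft32(b^c, 7)
	return a, b, c, d
}

func main() {
	// Test vector from RFC 7539, Section 2.1.1.
	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
	// Expect: ea2a92f4 cb1cf8ce 4581472e 5881c4bb
}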
+ +GLOBL ·constants<>(SB), RODATA|NOPTR, $32 +// BSWAP: swap bytes in each 4-byte element +DATA ·constants<>+0x00(SB)/4, $0x03020100 +DATA ·constants<>+0x04(SB)/4, $0x07060504 +DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 +DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c +// J0: [j0, j1, j2, j3] +DATA ·constants<>+0x10(SB)/4, $0x61707865 +DATA ·constants<>+0x14(SB)/4, $0x3320646e +DATA ·constants<>+0x18(SB)/4, $0x79622d32 +DATA ·constants<>+0x1c(SB)/4, $0x6b206574 + +// EXRL targets: +TEXT ·mvcSrcToBuf(SB), NOFRAME|NOSPLIT, $0 + MVC $1, (R1), (R8) + RET + +TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0 + MVC $1, (R8), (R9) + RET + +#define BSWAP V5 +#define J0 V6 +#define KEY0 V7 +#define KEY1 V8 +#define NONCE V9 +#define CTR V10 +#define M0 V11 +#define M1 V12 +#define M2 V13 +#define M3 V14 +#define INC V15 +#define X0 V16 +#define X1 V17 +#define X2 V18 +#define X3 V19 +#define X4 V20 +#define X5 V21 +#define X6 V22 +#define X7 V23 +#define X8 V24 +#define X9 V25 +#define X10 V26 +#define X11 V27 +#define X12 V28 +#define X13 V29 +#define X14 V30 +#define X15 V31 + +#define NUM_ROUNDS 20 + +#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $16, a2, a2 \ + VERLLF $16, b2, b2 \ + VERLLF $16, c2, c2 \ + VERLLF $16, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $12, a1, a1 \ + VERLLF $12, b1, b1 \ + VERLLF $12, c1, c1 \ + VERLLF $12, d1, d1 \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $8, a2, a2 \ + VERLLF $8, b2, b2 \ + VERLLF $8, c2, c2 \ + VERLLF $8, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $7, a1, a1 \ + VERLLF $7, b1, b1 \ + VERLLF $7, c1, c1 \ + VERLLF $7, d1, d1 + +#define PERMUTE(mask, v0, v1, v2, v3) \ + VPERM v0, v0, mask, v0 \ + VPERM v1, v1, mask, v1 \ + VPERM v2, v2, mask, v2 \ + VPERM v3, v3, mask, v3 + +#define ADDV(x, v0, v1, v2, v3) \ + VAF x, v0, v0 \ + VAF x, v1, v1 \ + VAF x, v2, v2 \ + VAF x, v3, v3 + +#define XORV(off, dst, src, v0, v1, v2, v3) \ + VLM off(src), M0, M3 \ + PERMUTE(BSWAP, v0, v1, v2, v3) \ + VX v0, M0, M0 \ + VX v1, M1, M1 \ + VX v2, M2, M2 \ + VX v3, M3, M3 \ + VSTM M0, M3, off(dst) + +#define SHUFFLE(a, b, c, d, t, u, v, w) \ + VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} + VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} + VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} + VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} + VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} + VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} + VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} + VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD $·constants<>(SB), R1 + MOVD dst+0(FP), R2 // R2=&dst[0] + LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) + MOVD key+48(FP), R5 // R5=key + MOVD nonce+56(FP), R6 // R6=nonce + MOVD counter+64(FP), R7 // R7=counter + MOVD buf+72(FP), R8 // R8=buf + MOVD len+80(FP), R9 // R9=len + + // load BSWAP and J0 + VLM (R1), BSWAP, J0 + + // set up tail buffer + 
ADD $-1, R4, R12
+	MOVBZ R12, R12
+	CMPUBEQ R12, $255, aligned
+	MOVD R4, R1
+	AND $~255, R1
+	MOVD $(R3)(R1*1), R1
+	EXRL $·mvcSrcToBuf(SB), R12
+	MOVD $255, R0
+	SUB R12, R0
+	MOVD R0, (R9) // update len
+
+aligned:
+	// setup
+	MOVD $95, R0
+	VLM (R5), KEY0, KEY1
+	VLL R0, (R6), NONCE
+	VZERO M0
+	VLEIB $7, $32, M0
+	VSRLB M0, NONCE, NONCE
+
+	// initialize counter values
+	VLREPF (R7), CTR
+	VZERO INC
+	VLEIF $1, $1, INC
+	VLEIF $2, $2, INC
+	VLEIF $3, $3, INC
+	VAF INC, CTR, CTR
+	VREPIF $4, INC
+
+chacha:
+	VREPF $0, J0, X0
+	VREPF $1, J0, X1
+	VREPF $2, J0, X2
+	VREPF $3, J0, X3
+	VREPF $0, KEY0, X4
+	VREPF $1, KEY0, X5
+	VREPF $2, KEY0, X6
+	VREPF $3, KEY0, X7
+	VREPF $0, KEY1, X8
+	VREPF $1, KEY1, X9
+	VREPF $2, KEY1, X10
+	VREPF $3, KEY1, X11
+	VLR CTR, X12
+	VREPF $1, NONCE, X13
+	VREPF $2, NONCE, X14
+	VREPF $3, NONCE, X15
+
+	MOVD $(NUM_ROUNDS/2), R1
+
+loop:
+	ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11)
+	ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9)
+
+	ADD $-1, R1
+	BNE loop
+
+	// decrement length
+	ADD $-256, R4
+	BLT tail
+
+continue:
+	// rearrange vectors
+	SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3)
+	ADDV(J0, X0, X1, X2, X3)
+	SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3)
+	ADDV(KEY0, X4, X5, X6, X7)
+	SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3)
+	ADDV(KEY1, X8, X9, X10, X11)
+	VAF CTR, X12, X12
+	SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3)
+	ADDV(NONCE, X12, X13, X14, X15)
+
+	// increment counters
+	VAF INC, CTR, CTR
+
+	// xor keystream with plaintext
+	XORV(0*64, R2, R3, X0, X4, X8, X12)
+	XORV(1*64, R2, R3, X1, X5, X9, X13)
+	XORV(2*64, R2, R3, X2, X6, X10, X14)
+	XORV(3*64, R2, R3, X3, X7, X11, X15)
+
+	// increment pointers
+	MOVD $256(R2), R2
+	MOVD $256(R3), R3
+
+	CMPBNE R4, $0, chacha
+	CMPUBEQ R12, $255, return
+	EXRL $·mvcBufToDst(SB), R12 // len was updated during setup
+
+return:
+	VSTEF $0, CTR, (R7)
+	RET
+
+tail:
+	MOVD R2, R9
+	MOVD R8, R2
+	MOVD R8, R3
+	MOVD $0, R4
+	JMP continue
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/xor.go b/vendor/golang.org/x/crypto/internal/chacha20/xor.go
new file mode 100644
index 00000000..9c5ba0b3
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/chacha20/xor.go
@@ -0,0 +1,43 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package chacha20
+
+import (
+	"runtime"
+)
+
+// Platforms that have fast unaligned 32-bit little endian accesses.
+const unaligned = runtime.GOARCH == "386" ||
+	runtime.GOARCH == "amd64" ||
+	runtime.GOARCH == "arm64" ||
+	runtime.GOARCH == "ppc64le" ||
+	runtime.GOARCH == "s390x"
+
+// xor reads a little endian uint32 from src, XORs it with u and
+// places the result in little endian byte order in dst.
+func xor(dst, src []byte, u uint32) {
+	_, _ = src[3], dst[3] // eliminate bounds checks
+	if unaligned {
+		// The compiler should optimize this code into
+		// 32-bit unaligned little endian loads and stores.
+		// TODO: delete once the compiler does a reliably
+		// good job with the generic code below.
+		// See issue #25111 for more details.
+ v := uint32(src[0]) + v |= uint32(src[1]) << 8 + v |= uint32(src[2]) << 16 + v |= uint32(src[3]) << 24 + v ^= u + dst[0] = byte(v) + dst[1] = byte(v >> 8) + dst[2] = byte(v >> 16) + dst[3] = byte(v >> 24) + } else { + dst[0] = src[0] ^ byte(u) + dst[1] = src[1] ^ byte(u>>8) + dst[2] = src[2] ^ byte(u>>16) + dst[3] = src[3] ^ byte(u>>24) + } +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go new file mode 100644 index 00000000..f38797bf --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go new file mode 100644 index 00000000..0cc4a8a6 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. 
+func InexactOverlap(x, y []byte) bool {
+	if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
+		return false
+	}
+	return AnyOverlap(x, y)
+}
diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
new file mode 100644
index 00000000..a8dd589a
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!ppc64le gccgo appengine
+
+package poly1305
+
+type mac struct{ macGeneric }
+
+func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} }
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go
new file mode 100644
index 00000000..d076a562
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go
@@ -0,0 +1,83 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package poly1305 implements Poly1305 one-time message authentication code as
+// specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
+//
+// Poly1305 is a fast, one-time authentication function. It is infeasible for an
+// attacker to generate an authenticator for a message without the key. However, a
+// key must only be used for a single message. Authenticating two different
+// messages with the same key allows an attacker to forge authenticators for other
+// messages with the same key.
+//
+// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
+// used with a fixed key in order to generate one-time keys from a nonce.
+// However, in this package AES isn't used and the one-time key is specified
+// directly.
package poly1305 // import "golang.org/x/crypto/poly1305"
+
+import "crypto/subtle"
+
+// TagSize is the size, in bytes, of a poly1305 authenticator.
+const TagSize = 16
+
+// Verify returns true if mac is a valid authenticator for m with the given
+// key.
+func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
+	var tmp [16]byte
+	Sum(&tmp, m, key)
+	return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
+}
+
+// New returns a new MAC computing an authentication
+// tag of all data written to it with the given key.
+// This allows writing the message progressively instead
+// of passing it as a single slice. Common users should use
+// the Sum function instead.
+//
+// The key must be unique for each message, as authenticating
+// two different messages with the same key allows an attacker
+// to forge messages at will.
+func New(key *[32]byte) *MAC {
+	return &MAC{
+		mac:       newMAC(key),
+		finalized: false,
+	}
+}
+
+// MAC is an io.Writer computing an authentication tag
+// of the data written to it.
+//
+// MAC cannot be used like common hash.Hash implementations,
+// because using a poly1305 key twice breaks its security.
+// Therefore writing data to a running MAC after calling
+// Sum causes it to panic.
+type MAC struct {
+	mac // platform-dependent implementation
+
+	finalized bool
+}
+
+// Size returns the number of bytes Sum will return.
+func (h *MAC) Size() int { return TagSize }
+
+// Write adds more data to the running message authentication code.
+// It never returns an error.
+//
+// It must not be called after the first call of Sum.
+func (h *MAC) Write(p []byte) (n int, err error) { + if h.finalized { + panic("poly1305: write to MAC after Sum") + } + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return append(b, mac[:]...) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go new file mode 100644 index 00000000..2dbf42aa --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package poly1305 + +//go:noescape +func initialize(state *[7]uint64, key *[32]byte) + +//go:noescape +func update(state *[7]uint64, msg []byte) + +//go:noescape +func finalize(tag *[TagSize]byte, state *[7]uint64) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := newMAC(key) + h.Write(m) + h.Sum(out) +} + +func newMAC(key *[32]byte) (h mac) { + initialize(&h.state, key) + return +} + +type mac struct { + state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 } + + buffer [TagSize]byte + offset int +} + +func (h *mac) Write(p []byte) (n int, err error) { + n = len(p) + if h.offset > 0 { + remaining := TagSize - h.offset + if n < remaining { + h.offset += copy(h.buffer[h.offset:], p) + return n, nil + } + copy(h.buffer[h.offset:], p[:remaining]) + p = p[remaining:] + h.offset = 0 + update(&h.state, h.buffer[:]) + } + if nn := len(p) - (len(p) % TagSize); nn > 0 { + update(&h.state, p[:nn]) + p = p[nn:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return n, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.state + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s new file mode 100644 index 00000000..7d600f13 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -0,0 +1,148 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
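A minimal usage sketch for the poly1305 package above; the key literal is illustrative only, and a real key must be secret, uniformly random, and used for a single message:

package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte
	copy(key[:], "illustrative 32-byte one-time key") // placeholder, never reuse
	msg := []byte("hello, poly1305")

	var tag [16]byte
	poly1305.Sum(&tag, msg, &key)
	fmt.Printf("tag = %x\n", tag)
	fmt.Println("valid:", poly1305.Verify(&tag, msg, &key))
}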
+ +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +#define POLY1305_ADD(msg, h0, h1, h2) \ + ADDQ 0(msg), h0; \ + ADCQ 8(msg), h1; \ + ADCQ $1, h2; \ + LEAQ 16(msg), msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ + MOVQ r0, AX; \ + MULQ h0; \ + MOVQ AX, t0; \ + MOVQ DX, t1; \ + MOVQ r0, AX; \ + MULQ h1; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ r0, t2; \ + IMULQ h2, t2; \ + ADDQ DX, t2; \ + \ + MOVQ r1, AX; \ + MULQ h0; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ DX, h0; \ + MOVQ r1, t3; \ + IMULQ h2, t3; \ + MOVQ r1, AX; \ + MULQ h1; \ + ADDQ AX, t2; \ + ADCQ DX, t3; \ + ADDQ h0, t2; \ + ADCQ $0, t3; \ + \ + MOVQ t0, h0; \ + MOVQ t1, h1; \ + MOVQ t2, h2; \ + ANDQ $3, h2; \ + MOVQ t2, t0; \ + ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ + ADDQ t0, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2; \ + SHRQ $2, t3, t2; \ + SHRQ $2, t3; \ + ADDQ t2, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVQ state+0(FP), DI + MOVQ msg_base+8(FP), SI + MOVQ msg_len+16(FP), R15 + + MOVQ 0(DI), R8 // h0 + MOVQ 8(DI), R9 // h1 + MOVQ 16(DI), R10 // h2 + MOVQ 24(DI), R11 // r0 + MOVQ 32(DI), R12 // r1 + + CMPQ R15, $16 + JB bytes_between_0_and_15 + +loop: + POLY1305_ADD(SI, R8, R9, R10) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) + SUBQ $16, R15 + CMPQ R15, $16 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $1, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $8, BX, CX + SHLQ $8, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0, R10 + MOVQ $16, R15 + JMP multiply + +done: + MOVQ R8, 0(DI) + MOVQ R9, 8(DI) + MOVQ R10, 16(DI) + RET + +// func initialize(state *[7]uint64, key *[32]byte) +TEXT ·initialize(SB), $0-16 + MOVQ state+0(FP), DI + MOVQ key+8(FP), SI + + // state[0...7] is initialized with zero + MOVOU 0(SI), X0 + MOVOU 16(SI), X1 + MOVOU ·poly1305Mask<>(SB), X2 + PAND X2, X0 + MOVOU X0, 24(DI) + MOVOU X1, 40(DI) + RET + +// func finalize(tag *[TagSize]byte, state *[7]uint64) +TEXT ·finalize(SB), $0-16 + MOVQ tag+0(FP), DI + MOVQ state+8(FP), SI + + MOVQ 0(SI), AX + MOVQ 8(SI), BX + MOVQ 16(SI), CX + MOVQ AX, R8 + MOVQ BX, R9 + SUBQ $0xFFFFFFFFFFFFFFFB, AX + SBBQ $0xFFFFFFFFFFFFFFFF, BX + SBBQ $3, CX + CMOVQCS R8, AX + CMOVQCS R9, BX + ADDQ 40(SI), AX + ADCQ 48(SI), BX + + MOVQ AX, 0(DI) + MOVQ BX, 8(DI) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go new file mode 100644 index 00000000..5dc321c2 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,!gccgo,!appengine,!nacl + +package poly1305 + +// This function is implemented in sum_arm.s +//go:noescape +func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. 
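+//
+// Editorial note (not part of the upstream file): on ARM only this one-shot
+// Sum is assembly-backed; the incremental MAC type falls back to the generic
+// code via mac_noasm.go, whose build tags match arm. The wrapper below also
+// guards against taking &m[0] on an empty slice before handing the pointer
+// to the assembly.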
+func Sum(out *[16]byte, m []byte, key *[32]byte) { + var mPtr *byte + if len(m) > 0 { + mPtr = &m[0] + } + poly1305_auth_armv6(out, mPtr, uint32(len(m)), key) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/vendor/golang.org/x/crypto/poly1305/sum_arm.s new file mode 100644 index 00000000..f70b4ac4 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.s @@ -0,0 +1,427 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,!gccgo,!appengine,!nacl + +#include "textflag.h" + +// This code was translated into a form compatible with 5a from the public +// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305. + +DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff +DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03 +DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff +DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff +DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff +GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20 + +// Warning: the linker may use R11 to synthesize certain instructions. Please +// take care and verify that no synthetic instructions use it. + +TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0 + // Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It + // might look like it's only 60 bytes of space but the final four bytes + // will be written by another function.) We need to skip over four + // bytes of stack because that's saving the value of 'g'. + ADD $4, R13, R8 + MOVM.IB [R4-R7], (R8) + MOVM.IA.W (R1), [R2-R5] + MOVW $·poly1305_init_constants_armv6<>(SB), R7 + MOVW R2, R8 + MOVW R2>>26, R9 + MOVW R3>>20, g + MOVW R4>>14, R11 + MOVW R5>>8, R12 + ORR R3<<6, R9, R9 + ORR R4<<12, g, g + ORR R5<<18, R11, R11 + MOVM.IA (R7), [R2-R6] + AND R8, R2, R2 + AND R9, R3, R3 + AND g, R4, R4 + AND R11, R5, R5 + AND R12, R6, R6 + MOVM.IA.W [R2-R6], (R0) + EOR R2, R2, R2 + EOR R3, R3, R3 + EOR R4, R4, R4 + EOR R5, R5, R5 + EOR R6, R6, R6 + MOVM.IA.W [R2-R6], (R0) + MOVM.IA.W (R1), [R2-R5] + MOVM.IA [R2-R6], (R0) + ADD $20, R13, R0 + MOVM.DA (R0), [R4-R7] + RET + +#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \ + MOVBU (offset+0)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+0)(Rdst); \ + MOVBU (offset+1)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+1)(Rdst); \ + MOVBU (offset+2)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+2)(Rdst); \ + MOVBU (offset+3)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+3)(Rdst) + +TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0 + // Needs 24 bytes of stack for saved registers and then 88 bytes of + // scratch space after that. We assume that 24 bytes at (R13) have + // already been used: four bytes for the link register saved in the + // prelude of poly1305_auth_armv6, four bytes for saving the value of g + // in that function and 16 bytes of scratch space used around + // poly1305_finish_ext_armv6_skip1. 
+ ADD $24, R13, R12 + MOVM.IB [R4-R8, R14], (R12) + MOVW R0, 88(R13) + MOVW R1, 92(R13) + MOVW R2, 96(R13) + MOVW R1, R14 + MOVW R2, R12 + MOVW 56(R0), R8 + WORD $0xe1180008 // TST R8, R8 not working see issue 5921 + EOR R6, R6, R6 + MOVW.EQ $(1<<24), R6 + MOVW R6, 84(R13) + ADD $116, R13, g + MOVM.IA (R0), [R0-R9] + MOVM.IA [R0-R4], (g) + CMP $16, R12 + BLO poly1305_blocks_armv6_done + +poly1305_blocks_armv6_mainloop: + WORD $0xe31e0003 // TST R14, #3 not working see issue 5921 + BEQ poly1305_blocks_armv6_mainloop_aligned + ADD $100, R13, g + MOVW_UNALIGNED(R14, g, R0, 0) + MOVW_UNALIGNED(R14, g, R0, 4) + MOVW_UNALIGNED(R14, g, R0, 8) + MOVW_UNALIGNED(R14, g, R0, 12) + MOVM.IA (g), [R0-R3] + ADD $16, R14 + B poly1305_blocks_armv6_mainloop_loaded + +poly1305_blocks_armv6_mainloop_aligned: + MOVM.IA.W (R14), [R0-R3] + +poly1305_blocks_armv6_mainloop_loaded: + MOVW R0>>26, g + MOVW R1>>20, R11 + MOVW R2>>14, R12 + MOVW R14, 92(R13) + MOVW R3>>8, R4 + ORR R1<<6, g, g + ORR R2<<12, R11, R11 + ORR R3<<18, R12, R12 + BIC $0xfc000000, R0, R0 + BIC $0xfc000000, g, g + MOVW 84(R13), R3 + BIC $0xfc000000, R11, R11 + BIC $0xfc000000, R12, R12 + ADD R0, R5, R5 + ADD g, R6, R6 + ORR R3, R4, R4 + ADD R11, R7, R7 + ADD $116, R13, R14 + ADD R12, R8, R8 + ADD R4, R9, R9 + MOVM.IA (R14), [R0-R4] + MULLU R4, R5, (R11, g) + MULLU R3, R5, (R14, R12) + MULALU R3, R6, (R11, g) + MULALU R2, R6, (R14, R12) + MULALU R2, R7, (R11, g) + MULALU R1, R7, (R14, R12) + ADD R4<<2, R4, R4 + ADD R3<<2, R3, R3 + MULALU R1, R8, (R11, g) + MULALU R0, R8, (R14, R12) + MULALU R0, R9, (R11, g) + MULALU R4, R9, (R14, R12) + MOVW g, 76(R13) + MOVW R11, 80(R13) + MOVW R12, 68(R13) + MOVW R14, 72(R13) + MULLU R2, R5, (R11, g) + MULLU R1, R5, (R14, R12) + MULALU R1, R6, (R11, g) + MULALU R0, R6, (R14, R12) + MULALU R0, R7, (R11, g) + MULALU R4, R7, (R14, R12) + ADD R2<<2, R2, R2 + ADD R1<<2, R1, R1 + MULALU R4, R8, (R11, g) + MULALU R3, R8, (R14, R12) + MULALU R3, R9, (R11, g) + MULALU R2, R9, (R14, R12) + MOVW g, 60(R13) + MOVW R11, 64(R13) + MOVW R12, 52(R13) + MOVW R14, 56(R13) + MULLU R0, R5, (R11, g) + MULALU R4, R6, (R11, g) + MULALU R3, R7, (R11, g) + MULALU R2, R8, (R11, g) + MULALU R1, R9, (R11, g) + ADD $52, R13, R0 + MOVM.IA (R0), [R0-R7] + MOVW g>>26, R12 + MOVW R4>>26, R14 + ORR R11<<6, R12, R12 + ORR R5<<6, R14, R14 + BIC $0xfc000000, g, g + BIC $0xfc000000, R4, R4 + ADD.S R12, R0, R0 + ADC $0, R1, R1 + ADD.S R14, R6, R6 + ADC $0, R7, R7 + MOVW R0>>26, R12 + MOVW R6>>26, R14 + ORR R1<<6, R12, R12 + ORR R7<<6, R14, R14 + BIC $0xfc000000, R0, R0 + BIC $0xfc000000, R6, R6 + ADD R14<<2, R14, R14 + ADD.S R12, R2, R2 + ADC $0, R3, R3 + ADD R14, g, g + MOVW R2>>26, R12 + MOVW g>>26, R14 + ORR R3<<6, R12, R12 + BIC $0xfc000000, g, R5 + BIC $0xfc000000, R2, R7 + ADD R12, R4, R4 + ADD R14, R0, R0 + MOVW R4>>26, R12 + BIC $0xfc000000, R4, R8 + ADD R12, R6, R9 + MOVW 96(R13), R12 + MOVW 92(R13), R14 + MOVW R0, R6 + CMP $32, R12 + SUB $16, R12, R12 + MOVW R12, 96(R13) + BHS poly1305_blocks_armv6_mainloop + +poly1305_blocks_armv6_done: + MOVW 88(R13), R12 + MOVW R5, 20(R12) + MOVW R6, 24(R12) + MOVW R7, 28(R12) + MOVW R8, 32(R12) + MOVW R9, 36(R12) + ADD $48, R13, R0 + MOVM.DA (R0), [R4-R8, R14] + RET + +#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \ + MOVBU.P 1(Rsrc), Rtmp; \ + MOVBU.P Rtmp, 1(Rdst); \ + MOVBU.P 1(Rsrc), Rtmp; \ + MOVBU.P Rtmp, 1(Rdst) + +#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \ + MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \ + MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) + +// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen 
uint32, key *[32]key) +TEXT ·poly1305_auth_armv6(SB), $196-16 + // The value 196, just above, is the sum of 64 (the size of the context + // structure) and 132 (the amount of stack needed). + // + // At this point, the stack pointer (R13) has been moved down. It + // points to the saved link register and there's 196 bytes of free + // space above it. + // + // The stack for this function looks like: + // + // +--------------------- + // | + // | 64 bytes of context structure + // | + // +--------------------- + // | + // | 112 bytes for poly1305_blocks_armv6 + // | + // +--------------------- + // | 16 bytes of final block, constructed at + // | poly1305_finish_ext_armv6_skip8 + // +--------------------- + // | four bytes of saved 'g' + // +--------------------- + // | lr, saved by prelude <- R13 points here + // +--------------------- + MOVW g, 4(R13) + + MOVW out+0(FP), R4 + MOVW m+4(FP), R5 + MOVW mlen+8(FP), R6 + MOVW key+12(FP), R7 + + ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112 + MOVW R7, R1 + + // poly1305_init_ext_armv6 will write to the stack from R13+4, but + // that's ok because none of the other values have been written yet. + BL poly1305_init_ext_armv6<>(SB) + BIC.S $15, R6, R2 + BEQ poly1305_auth_armv6_noblocks + ADD $136, R13, R0 + MOVW R5, R1 + ADD R2, R5, R5 + SUB R2, R6, R6 + BL poly1305_blocks_armv6<>(SB) + +poly1305_auth_armv6_noblocks: + ADD $136, R13, R0 + MOVW R5, R1 + MOVW R6, R2 + MOVW R4, R3 + + MOVW R0, R5 + MOVW R1, R6 + MOVW R2, R7 + MOVW R3, R8 + AND.S R2, R2, R2 + BEQ poly1305_finish_ext_armv6_noremaining + EOR R0, R0 + ADD $8, R13, R9 // 8 = offset to 16 byte scratch space + MOVW R0, (R9) + MOVW R0, 4(R9) + MOVW R0, 8(R9) + MOVW R0, 12(R9) + WORD $0xe3110003 // TST R1, #3 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_aligned + WORD $0xe3120008 // TST R2, #8 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip8 + MOVWP_UNALIGNED(R1, R9, g) + MOVWP_UNALIGNED(R1, R9, g) + +poly1305_finish_ext_armv6_skip8: + WORD $0xe3120004 // TST $4, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip4 + MOVWP_UNALIGNED(R1, R9, g) + +poly1305_finish_ext_armv6_skip4: + WORD $0xe3120002 // TST $2, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip2 + MOVHUP_UNALIGNED(R1, R9, g) + B poly1305_finish_ext_armv6_skip2 + +poly1305_finish_ext_armv6_aligned: + WORD $0xe3120008 // TST R2, #8 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip8_aligned + MOVM.IA.W (R1), [g-R11] + MOVM.IA.W [g-R11], (R9) + +poly1305_finish_ext_armv6_skip8_aligned: + WORD $0xe3120004 // TST $4, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip4_aligned + MOVW.P 4(R1), g + MOVW.P g, 4(R9) + +poly1305_finish_ext_armv6_skip4_aligned: + WORD $0xe3120002 // TST $2, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip2 + MOVHU.P 2(R1), g + MOVH.P g, 2(R9) + +poly1305_finish_ext_armv6_skip2: + WORD $0xe3120001 // TST $1, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip1 + MOVBU.P 1(R1), g + MOVBU.P g, 1(R9) + +poly1305_finish_ext_armv6_skip1: + MOVW $1, R11 + MOVBU R11, 0(R9) + MOVW R11, 56(R5) + MOVW R5, R0 + ADD $8, R13, R1 + MOVW $16, R2 + BL poly1305_blocks_armv6<>(SB) + +poly1305_finish_ext_armv6_noremaining: + MOVW 20(R5), R0 + MOVW 24(R5), R1 + MOVW 28(R5), R2 + MOVW 32(R5), R3 + MOVW 36(R5), R4 + MOVW R4>>26, R12 + BIC $0xfc000000, R4, R4 + ADD R12<<2, R12, R12 + ADD R12, R0, R0 + MOVW R0>>26, R12 + BIC $0xfc000000, R0, R0 + ADD R12, R1, R1 + MOVW R1>>26, R12 + BIC $0xfc000000, R1, R1 + ADD 
R12, R2, R2 + MOVW R2>>26, R12 + BIC $0xfc000000, R2, R2 + ADD R12, R3, R3 + MOVW R3>>26, R12 + BIC $0xfc000000, R3, R3 + ADD R12, R4, R4 + ADD $5, R0, R6 + MOVW R6>>26, R12 + BIC $0xfc000000, R6, R6 + ADD R12, R1, R7 + MOVW R7>>26, R12 + BIC $0xfc000000, R7, R7 + ADD R12, R2, g + MOVW g>>26, R12 + BIC $0xfc000000, g, g + ADD R12, R3, R11 + MOVW $-(1<<26), R12 + ADD R11>>26, R12, R12 + BIC $0xfc000000, R11, R11 + ADD R12, R4, R9 + MOVW R9>>31, R12 + SUB $1, R12 + AND R12, R6, R6 + AND R12, R7, R7 + AND R12, g, g + AND R12, R11, R11 + AND R12, R9, R9 + MVN R12, R12 + AND R12, R0, R0 + AND R12, R1, R1 + AND R12, R2, R2 + AND R12, R3, R3 + AND R12, R4, R4 + ORR R6, R0, R0 + ORR R7, R1, R1 + ORR g, R2, R2 + ORR R11, R3, R3 + ORR R9, R4, R4 + ORR R1<<26, R0, R0 + MOVW R1>>6, R1 + ORR R2<<20, R1, R1 + MOVW R2>>12, R2 + ORR R3<<14, R2, R2 + MOVW R3>>18, R3 + ORR R4<<8, R3, R3 + MOVW 40(R5), R6 + MOVW 44(R5), R7 + MOVW 48(R5), g + MOVW 52(R5), R11 + ADD.S R6, R0, R0 + ADC.S R7, R1, R1 + ADC.S g, R2, R2 + ADC.S R11, R3, R3 + MOVM.IA [R0-R3], (R8) + MOVW R5, R12 + EOR R0, R0, R0 + EOR R1, R1, R1 + EOR R2, R2, R2 + EOR R3, R3, R3 + EOR R4, R4, R4 + EOR R5, R5, R5 + EOR R6, R6, R6 + EOR R7, R7, R7 + MOVM.IA.W [R0-R7], (R12) + MOVM.IA [R0-R7], (R12) + MOVW 4(R13), g + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go new file mode 100644 index 00000000..bab76ef0 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go @@ -0,0 +1,172 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package poly1305 + +import "encoding/binary" + +const ( + msgBlock = uint32(1 << 24) + finalBlock = uint32(0) +) + +// sumGeneric generates an authenticator for msg using a one-time key and +// puts the 16-byte result into out. This is the generic implementation of +// Sum and should be called if no assembly implementation is available. 
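+//
+// Editorial note (not part of the upstream file): the generic code works in
+// radix 2^26, holding the 130-bit accumulator h and the clamped key half r
+// as five 26-bit limbs. A full 16-byte block carries an implicit 1 bit at
+// position 128; limb h4 covers bits 104..129, so that bit is bit 24 of h4,
+// which is exactly what OR-ing in msgBlock (1 << 24) does in updateGeneric:
+//
+//	h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | flag
+//
+// A trailing partial block instead has a literal 0x01 byte appended inside
+// the buffer and is processed with finalBlock (0), since the 1 bit then
+// already lies inside the block itself.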
+func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { + h := newMACGeneric(key) + h.Write(msg) + h.Sum(out) +} + +func newMACGeneric(key *[32]byte) (h macGeneric) { + h.r[0] = binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff + h.r[1] = (binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03 + h.r[2] = (binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff + h.r[3] = (binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff + h.r[4] = (binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff + + h.s[0] = binary.LittleEndian.Uint32(key[16:]) + h.s[1] = binary.LittleEndian.Uint32(key[20:]) + h.s[2] = binary.LittleEndian.Uint32(key[24:]) + h.s[3] = binary.LittleEndian.Uint32(key[28:]) + return +} + +type macGeneric struct { + h, r [5]uint32 + s [4]uint32 + + buffer [TagSize]byte + offset int +} + +func (h *macGeneric) Write(p []byte) (n int, err error) { + n = len(p) + if h.offset > 0 { + remaining := TagSize - h.offset + if n < remaining { + h.offset += copy(h.buffer[h.offset:], p) + return n, nil + } + copy(h.buffer[h.offset:], p[:remaining]) + p = p[remaining:] + h.offset = 0 + updateGeneric(h.buffer[:], msgBlock, &(h.h), &(h.r)) + } + if nn := len(p) - (len(p) % TagSize); nn > 0 { + updateGeneric(p, msgBlock, &(h.h), &(h.r)) + p = p[nn:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return n, nil +} + +func (h *macGeneric) Sum(out *[16]byte) { + H, R := h.h, h.r + if h.offset > 0 { + var buffer [TagSize]byte + copy(buffer[:], h.buffer[:h.offset]) + buffer[h.offset] = 1 // invariant: h.offset < TagSize + updateGeneric(buffer[:], finalBlock, &H, &R) + } + finalizeGeneric(out, &H, &(h.s)) +} + +func updateGeneric(msg []byte, flag uint32, h, r *[5]uint32) { + h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4] + r0, r1, r2, r3, r4 := uint64(r[0]), uint64(r[1]), uint64(r[2]), uint64(r[3]), uint64(r[4]) + R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5 + + for len(msg) >= TagSize { + // h += msg + h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff + h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff + h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff + h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff + h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | flag + + // h *= r + d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) + d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) + d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) + d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) + d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) + + // h %= p + h0 = uint32(d0) & 0x3ffffff + h1 = uint32(d1) & 0x3ffffff + h2 = uint32(d2) & 0x3ffffff + h3 = uint32(d3) & 0x3ffffff + h4 = uint32(d4) & 0x3ffffff + + h0 += uint32(d4>>26) * 5 + h1 += h0 >> 26 + h0 = h0 & 0x3ffffff + + msg = msg[TagSize:] + } + + h[0], h[1], h[2], h[3], h[4] = h0, h1, h2, h3, h4 +} + +func finalizeGeneric(out *[TagSize]byte, h *[5]uint32, s *[4]uint32) { + h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4] + + // h %= p reduction + h2 += h1 >> 26 + h1 &= 0x3ffffff + h3 += h2 >> 26 + h2 &= 0x3ffffff + h4 += h3 >> 26 + h3 &= 0x3ffffff + h0 += 5 * (h4 >> 26) + h4 &= 0x3ffffff + h1 += h0 >> 26 + h0 &= 0x3ffffff + + // h - p + t0 := h0 + 5 + t1 := h1 + (t0 >> 26) + t2 := 
h2 + (t1 >> 26) + t3 := h3 + (t2 >> 26) + t4 := h4 + (t3 >> 26) - (1 << 26) + t0 &= 0x3ffffff + t1 &= 0x3ffffff + t2 &= 0x3ffffff + t3 &= 0x3ffffff + + // select h if h < p else h - p + t_mask := (t4 >> 31) - 1 + h_mask := ^t_mask + h0 = (h0 & h_mask) | (t0 & t_mask) + h1 = (h1 & h_mask) | (t1 & t_mask) + h2 = (h2 & h_mask) | (t2 & t_mask) + h3 = (h3 & h_mask) | (t3 & t_mask) + h4 = (h4 & h_mask) | (t4 & t_mask) + + // h %= 2^128 + h0 |= h1 << 26 + h1 = ((h1 >> 6) | (h2 << 20)) + h2 = ((h2 >> 12) | (h3 << 14)) + h3 = ((h3 >> 18) | (h4 << 8)) + + // s: the s part of the key + // tag = (h + s) % (2^128) + t := uint64(h0) + uint64(s[0]) + h0 = uint32(t) + t = uint64(h1) + uint64(s[1]) + (t >> 32) + h1 = uint32(t) + t = uint64(h2) + uint64(s[2]) + (t >> 32) + h2 = uint32(t) + t = uint64(h3) + uint64(s[3]) + (t >> 32) + h3 = uint32(t) + + binary.LittleEndian.PutUint32(out[0:], h0) + binary.LittleEndian.PutUint32(out[4:], h1) + binary.LittleEndian.PutUint32(out[8:], h2) + binary.LittleEndian.PutUint32(out[12:], h3) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go new file mode 100644 index 00000000..8a9c2070 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,!go1.11 !arm,!amd64,!s390x,!ppc64le gccgo appengine nacl + +package poly1305 + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) { + h := newMAC(key) + h.Write(msg) + h.Sum(out) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go new file mode 100644 index 00000000..2402b637 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -0,0 +1,68 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64le,!gccgo,!appengine + +package poly1305 + +//go:noescape +func initialize(state *[7]uint64, key *[32]byte) + +//go:noescape +func update(state *[7]uint64, msg []byte) + +//go:noescape +func finalize(tag *[TagSize]byte, state *[7]uint64) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. 
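+//
+// Editorial note (not part of the upstream file): this ppc64le wrapper is
+// structurally identical to the amd64 one, with the same [7]uint64 state
+// (h0, h1, h2, r0, r1, pad0, pad1) and the same 16-byte block buffering in
+// Write; only the assembly behind initialize, update and finalize differs.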
+func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := newMAC(key) + h.Write(m) + h.Sum(out) +} + +func newMAC(key *[32]byte) (h mac) { + initialize(&h.state, key) + return +} + +type mac struct { + state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 } + + buffer [TagSize]byte + offset int +} + +func (h *mac) Write(p []byte) (n int, err error) { + n = len(p) + if h.offset > 0 { + remaining := TagSize - h.offset + if n < remaining { + h.offset += copy(h.buffer[h.offset:], p) + return n, nil + } + copy(h.buffer[h.offset:], p[:remaining]) + p = p[remaining:] + h.offset = 0 + update(&h.state, h.buffer[:]) + } + if nn := len(p) - (len(p) % TagSize); nn > 0 { + update(&h.state, p[:nn]) + p = p[nn:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return n, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.state + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s new file mode 100644 index 00000000..55c7167e --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -0,0 +1,247 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64le,!gccgo,!appengine + +#include "textflag.h" + +// This was ported from the amd64 implementation. + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + MOVD (msg), t0; \ + MOVD 8(msg), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULLD r0, h1, t4; \ + MULHDU r0, h0, t1; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + ADDZE t5; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADD t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + MOVD t0, h0; \ + MOVD t1, h1; \ + ADDZE t3; \ + ANDCC $3, t2, h2; \ + AND t2, t4, t0; \ + ADDC t0, h0, h0; \ + ADDE t3, h1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) + +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP $0, R5 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put into R17 (h1) + MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD R21, R23, 
R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + MOVD (R4), R16 + + CMP $0, R17 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. + BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP $0, R5 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDE $0, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET + +// func initialize(state *[7]uint64, key *[32]byte) +TEXT ·initialize(SB), $0-16 + MOVD state+0(FP), R3 + MOVD key+8(FP), R4 + + // state[0...7] is initialized with zero + // Load key + MOVD 0(R4), R5 + MOVD 8(R4), R6 + MOVD 16(R4), R7 + MOVD 24(R4), R8 + + // Address of key mask + MOVD $·poly1305Mask<>(SB), R9 + + // Save original key in state + MOVD R7, 40(R3) + MOVD R8, 48(R3) + + // Get mask + MOVD (R9), R7 + MOVD 8(R9), R8 + + // And with key + AND R5, R7, R5 + AND R6, R8, R6 + + // Save masked key in state + MOVD R5, 24(R3) + MOVD R6, 32(R3) + RET + +// func finalize(tag *[TagSize]byte, state *[7]uint64) +TEXT ·finalize(SB), $0-16 + MOVD tag+0(FP), R3 + MOVD state+8(FP), R4 + + // Get h0, h1, h2 from state + MOVD 0(R4), R5 + MOVD 8(R4), R6 + MOVD 16(R4), R7 + + // Save h0, h1 + MOVD R5, R8 + MOVD R6, R9 + MOVD $3, R20 + MOVD $-1, R21 + SUBC $-5, R5 + SUBE R21, R6 + SUBE R20, R7 + MOVD $0, R21 + SUBZE R21 + + // Check for carry + CMP $0, R21 + ISEL $2, R5, R8, R5 + ISEL $2, R6, R9, R6 + MOVD 40(R4), R8 + MOVD 48(R4), R9 + ADDC R8, R5 + ADDE R9, R6 + MOVD R5, 0(R3) + MOVD R6, 8(R3) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go new file mode 100644 index 00000000..ec99e07e --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,go1.11,!gccgo,!appengine + +package poly1305 + +import ( + "golang.org/x/sys/cpu" +) + +// poly1305vx is an assembly implementation of Poly1305 that uses vector +// instructions. It must only be called if the vector facility (vx) is +// available. +//go:noescape +func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte) + +// poly1305vmsl is an assembly implementation of Poly1305 that uses vector +// instructions, including VMSL. It must only be called if the vector facility (vx) is +// available and if VMSL is supported. +//go:noescape +func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. 
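+//
+// Editorial note (not part of the upstream file): Sum below dispatches on
+// CPU features at run time. The VMSL variant is chosen only when the
+// vector-enhancements facility is present (cpu.S390X.HasVXE) and the message
+// is longer than 256 bytes; plain VX handles other vector-capable machines,
+// and everything else falls back to sumGeneric. The 256-byte cutoff is this
+// file's own threshold, presumably the point where VMSL's extra setup cost
+// pays off.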
+func Sum(out *[16]byte, m []byte, key *[32]byte) { + if cpu.S390X.HasVX { + var mPtr *byte + if len(m) > 0 { + mPtr = &m[0] + } + if cpu.S390X.HasVXE && len(m) > 256 { + poly1305vmsl(out, mPtr, uint64(len(m)), key) + } else { + poly1305vx(out, mPtr, uint64(len(m)), key) + } + } else { + sumGeneric(out, m, key) + } +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s new file mode 100644 index 00000000..ca5a309d --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -0,0 +1,378 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,go1.11,!gccgo,!appengine + +#include "textflag.h" + +// Implementation of Poly1305 using the vector facility (vx). + +// constants +#define MOD26 V0 +#define EX0 V1 +#define EX1 V2 +#define EX2 V3 + +// temporaries +#define T_0 V4 +#define T_1 V5 +#define T_2 V6 +#define T_3 V7 +#define T_4 V8 + +// key (r) +#define R_0 V9 +#define R_1 V10 +#define R_2 V11 +#define R_3 V12 +#define R_4 V13 +#define R5_1 V14 +#define R5_2 V15 +#define R5_3 V16 +#define R5_4 V17 +#define RSAVE_0 R5 +#define RSAVE_1 R6 +#define RSAVE_2 R7 +#define RSAVE_3 R8 +#define RSAVE_4 R9 +#define R5SAVE_1 V28 +#define R5SAVE_2 V29 +#define R5SAVE_3 V30 +#define R5SAVE_4 V31 + +// message block +#define F_0 V18 +#define F_1 V19 +#define F_2 V20 +#define F_3 V21 +#define F_4 V22 + +// accumulator +#define H_0 V23 +#define H_1 V24 +#define H_2 V25 +#define H_3 V26 +#define H_4 V27 + +GLOBL ·keyMask<>(SB), RODATA, $16 +DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f +DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f + +GLOBL ·bswapMask<>(SB), RODATA, $16 +DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 +DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 + +GLOBL ·constants<>(SB), RODATA, $64 +// MOD26 +DATA ·constants<>+0(SB)/8, $0x3ffffff +DATA ·constants<>+8(SB)/8, $0x3ffffff +// EX0 +DATA ·constants<>+16(SB)/8, $0x0006050403020100 +DATA ·constants<>+24(SB)/8, $0x1016151413121110 +// EX1 +DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706 +DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716 +// EX2 +DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d +DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d + +// h = (f*g) % (2**130-5) [partial reduction] +#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ + VMLOF f0, g0, h0 \ + VMLOF f0, g1, h1 \ + VMLOF f0, g2, h2 \ + VMLOF f0, g3, h3 \ + VMLOF f0, g4, h4 \ + VMLOF f1, g54, T_0 \ + VMLOF f1, g0, T_1 \ + VMLOF f1, g1, T_2 \ + VMLOF f1, g2, T_3 \ + VMLOF f1, g3, T_4 \ + VMALOF f2, g53, h0, h0 \ + VMALOF f2, g54, h1, h1 \ + VMALOF f2, g0, h2, h2 \ + VMALOF f2, g1, h3, h3 \ + VMALOF f2, g2, h4, h4 \ + VMALOF f3, g52, T_0, T_0 \ + VMALOF f3, g53, T_1, T_1 \ + VMALOF f3, g54, T_2, T_2 \ + VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g1, T_4, T_4 \ + VMALOF f4, g51, h0, h0 \ + VMALOF f4, g52, h1, h1 \ + VMALOF f4, g53, h2, h2 \ + VMALOF f4, g54, h3, h3 \ + VMALOF f4, g0, h4, h4 \ + VAG T_0, h0, h0 \ + VAG T_1, h1, h1 \ + VAG T_2, h2, h2 \ + VAG T_3, h3, h3 \ + VAG T_4, h4, h4 + +// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4 +#define REDUCE(h0, h1, h2, h3, h4) \ + VESRLG $26, h0, T_0 \ + VESRLG $26, h3, T_1 \ + VN MOD26, h0, h0 \ + VN MOD26, h3, h3 \ + VAG T_0, h1, h1 \ + VAG T_1, h4, h4 \ + VESRLG $26, h1, T_2 \ + VESRLG $26, h4, T_3 \ + VN MOD26, h1, h1 \ + VN MOD26, h4, h4 \ + VESLG $2, T_3, T_4 \ + VAG T_3, T_4, T_4 \ + 
VAG T_2, h2, h2 \ + VAG T_4, h0, h0 \ + VESRLG $26, h2, T_0 \ + VESRLG $26, h0, T_1 \ + VN MOD26, h2, h2 \ + VN MOD26, h0, h0 \ + VAG T_0, h3, h3 \ + VAG T_1, h1, h1 \ + VESRLG $26, h3, T_2 \ + VN MOD26, h3, h3 \ + VAG T_2, h4, h4 + +// expand in0 into d[0] and in1 into d[1] +#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ + VGBM $0x0707, d1 \ // d1=tmp + VPERM in0, in1, EX2, d4 \ + VPERM in0, in1, EX0, d0 \ + VPERM in0, in1, EX1, d2 \ + VN d1, d4, d4 \ + VESRLG $26, d0, d1 \ + VESRLG $30, d2, d3 \ + VESRLG $4, d2, d2 \ + VN MOD26, d0, d0 \ + VN MOD26, d1, d1 \ + VN MOD26, d2, d2 \ + VN MOD26, d3, d3 + +// pack h4:h0 into h1:h0 (no carry) +#define PACK(h0, h1, h2, h3, h4) \ + VESLG $26, h1, h1 \ + VESLG $26, h3, h3 \ + VO h0, h1, h0 \ + VO h2, h3, h2 \ + VESLG $4, h2, h2 \ + VLEIB $7, $48, h1 \ + VSLB h1, h2, h2 \ + VO h0, h2, h0 \ + VLEIB $7, $104, h1 \ + VSLB h1, h4, h3 \ + VO h3, h0, h0 \ + VLEIB $7, $24, h1 \ + VSRLB h1, h4, h1 + +// if h > 2**130-5 then h -= 2**130-5 +#define MOD(h0, h1, t0, t1, t2) \ + VZERO t0 \ + VLEIG $1, $5, t0 \ + VACCQ h0, t0, t1 \ + VAQ h0, t0, t0 \ + VONE t2 \ + VLEIG $1, $-4, t2 \ + VAQ t2, t1, t1 \ + VACCQ h1, t1, t1 \ + VONE t2 \ + VAQ t2, t1, t1 \ + VN h0, t1, t2 \ + VNC t0, t1, t1 \ + VO t1, t2, h0 + +// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]key) +TEXT ·poly1305vx(SB), $0-32 + // This code processes up to 2 blocks (32 bytes) per iteration + // using the algorithm described in: + // NEON crypto, Daniel J. Bernstein & Peter Schwabe + // https://cryptojedi.org/papers/neoncrypto-20120320.pdf + LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key + + // load MOD26, EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), MOD26, EX2 + + // setup r + VL (R4), T_0 + MOVD $·keyMask<>(SB), R6 + VL (R6), T_1 + VN T_0, T_1, T_0 + EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4) + + // setup r*5 + VLEIG $0, $5, T_0 + VLEIG $1, $5, T_0 + + // store r (for final block) + VMLOF T_0, R_1, R5SAVE_1 + VMLOF T_0, R_2, R5SAVE_2 + VMLOF T_0, R_3, R5SAVE_3 + VMLOF T_0, R_4, R5SAVE_4 + VLGVG $0, R_0, RSAVE_0 + VLGVG $0, R_1, RSAVE_1 + VLGVG $0, R_2, RSAVE_2 + VLGVG $0, R_3, RSAVE_3 + VLGVG $0, R_4, RSAVE_4 + + // skip r**2 calculation + CMPBLE R3, $16, skip + + // calculate r**2 + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4) + REDUCE(H_0, H_1, H_2, H_3, H_4) + VLEIG $0, $5, T_0 + VLEIG $1, $5, T_0 + VMLOF T_0, H_1, R5_1 + VMLOF T_0, H_2, R5_2 + VMLOF T_0, H_3, R5_3 + VMLOF T_0, H_4, R5_4 + VLR H_0, R_0 + VLR H_1, R_1 + VLR H_2, R_2 + VLR H_3, R_3 + VLR H_4, R_4 + + // initialize h + VZERO H_0 + VZERO H_1 + VZERO H_2 + VZERO H_3 + VZERO H_4 + +loop: + CMPBLE R3, $32, b2 + VLM (R2), T_0, T_1 + SUB $32, R3 + MOVD $32(R2), R2 + EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + VLEIB $4, $1, F_4 + VLEIB $12, $1, F_4 + +multiply: + VAG H_0, F_0, F_0 + VAG H_1, F_1, F_1 + VAG H_2, F_2, F_2 + VAG H_3, F_3, F_3 + VAG H_4, F_4, F_4 + MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + REDUCE(H_0, H_1, H_2, H_3, H_4) + CMPBNE R3, $0, loop + +finish: + // sum vectors + VZERO T_0 + VSUMQG H_0, T_0, H_0 + VSUMQG H_1, T_0, H_1 + VSUMQG H_2, T_0, H_2 + VSUMQG H_3, T_0, H_3 + VSUMQG H_4, T_0, H_4 + + // h may be >= 2*(2**130-5) so we need to reduce it again + REDUCE(H_0, H_1, H_2, H_3, H_4) + + // carry h1->h4 + VESRLG $26, H_1, T_1 + VN MOD26, H_1, H_1 + VAQ T_1, H_2, H_2 + VESRLG $26, H_2, T_2 + VN MOD26, H_2, H_2 + VAQ T_2, H_3, H_3 + VESRLG 
$26, H_3, T_3 + VN MOD26, H_3, H_3 + VAQ T_3, H_4, H_4 + + // h is now < 2*(2**130-5) + // pack h into h1 (hi) and h0 (lo) + PACK(H_0, H_1, H_2, H_3, H_4) + + // if h > 2**130-5 then h -= 2**130-5 + MOD(H_0, H_1, T_0, T_1, T_2) + + // h += s + MOVD $·bswapMask<>(SB), R5 + VL (R5), T_1 + VL 16(R4), T_0 + VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) + VAQ T_0, H_0, H_0 + VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little) + VST H_0, (R1) + + RET + +b2: + CMPBLE R3, $16, b1 + + // 2 blocks remaining + SUB $17, R3 + VL (R2), T_0 + VLL R3, 16(R2), T_1 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, T_1 + EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + CMPBNE R3, $16, 2(PC) + VLEIB $12, $1, F_4 + VLEIB $4, $1, F_4 + + // setup [r²,r] + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, RSAVE_3, R_3 + VLVGG $1, RSAVE_4, R_4 + VPDI $0, R5_1, R5SAVE_1, R5_1 + VPDI $0, R5_2, R5SAVE_2, R5_2 + VPDI $0, R5_3, R5SAVE_3, R5_3 + VPDI $0, R5_4, R5SAVE_4, R5_4 + + MOVD $0, R3 + BR multiply + +skip: + VZERO H_0 + VZERO H_1 + VZERO H_2 + VZERO H_3 + VZERO H_4 + + CMPBEQ R3, $0, finish + +b1: + // 1 block remaining + SUB $1, R3 + VLL R3, (R2), T_0 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, T_0 + VZERO T_1 + EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + CMPBNE R3, $16, 2(PC) + VLEIB $4, $1, F_4 + VLEIG $1, $1, R_0 + VZERO R_1 + VZERO R_2 + VZERO R_3 + VZERO R_4 + VZERO R5_1 + VZERO R5_2 + VZERO R5_3 + VZERO R5_4 + + // setup [r, 1] + VLVGG $0, RSAVE_0, R_0 + VLVGG $0, RSAVE_1, R_1 + VLVGG $0, RSAVE_2, R_2 + VLVGG $0, RSAVE_3, R_3 + VLVGG $0, RSAVE_4, R_4 + VPDI $0, R5SAVE_1, R5_1, R5_1 + VPDI $0, R5SAVE_2, R5_2, R5_2 + VPDI $0, R5SAVE_3, R5_3, R5_3 + VPDI $0, R5SAVE_4, R5_4, R5_4 + + MOVD $0, R3 + BR multiply diff --git a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s new file mode 100644 index 00000000..e60bbc1d --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s @@ -0,0 +1,909 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,go1.11,!gccgo,!appengine + +#include "textflag.h" + +// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction. 
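+// (Editorial note, not part of the upstream file: per sum_s390x.go above,
+// this VMSL path is only reached for messages longer than 256 bytes on
+// machines that report the vector-enhancements facility.)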
+ +// constants +#define EX0 V1 +#define EX1 V2 +#define EX2 V3 + +// temporaries +#define T_0 V4 +#define T_1 V5 +#define T_2 V6 +#define T_3 V7 +#define T_4 V8 +#define T_5 V9 +#define T_6 V10 +#define T_7 V11 +#define T_8 V12 +#define T_9 V13 +#define T_10 V14 + +// r**2 & r**4 +#define R_0 V15 +#define R_1 V16 +#define R_2 V17 +#define R5_1 V18 +#define R5_2 V19 +// key (r) +#define RSAVE_0 R7 +#define RSAVE_1 R8 +#define RSAVE_2 R9 +#define R5SAVE_1 R10 +#define R5SAVE_2 R11 + +// message block +#define M0 V20 +#define M1 V21 +#define M2 V22 +#define M3 V23 +#define M4 V24 +#define M5 V25 + +// accumulator +#define H0_0 V26 +#define H1_0 V27 +#define H2_0 V28 +#define H0_1 V29 +#define H1_1 V30 +#define H2_1 V31 + +GLOBL ·keyMask<>(SB), RODATA, $16 +DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f +DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f + +GLOBL ·bswapMask<>(SB), RODATA, $16 +DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 +DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 + +GLOBL ·constants<>(SB), RODATA, $48 +// EX0 +DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f +DATA ·constants<>+8(SB)/8, $0x0000050403020100 +// EX1 +DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f +DATA ·constants<>+24(SB)/8, $0x00000a0908070605 +// EX2 +DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f +DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b + +GLOBL ·c<>(SB), RODATA, $48 +// EX0 +DATA ·c<>+0(SB)/8, $0x0000050403020100 +DATA ·c<>+8(SB)/8, $0x0000151413121110 +// EX1 +DATA ·c<>+16(SB)/8, $0x00000a0908070605 +DATA ·c<>+24(SB)/8, $0x00001a1918171615 +// EX2 +DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b +DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b + +GLOBL ·reduce<>(SB), RODATA, $32 +// 44 bit +DATA ·reduce<>+0(SB)/8, $0x0 +DATA ·reduce<>+8(SB)/8, $0xfffffffffff +// 42 bit +DATA ·reduce<>+16(SB)/8, $0x0 +DATA ·reduce<>+24(SB)/8, $0x3ffffffffff + +// h = (f*g) % (2**130-5) [partial reduction] +// uses T_0...T_9 temporary registers +// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2 +// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 +// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2 +#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \ + \ // Eliminate the dependency for the last 2 VMSLs + VMSLG m02_0, r_2, m4_2, m4_2 \ + VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined + VMSLG m02_0, r_0, m4_0, m4_0 \ + VMSLG m02_1, r5_2, V0, T_0 \ + VMSLG m02_0, r_1, m4_1, m4_1 \ + VMSLG m02_1, r_0, V0, T_1 \ + VMSLG m02_1, r_1, V0, T_2 \ + VMSLG m02_2, r5_1, V0, T_3 \ + VMSLG m02_2, r5_2, V0, T_4 \ + VMSLG m13_0, r_0, m5_0, m5_0 \ + VMSLG m13_1, r5_2, V0, T_5 \ + VMSLG m13_0, r_1, m5_1, m5_1 \ + VMSLG m13_1, r_0, V0, T_6 \ + VMSLG m13_1, r_1, V0, T_7 \ + VMSLG m13_2, r5_1, V0, T_8 \ + VMSLG m13_2, r5_2, V0, T_9 \ + VMSLG m02_2, r_0, m4_2, m4_2 \ + VMSLG m13_2, r_0, m5_2, m5_2 \ + VAQ m4_0, T_0, m02_0 \ + VAQ m4_1, T_1, m02_1 \ + VAQ m5_0, T_5, m13_0 \ + VAQ m5_1, T_6, m13_1 \ + VAQ m02_0, T_3, m02_0 \ + VAQ m02_1, T_4, m02_1 \ + VAQ m13_0, T_8, m13_0 \ + VAQ m13_1, T_9, m13_1 \ + VAQ m4_2, T_2, m02_2 \ + VAQ m5_2, T_7, m13_2 \ + +// SQUARE uses three limbs of r and r_2*5 to output square of r +// uses T_1, T_5 and T_7 temporary registers +// input: r_0, r_1, r_2, r5_2 +// temp: TEMP0, TEMP1, TEMP2 +// output: p0, p1, p2 +#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \ + VMSLG r_0, r_0, p0, p0 \ + VMSLG r_1, r5_2, V0, TEMP0 \ + VMSLG r_2, 
r5_2, p1, p1 \ + VMSLG r_0, r_1, V0, TEMP1 \ + VMSLG r_1, r_1, p2, p2 \ + VMSLG r_0, r_2, V0, TEMP2 \ + VAQ TEMP0, p0, p0 \ + VAQ TEMP1, p1, p1 \ + VAQ TEMP2, p2, p2 \ + VAQ TEMP0, p0, p0 \ + VAQ TEMP1, p1, p1 \ + VAQ TEMP2, p2, p2 \ + +// carry h0->h1->h2->h0 || h3->h4->h5->h3 +// uses T_2, T_4, T_5, T_7, T_8, T_9 +// t6, t7, t8, t9, t10, t11 +// input: h0, h1, h2, h3, h4, h5 +// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11 +// output: h0, h1, h2, h3, h4, h5 +#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \ + VLM (R12), t6, t7 \ // 44 and 42 bit clear mask + VLEIB $7, $0x28, t10 \ // 5 byte shift mask + VREPIB $4, t8 \ // 4 bit shift mask + VREPIB $2, t11 \ // 2 bit shift mask + VSRLB t10, h0, t0 \ // h0 byte shift + VSRLB t10, h1, t1 \ // h1 byte shift + VSRLB t10, h2, t2 \ // h2 byte shift + VSRLB t10, h3, t3 \ // h3 byte shift + VSRLB t10, h4, t4 \ // h4 byte shift + VSRLB t10, h5, t5 \ // h5 byte shift + VSRL t8, t0, t0 \ // h0 bit shift + VSRL t8, t1, t1 \ // h2 bit shift + VSRL t11, t2, t2 \ // h2 bit shift + VSRL t8, t3, t3 \ // h3 bit shift + VSRL t8, t4, t4 \ // h4 bit shift + VESLG $2, t2, t9 \ // h2 carry x5 + VSRL t11, t5, t5 \ // h5 bit shift + VN t6, h0, h0 \ // h0 clear carry + VAQ t2, t9, t2 \ // h2 carry x5 + VESLG $2, t5, t9 \ // h5 carry x5 + VN t6, h1, h1 \ // h1 clear carry + VN t7, h2, h2 \ // h2 clear carry + VAQ t5, t9, t5 \ // h5 carry x5 + VN t6, h3, h3 \ // h3 clear carry + VN t6, h4, h4 \ // h4 clear carry + VN t7, h5, h5 \ // h5 clear carry + VAQ t0, h1, h1 \ // h0->h1 + VAQ t3, h4, h4 \ // h3->h4 + VAQ t1, h2, h2 \ // h1->h2 + VAQ t4, h5, h5 \ // h4->h5 + VAQ t2, h0, h0 \ // h2->h0 + VAQ t5, h3, h3 \ // h5->h3 + VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves + VREPG $1, t7, t7 \ + VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5] + VSLDB $8, h1, h1, h1 \ + VSLDB $8, h2, h2, h2 \ + VO h0, h3, h3 \ + VO h1, h4, h4 \ + VO h2, h5, h5 \ + VESRLG $44, h3, t0 \ // 44 bit shift right + VESRLG $44, h4, t1 \ + VESRLG $42, h5, t2 \ + VN t6, h3, h3 \ // clear carry bits + VN t6, h4, h4 \ + VN t7, h5, h5 \ + VESLG $2, t2, t9 \ // multiply carry by 5 + VAQ t9, t2, t2 \ + VAQ t0, h4, h4 \ + VAQ t1, h5, h5 \ + VAQ t2, h3, h3 \ + +// carry h0->h1->h2->h0 +// input: h0, h1, h2 +// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8 +// output: h0, h1, h2 +#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \ + VLEIB $7, $0x28, t3 \ // 5 byte shift mask + VREPIB $4, t4 \ // 4 bit shift mask + VREPIB $2, t7 \ // 2 bit shift mask + VGBM $0x003F, t5 \ // mask to clear carry bits + VSRLB t3, h0, t0 \ + VSRLB t3, h1, t1 \ + VSRLB t3, h2, t2 \ + VESRLG $4, t5, t5 \ // 44 bit clear mask + VSRL t4, t0, t0 \ + VSRL t4, t1, t1 \ + VSRL t7, t2, t2 \ + VESRLG $2, t5, t6 \ // 42 bit clear mask + VESLG $2, t2, t8 \ + VAQ t8, t2, t2 \ + VN t5, h0, h0 \ + VN t5, h1, h1 \ + VN t6, h2, h2 \ + VAQ t0, h1, h1 \ + VAQ t1, h2, h2 \ + VAQ t2, h0, h0 \ + VSRLB t3, h0, t0 \ + VSRLB t3, h1, t1 \ + VSRLB t3, h2, t2 \ + VSRL t4, t0, t0 \ + VSRL t4, t1, t1 \ + VSRL t7, t2, t2 \ + VN t5, h0, h0 \ + VN t5, h1, h1 \ + VESLG $2, t2, t8 \ + VN t6, h2, h2 \ + VAQ t0, h1, h1 \ + VAQ t8, t2, t2 \ + VAQ t1, h2, h2 \ + VAQ t2, h0, h0 \ + +// expands two message blocks into the lower halfs of the d registers +// moves the contents of the d registers into upper halfs +// input: in1, in2, d0, d1, d2, d3, d4, d5 +// temp: TEMP0, TEMP1, TEMP2, TEMP3 +// output: d0, d1, d2, d3, d4, d5 +#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, 
TEMP3) \ + VGBM $0xff3f, TEMP0 \ + VGBM $0xff1f, TEMP1 \ + VESLG $4, d1, TEMP2 \ + VESLG $4, d4, TEMP3 \ + VESRLG $4, TEMP0, TEMP0 \ + VPERM in1, d0, EX0, d0 \ + VPERM in2, d3, EX0, d3 \ + VPERM in1, d2, EX2, d2 \ + VPERM in2, d5, EX2, d5 \ + VPERM in1, TEMP2, EX1, d1 \ + VPERM in2, TEMP3, EX1, d4 \ + VN TEMP0, d0, d0 \ + VN TEMP0, d3, d3 \ + VESRLG $4, d1, d1 \ + VESRLG $4, d4, d4 \ + VN TEMP1, d2, d2 \ + VN TEMP1, d5, d5 \ + VN TEMP0, d1, d1 \ + VN TEMP0, d4, d4 \ + +// expands one message block into the lower halfs of the d registers +// moves the contents of the d registers into upper halfs +// input: in, d0, d1, d2 +// temp: TEMP0, TEMP1, TEMP2 +// output: d0, d1, d2 +#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \ + VGBM $0xff3f, TEMP0 \ + VESLG $4, d1, TEMP2 \ + VGBM $0xff1f, TEMP1 \ + VPERM in, d0, EX0, d0 \ + VESRLG $4, TEMP0, TEMP0 \ + VPERM in, d2, EX2, d2 \ + VPERM in, TEMP2, EX1, d1 \ + VN TEMP0, d0, d0 \ + VN TEMP1, d2, d2 \ + VESRLG $4, d1, d1 \ + VN TEMP0, d1, d1 \ + +// pack h2:h0 into h1:h0 (no carry) +// input: h0, h1, h2 +// output: h0, h1, h2 +#define PACK(h0, h1, h2) \ + VMRLG h1, h2, h2 \ // copy h1 to upper half h2 + VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20 + VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1 + VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1 + VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1 + VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1) + VLEIG $0, $0, h2 \ // clear upper half of h2 + VESRLG $40, h2, h1 \ // h1 now has upper two bits of result + VLEIB $7, $88, h1 \ // for byte shift (11 bytes) + VSLB h1, h2, h2 \ // shift h2 11 bytes to the left + VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1 + VLEIG $0, $0, h1 \ // clear upper half of h1 + +// if h > 2**130-5 then h -= 2**130-5 +// input: h0, h1 +// temp: t0, t1, t2 +// output: h0 +#define MOD(h0, h1, t0, t1, t2) \ + VZERO t0 \ + VLEIG $1, $5, t0 \ + VACCQ h0, t0, t1 \ + VAQ h0, t0, t0 \ + VONE t2 \ + VLEIG $1, $-4, t2 \ + VAQ t2, t1, t1 \ + VACCQ h1, t1, t1 \ + VONE t2 \ + VAQ t2, t1, t1 \ + VN h0, t1, t2 \ + VNC t0, t1, t1 \ + VO t1, t2, h0 \ + +// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]key) +TEXT ·poly1305vmsl(SB), $0-32 + // This code processes 6 + up to 4 blocks (32 bytes) per iteration + // using the algorithm described in: + // NEON crypto, Daniel J. 
Bernstein & Peter Schwabe + // https://cryptojedi.org/papers/neoncrypto-20120320.pdf + // And as moddified for VMSL as described in + // Accelerating Poly1305 Cryptographic Message Authentication on the z14 + // O'Farrell et al, CASCON 2017, p48-55 + // https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht + + LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key + VZERO V0 // c + + // load EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 // c + + // setup r + VL (R4), T_0 + MOVD $·keyMask<>(SB), R6 + VL (R6), T_1 + VN T_0, T_1, T_0 + VZERO T_2 // limbs for r + VZERO T_3 + VZERO T_4 + EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7) + + // T_2, T_3, T_4: [0, r] + + // setup r*20 + VLEIG $0, $0, T_0 + VLEIG $1, $20, T_0 // T_0: [0, 20] + VZERO T_5 + VZERO T_6 + VMSLG T_0, T_3, T_5, T_5 + VMSLG T_0, T_4, T_6, T_6 + + // store r for final block in GR + VLGVG $1, T_2, RSAVE_0 // c + VLGVG $1, T_3, RSAVE_1 // c + VLGVG $1, T_4, RSAVE_2 // c + VLGVG $1, T_5, R5SAVE_1 // c + VLGVG $1, T_6, R5SAVE_2 // c + + // initialize h + VZERO H0_0 + VZERO H1_0 + VZERO H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + // initialize pointer for reduce constants + MOVD $·reduce<>(SB), R12 + + // calculate r**2 and 20*(r**2) + VZERO R_0 + VZERO R_1 + VZERO R_2 + SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7) + REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1) + VZERO R5_1 + VZERO R5_2 + VMSLG T_0, R_1, R5_1, R5_1 + VMSLG T_0, R_2, R5_2, R5_2 + + // skip r**4 calculation if 3 blocks or less + CMPBLE R3, $48, b4 + + // calculate r**4 and 20*(r**4) + VZERO T_8 + VZERO T_9 + VZERO T_10 + SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7) + REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1) + VZERO T_2 + VZERO T_3 + VMSLG T_0, T_9, T_2, T_2 + VMSLG T_0, T_10, T_3, T_3 + + // put r**2 to the right and r**4 to the left of R_0, R_1, R_2 + VSLDB $8, T_8, T_8, T_8 + VSLDB $8, T_9, T_9, T_9 + VSLDB $8, T_10, T_10, T_10 + VSLDB $8, T_2, T_2, T_2 + VSLDB $8, T_3, T_3, T_3 + + VO T_8, R_0, R_0 + VO T_9, R_1, R_1 + VO T_10, R_2, R_2 + VO T_2, R5_1, R5_1 + VO T_3, R5_2, R5_2 + + CMPBLE R3, $80, load // less than or equal to 5 blocks in message + + // 6(or 5+1) blocks + SUB $81, R3 + VLM (R2), M0, M4 + VLL R3, 80(R2), M5 + ADD $1, R3 + MOVBZ $1, R0 + CMPBGE R3, $16, 2(PC) + VLVGB R3, R0, M5 + MOVD $96(R2), R2 + EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) + EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) + VLEIB $2, $1, H2_0 + VLEIB $2, $1, H2_1 + VLEIB $10, $1, H2_0 + VLEIB $10, $1, H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO T_4 + VZERO T_10 + EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3) + VLR T_4, M4 + VLEIB $10, $1, M2 + CMPBLT R3, $16, 2(PC) + VLEIB $10, $1, T_10 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + + SUB $16, R3 + CMPBLE R3, $0, square + +load: + // load EX0, EX1 and EX2 + MOVD $·c<>(SB), R5 + VLM (R5), EX0, EX2 + +loop: + CMPBLE R3, $64, add // b4 // last 4 or less blocks left + + // next 4 full blocks + VLM (R2), M2, M5 + SUB $64, R3 + MOVD $64(R2), R2 + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, 
T_2, T_7, T_8, T_9) + + // expacc in-lined to create [m2, m3] limbs + VGBM $0x3f3f, T_0 // 44 bit clear mask + VGBM $0x1f1f, T_1 // 40 bit clear mask + VPERM M2, M3, EX0, T_3 + VESRLG $4, T_0, T_0 // 44 bit clear mask ready + VPERM M2, M3, EX1, T_4 + VPERM M2, M3, EX2, T_5 + VN T_0, T_3, T_3 + VESRLG $4, T_4, T_4 + VN T_1, T_5, T_5 + VN T_0, T_4, T_4 + VMRHG H0_1, T_3, H0_0 + VMRHG H1_1, T_4, H1_0 + VMRHG H2_1, T_5, H2_0 + VMRLG H0_1, T_3, H0_1 + VMRLG H1_1, T_4, H1_1 + VMRLG H2_1, T_5, H2_1 + VLEIB $10, $1, H2_0 + VLEIB $10, $1, H2_1 + VPERM M4, M5, EX0, T_3 + VPERM M4, M5, EX1, T_4 + VPERM M4, M5, EX2, T_5 + VN T_0, T_3, T_3 + VESRLG $4, T_4, T_4 + VN T_1, T_5, T_5 + VN T_0, T_4, T_4 + VMRHG V0, T_3, M0 + VMRHG V0, T_4, M1 + VMRHG V0, T_5, M2 + VMRLG V0, T_3, M3 + VMRLG V0, T_4, M4 + VMRLG V0, T_5, M5 + VLEIB $10, $1, M2 + VLEIB $10, $1, M5 + + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + CMPBNE R3, $0, loop + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + + // load EX0, EX1, EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + // sum vectors + VAQ H0_0, H0_1, H0_0 + VAQ H1_0, H1_1, H1_0 + VAQ H2_0, H2_1, H2_0 + + // h may be >= 2*(2**130-5) so we need to reduce it again + // M0...M4 are used as temps here + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + +next: // carry h1->h2 + VLEIB $7, $0x28, T_1 + VREPIB $4, T_2 + VGBM $0x003F, T_3 + VESRLG $4, T_3 + + // byte shift + VSRLB T_1, H1_0, T_4 + + // bit shift + VSRL T_2, T_4, T_4 + + // clear h1 carry bits + VN T_3, H1_0, H1_0 + + // add carry + VAQ T_4, H2_0, H2_0 + + // h is now < 2*(2**130-5) + // pack h into h1 (hi) and h0 (lo) + PACK(H0_0, H1_0, H2_0) + + // if h > 2**130-5 then h -= 2**130-5 + MOD(H0_0, H1_0, T_0, T_1, T_2) + + // h += s + MOVD $·bswapMask<>(SB), R5 + VL (R5), T_1 + VL 16(R4), T_0 + VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) + VAQ T_0, H0_0, H0_0 + VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little) + VST H0_0, (R1) + RET + +add: + // load EX0, EX1, EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + CMPBLE R3, $64, b4 + +b4: + CMPBLE R3, $48, b3 // 3 blocks or less + + // 4(3+1) blocks remaining + SUB $49, R3 + VLM (R2), M0, M2 + VLL R3, 48(R2), M3 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M3 + MOVD $64(R2), R2 + EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) + VLEIB $10, $1, H2_0 + VLEIB $10, $1, H2_1 + VZERO M0 + VZERO M1 + VZERO M4 + VZERO M5 + VZERO T_4 + VZERO T_10 + EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3) + VLR T_4, M2 + VLEIB $10, $1, M4 + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, T_10 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + 
SUB $16, R3 + CMPBLE R3, $0, square // this condition must always hold true! + +b3: + CMPBLE R3, $32, b2 + + // 3 blocks remaining + + // setup [r²,r] + VSLDB $8, R_0, R_0, R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // H*[r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5) + + SUB $33, R3 + VLM (R2), M0, M1 + VLL R3, 32(R2), M2 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M2 + + // H += m0 + VZERO T_1 + VZERO T_2 + VZERO T_3 + EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6) + VLEIB $10, $1, T_3 + VAG H0_0, T_1, H0_0 + VAG H1_0, T_2, H1_0 + VAG H2_0, T_3, H2_0 + + VZERO M0 + VZERO M3 + VZERO M4 + VZERO M5 + VZERO T_10 + + // (H+m0)*r + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9) + + // H += m1 + VZERO V0 + VZERO T_1 + VZERO T_2 + VZERO T_3 + EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6) + VLEIB $10, $1, T_3 + VAQ H0_0, T_1, H0_0 + VAQ H1_0, T_2, H1_0 + VAQ H2_0, T_3, H2_0 + REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) + + // [H, m2] * [r**2, r] + EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3) + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, H2_0 + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10) + SUB $16, R3 + CMPBLE R3, $0, next // this condition must always hold true! 
+ +b2: + CMPBLE R3, $16, b1 + + // 2 blocks remaining + + // setup [r²,r] + VSLDB $8, R_0, R_0, R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // H*[r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + + // move h to the left and 0s at the right + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + + // get message blocks and append 1 to start + SUB $17, R3 + VL (R2), M0 + VLL R3, 16(R2), M1 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M1 + VZERO T_6 + VZERO T_7 + VZERO T_8 + EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3) + EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3) + VLEIB $2, $1, T_8 + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, T_8 + + // add [m0, m1] to h + VAG H0_0, T_6, H0_0 + VAG H1_0, T_7, H1_0 + VAG H2_0, T_8, H2_0 + + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + VZERO T_10 + VZERO M0 + + // at this point R_0 .. R5_2 look like [r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) + SUB $16, R3, R3 + CMPBLE R3, $0, next + +b1: + CMPBLE R3, $0, next + + // 1 block remaining + + // setup [r²,r] + VSLDB $8, R_0, R_0, R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // H*[r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + + // set up [0, m0] limbs + SUB $1, R3 + VLL R3, (R2), M0 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M0 + VZERO T_1 + VZERO T_2 + VZERO T_3 + EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m] + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, T_3 + + // h+m0 + VAQ H0_0, T_1, H0_0 + VAQ H1_0, T_2, H1_0 + VAQ H2_0, T_3, H2_0 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + + BR next + +square: + // setup [r²,r] + VSLDB $8, R_0, R_0, 
R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // (h0*r**2) + (h1*r) + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + BR next diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go new file mode 100644 index 00000000..1ab07d07 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/buffer.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" +) + +// buffer provides a linked list buffer for data exchange +// between producer and consumer. Theoretically the buffer is +// of unlimited capacity as it does no allocation of its own. +type buffer struct { + // protects concurrent access to head, tail and closed + *sync.Cond + + head *element // the buffer that will be read first + tail *element // the buffer that will be read last + + closed bool +} + +// An element represents a single link in a linked list. +type element struct { + buf []byte + next *element +} + +// newBuffer returns an empty buffer that is not closed. +func newBuffer() *buffer { + e := new(element) + b := &buffer{ + Cond: newCond(), + head: e, + tail: e, + } + return b +} + +// write makes buf available for Read to receive. +// buf must not be modified after the call to write. +func (b *buffer) write(buf []byte) { + b.Cond.L.Lock() + e := &element{buf: buf} + b.tail.next = e + b.tail = e + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// eof closes the buffer. Reads from the buffer once all +// the data has been consumed will receive io.EOF. +func (b *buffer) eof() { + b.Cond.L.Lock() + b.closed = true + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// Read reads data from the internal buffer in buf. Reads will block +// if no data is available, or until the buffer is closed. +func (b *buffer) Read(buf []byte) (n int, err error) { + b.Cond.L.Lock() + defer b.Cond.L.Unlock() + + for len(buf) > 0 { + // if there is data in b.head, copy it + if len(b.head.buf) > 0 { + r := copy(buf, b.head.buf) + buf, b.head.buf = buf[r:], b.head.buf[r:] + n += r + continue + } + // if there is a next buffer, make it the head + if len(b.head.buf) == 0 && b.head != b.tail { + b.head = b.head.next + continue + } + + // if at least one byte has been copied, return + if n > 0 { + break + } + + // if nothing was read, and there is nothing outstanding + // check to see if the buffer is closed. + if b.closed { + err = io.EOF + break + } + // out of buffers, wait for producer + b.Cond.Wait() + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go new file mode 100644 index 00000000..00ed9923 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -0,0 +1,535 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sort"
+	"time"
+)
+
+// These constants from [PROTOCOL.certkeys] represent the algorithm names
+// for certificate types supported by this package.
+const (
+	CertAlgoRSAv01      = "ssh-rsa-cert-v01@openssh.com"
+	CertAlgoDSAv01      = "ssh-dss-cert-v01@openssh.com"
+	CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
+	CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
+	CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
+	CertAlgoED25519v01  = "ssh-ed25519-cert-v01@openssh.com"
+)
+
+// Certificate types distinguish between host and user
+// certificates. The values can be set in the CertType field of
+// Certificate.
+const (
+	UserCert = 1
+	HostCert = 2
+)
+
+// Signature represents a cryptographic signature.
+type Signature struct {
+	Format string
+	Blob   []byte
+}
+
+// CertTimeInfinity can be used for Certificate.ValidBefore to indicate that
+// a certificate does not expire.
+const CertTimeInfinity = 1<<64 - 1
+
+// A Certificate represents an OpenSSH certificate as defined in
+// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the
+// PublicKey interface, so it can be unmarshaled using
+// ParsePublicKey.
+type Certificate struct {
+	Nonce           []byte
+	Key             PublicKey
+	Serial          uint64
+	CertType        uint32
+	KeyId           string
+	ValidPrincipals []string
+	ValidAfter      uint64
+	ValidBefore     uint64
+	Permissions
+	Reserved     []byte
+	SignatureKey PublicKey
+	Signature    *Signature
+}
+
+// genericCertData holds the key-independent part of the certificate data.
+// Overall, certificates contain a nonce, public key fields and
+// key-independent fields.
+type genericCertData struct {
+	Serial          uint64
+	CertType        uint32
+	KeyId           string
+	ValidPrincipals []byte
+	ValidAfter      uint64
+	ValidBefore     uint64
+	CriticalOptions []byte
+	Extensions      []byte
+	Reserved        []byte
+	SignatureKey    []byte
+	Signature       []byte
+}
+
+func marshalStringList(namelist []string) []byte {
+	var to []byte
+	for _, name := range namelist {
+		s := struct{ N string }{name}
+		to = append(to, Marshal(&s)...)
+	}
+	return to
+}
+
+type optionsTuple struct {
+	Key   string
+	Value []byte
+}
+
+type optionsTupleValue struct {
+	Value string
+}
+
+// serialize a map of critical options or extensions
+// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
+// we need two length prefixes for a non-empty string value
+func marshalTuples(tups map[string]string) []byte {
+	keys := make([]string, 0, len(tups))
+	for key := range tups {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	var ret []byte
+	for _, key := range keys {
+		s := optionsTuple{Key: key}
+		if value := tups[key]; len(value) > 0 {
+			s.Value = Marshal(&optionsTupleValue{value})
+		}
+		ret = append(ret, Marshal(&s)...)
+	}
+	return ret
+}
+
+// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
+// we need two length prefixes for a non-empty option value
+func parseTuples(in []byte) (map[string]string, error) {
+	tups := map[string]string{}
+	var lastKey string
+	var haveLastKey bool
+
+	for len(in) > 0 {
+		var key, val, extra []byte
+		var ok bool
+
+		if key, in, ok = parseString(in); !ok {
+			return nil, errShortRead
+		}
+		keyStr := string(key)
+		// according to [PROTOCOL.certkeys], the names must be in
+		// lexical order.
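+		// For example, "force-command" sorts before "source-address"; a
+		// certificate that lists them in the opposite order is rejected
+		// by the check below.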
+ if haveLastKey && keyStr <= lastKey { + return nil, fmt.Errorf("ssh: certificate options are not in lexical order") + } + lastKey, haveLastKey = keyStr, true + // the next field is a data field, which if non-empty has a string embedded + if val, in, ok = parseString(in); !ok { + return nil, errShortRead + } + if len(val) > 0 { + val, extra, ok = parseString(val) + if !ok { + return nil, errShortRead + } + if len(extra) > 0 { + return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") + } + tups[keyStr] = string(val) + } else { + tups[keyStr] = "" + } + } + return tups, nil +} + +func parseCert(in []byte, privAlgo string) (*Certificate, error) { + nonce, rest, ok := parseString(in) + if !ok { + return nil, errShortRead + } + + key, rest, err := parsePubKey(rest, privAlgo) + if err != nil { + return nil, err + } + + var g genericCertData + if err := Unmarshal(rest, &g); err != nil { + return nil, err + } + + c := &Certificate{ + Nonce: nonce, + Key: key, + Serial: g.Serial, + CertType: g.CertType, + KeyId: g.KeyId, + ValidAfter: g.ValidAfter, + ValidBefore: g.ValidBefore, + } + + for principals := g.ValidPrincipals; len(principals) > 0; { + principal, rest, ok := parseString(principals) + if !ok { + return nil, errShortRead + } + c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) + principals = rest + } + + c.CriticalOptions, err = parseTuples(g.CriticalOptions) + if err != nil { + return nil, err + } + c.Extensions, err = parseTuples(g.Extensions) + if err != nil { + return nil, err + } + c.Reserved = g.Reserved + k, err := ParsePublicKey(g.SignatureKey) + if err != nil { + return nil, err + } + + c.SignatureKey = k + c.Signature, rest, ok = parseSignatureBody(g.Signature) + if !ok || len(rest) > 0 { + return nil, errors.New("ssh: signature parse error") + } + + return c, nil +} + +type openSSHCertSigner struct { + pub *Certificate + signer Signer +} + +type algorithmOpenSSHCertSigner struct { + *openSSHCertSigner + algorithmSigner AlgorithmSigner +} + +// NewCertSigner returns a Signer that signs with the given Certificate, whose +// private key is held by signer. It returns an error if the public key in cert +// doesn't match the key used by signer. +func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { + if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + return nil, errors.New("ssh: signer and cert have different public key") + } + + if algorithmSigner, ok := signer.(AlgorithmSigner); ok { + return &algorithmOpenSSHCertSigner{ + &openSSHCertSigner{cert, signer}, algorithmSigner}, nil + } else { + return &openSSHCertSigner{cert, signer}, nil + } +} + +func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.signer.Sign(rand, data) +} + +func (s *openSSHCertSigner) PublicKey() PublicKey { + return s.pub +} + +func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + +const sourceAddressCriticalOption = "source-address" + +// CertChecker does the work of verifying a certificate. Its methods +// can be plugged into ClientConfig.HostKeyCallback and +// ServerConfig.PublicKeyCallback. For the CertChecker to work, +// minimally, the IsAuthority callback should be set. +type CertChecker struct { + // SupportedCriticalOptions lists the CriticalOptions that the + // server application layer understands. 
These are only used + // for user certificates. + SupportedCriticalOptions []string + + // IsUserAuthority should return true if the key is recognized as an + // authority for the given user certificate. This allows for + // certificates to be signed by other certificates. This must be set + // if this CertChecker will be checking user certificates. + IsUserAuthority func(auth PublicKey) bool + + // IsHostAuthority should report whether the key is recognized as + // an authority for this host. This allows for certificates to be + // signed by other keys, and for those other keys to only be valid + // signers for particular hostnames. This must be set if this + // CertChecker will be checking host certificates. + IsHostAuthority func(auth PublicKey, address string) bool + + // Clock is used for verifying time stamps. If nil, time.Now + // is used. + Clock func() time.Time + + // UserKeyFallback is called when CertChecker.Authenticate encounters a + // public key that is not a certificate. It must implement validation + // of user keys or else, if nil, all such keys are rejected. + UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // HostKeyFallback is called when CertChecker.CheckHostKey encounters a + // public key that is not a certificate. It must implement host key + // validation or else, if nil, all such keys are rejected. + HostKeyFallback HostKeyCallback + + // IsRevoked is called for each certificate so that revocation checking + // can be implemented. It should return true if the given certificate + // is revoked and false otherwise. If nil, no certificates are + // considered to have been revoked. + IsRevoked func(cert *Certificate) bool +} + +// CheckHostKey checks a host key certificate. This method can be +// plugged into ClientConfig.HostKeyCallback. +func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { + cert, ok := key.(*Certificate) + if !ok { + if c.HostKeyFallback != nil { + return c.HostKeyFallback(addr, remote, key) + } + return errors.New("ssh: non-certificate host key") + } + if cert.CertType != HostCert { + return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) + } + if !c.IsHostAuthority(cert.SignatureKey, addr) { + return fmt.Errorf("ssh: no authorities for hostname: %v", addr) + } + + hostname, _, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + // Pass hostname only as principal for host certificates (consistent with OpenSSH) + return c.CheckCert(hostname, cert) +} + +// Authenticate checks a user certificate. Authenticate can be used as +// a value for ServerConfig.PublicKeyCallback. +func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { + cert, ok := pubKey.(*Certificate) + if !ok { + if c.UserKeyFallback != nil { + return c.UserKeyFallback(conn, pubKey) + } + return nil, errors.New("ssh: normal key pairs not accepted") + } + + if cert.CertType != UserCert { + return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) + } + if !c.IsUserAuthority(cert.SignatureKey) { + return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") + } + + if err := c.CheckCert(conn.User(), cert); err != nil { + return nil, err + } + + return &cert.Permissions, nil +} + +// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and +// the signature of the certificate. 
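+// For a user certificate the principal is the username being authenticated
+// (see Authenticate above); for a host certificate it is the hostname being
+// connected to (see CheckHostKey).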
+func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { + if c.IsRevoked != nil && c.IsRevoked(cert) { + return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) + } + + for opt := range cert.CriticalOptions { + // sourceAddressCriticalOption will be enforced by + // serverAuthenticate + if opt == sourceAddressCriticalOption { + continue + } + + found := false + for _, supp := range c.SupportedCriticalOptions { + if supp == opt { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) + } + } + + if len(cert.ValidPrincipals) > 0 { + // By default, certs are valid for all users/hosts. + found := false + for _, p := range cert.ValidPrincipals { + if p == principal { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) + } + } + + clock := c.Clock + if clock == nil { + clock = time.Now + } + + unixNow := clock().Unix() + if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { + return fmt.Errorf("ssh: cert is not yet valid") + } + if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { + return fmt.Errorf("ssh: cert has expired") + } + if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { + return fmt.Errorf("ssh: certificate signature does not verify") + } + + return nil +} + +// SignCert sets c.SignatureKey to the authority's public key and stores a +// Signature, by authority, in the certificate. +func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { + c.Nonce = make([]byte, 32) + if _, err := io.ReadFull(rand, c.Nonce); err != nil { + return err + } + c.SignatureKey = authority.PublicKey() + + sig, err := authority.Sign(rand, c.bytesForSigning()) + if err != nil { + return err + } + c.Signature = sig + return nil +} + +var certAlgoNames = map[string]string{ + KeyAlgoRSA: CertAlgoRSAv01, + KeyAlgoDSA: CertAlgoDSAv01, + KeyAlgoECDSA256: CertAlgoECDSA256v01, + KeyAlgoECDSA384: CertAlgoECDSA384v01, + KeyAlgoECDSA521: CertAlgoECDSA521v01, + KeyAlgoED25519: CertAlgoED25519v01, +} + +// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. +// Panics if a non-certificate algorithm is passed. +func certToPrivAlgo(algo string) string { + for privAlgo, pubAlgo := range certAlgoNames { + if pubAlgo == algo { + return privAlgo + } + } + panic("unknown cert algorithm") +} + +func (cert *Certificate) bytesForSigning() []byte { + c2 := *cert + c2.Signature = nil + out := c2.Marshal() + // Drop trailing signature length. + return out[:len(out)-4] +} + +// Marshal serializes c into OpenSSH's wire format. It is part of the +// PublicKey interface. 
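+// The wire format is the certificate algorithm name, the nonce, the
+// key-specific public key fields, and then the generic fields and the
+// signature, matching the layout assembled below.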
+func (c *Certificate) Marshal() []byte { + generic := genericCertData{ + Serial: c.Serial, + CertType: c.CertType, + KeyId: c.KeyId, + ValidPrincipals: marshalStringList(c.ValidPrincipals), + ValidAfter: uint64(c.ValidAfter), + ValidBefore: uint64(c.ValidBefore), + CriticalOptions: marshalTuples(c.CriticalOptions), + Extensions: marshalTuples(c.Extensions), + Reserved: c.Reserved, + SignatureKey: c.SignatureKey.Marshal(), + } + if c.Signature != nil { + generic.Signature = Marshal(c.Signature) + } + genericBytes := Marshal(&generic) + keyBytes := c.Key.Marshal() + _, keyBytes, _ = parseString(keyBytes) + prefix := Marshal(&struct { + Name string + Nonce []byte + Key []byte `ssh:"rest"` + }{c.Type(), c.Nonce, keyBytes}) + + result := make([]byte, 0, len(prefix)+len(genericBytes)) + result = append(result, prefix...) + result = append(result, genericBytes...) + return result +} + +// Type returns the key name. It is part of the PublicKey interface. +func (c *Certificate) Type() string { + algo, ok := certAlgoNames[c.Key.Type()] + if !ok { + panic("unknown cert key type " + c.Key.Type()) + } + return algo +} + +// Verify verifies a signature against the certificate's public +// key. It is part of the PublicKey interface. +func (c *Certificate) Verify(data []byte, sig *Signature) error { + return c.Key.Verify(data, sig) +} + +func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { + format, in, ok := parseString(in) + if !ok { + return + } + + out = &Signature{ + Format: string(format), + } + + if out.Blob, in, ok = parseString(in); !ok { + return + } + + return out, in, ok +} + +func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { + sigBytes, rest, ok := parseString(in) + if !ok { + return + } + + out, trailing, ok := parseSignatureBody(sigBytes) + if !ok || len(trailing) > 0 { + return nil, nil, false + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go new file mode 100644 index 00000000..c0834c00 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/channel.go @@ -0,0 +1,633 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "sync" +) + +const ( + minPacketLength = 9 + // channelMaxPacket contains the maximum number of bytes that will be + // sent in a single packet. As per RFC 4253, section 6.1, 32k is also + // the minimum. + channelMaxPacket = 1 << 15 + // We follow OpenSSH here. + channelWindowSize = 64 * channelMaxPacket +) + +// NewChannel represents an incoming request to a channel. It must either be +// accepted for use by calling Accept, or rejected by calling Reject. +type NewChannel interface { + // Accept accepts the channel creation request. It returns the Channel + // and a Go channel containing SSH requests. The Go channel must be + // serviced otherwise the Channel will hang. + Accept() (Channel, <-chan *Request, error) + + // Reject rejects the channel creation request. After calling + // this, no other methods on the Channel may be called. + Reject(reason RejectionReason, message string) error + + // ChannelType returns the type of the channel, as supplied by the + // client. + ChannelType() string + + // ExtraData returns the arbitrary payload for this channel, as supplied + // by the client. This data is specific to the channel type. 
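+	// For example, a "direct-tcpip" channel carries the target address
+	// in its extra data (see RFC 4254, section 7.2).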
+ ExtraData() []byte +} + +// A Channel is an ordered, reliable, flow-controlled, duplex stream +// that is multiplexed over an SSH connection. +type Channel interface { + // Read reads up to len(data) bytes from the channel. + Read(data []byte) (int, error) + + // Write writes len(data) bytes to the channel. + Write(data []byte) (int, error) + + // Close signals end of channel use. No data may be sent after this + // call. + Close() error + + // CloseWrite signals the end of sending in-band + // data. Requests may still be sent, and the other side may + // still send data + CloseWrite() error + + // SendRequest sends a channel request. If wantReply is true, + // it will wait for a reply and return the result as a + // boolean, otherwise the return value will be false. Channel + // requests are out-of-band messages so they may be sent even + // if the data stream is closed or blocked by flow control. + // If the channel is closed before a reply is returned, io.EOF + // is returned. + SendRequest(name string, wantReply bool, payload []byte) (bool, error) + + // Stderr returns an io.ReadWriter that writes to this channel + // with the extended data type set to stderr. Stderr may + // safely be read and written from a different goroutine than + // Read and Write respectively. + Stderr() io.ReadWriter +} + +// Request is a request sent outside of the normal stream of +// data. Requests can either be specific to an SSH channel, or they +// can be global. +type Request struct { + Type string + WantReply bool + Payload []byte + + ch *channel + mux *mux +} + +// Reply sends a response to a request. It must be called for all requests +// where WantReply is true and is a no-op otherwise. The payload argument is +// ignored for replies to channel-specific requests. +func (r *Request) Reply(ok bool, payload []byte) error { + if !r.WantReply { + return nil + } + + if r.ch == nil { + return r.mux.ackRequest(ok, payload) + } + + return r.ch.ackRequest(ok) +} + +// RejectionReason is an enumeration used when rejecting channel creation +// requests. See RFC 4254, section 5.1. +type RejectionReason uint32 + +const ( + Prohibited RejectionReason = iota + 1 + ConnectionFailed + UnknownChannelType + ResourceShortage +) + +// String converts the rejection reason to human readable form. +func (r RejectionReason) String() string { + switch r { + case Prohibited: + return "administratively prohibited" + case ConnectionFailed: + return "connect failed" + case UnknownChannelType: + return "unknown channel type" + case ResourceShortage: + return "resource shortage" + } + return fmt.Sprintf("unknown reason %d", int(r)) +} + +func min(a uint32, b int) uint32 { + if a < uint32(b) { + return a + } + return uint32(b) +} + +type channelDirection uint8 + +const ( + channelInbound channelDirection = iota + channelOutbound +) + +// channel is an implementation of the Channel interface that works +// with the mux class. +type channel struct { + // R/O after creation + chanType string + extraData []byte + localId, remoteId uint32 + + // maxIncomingPayload and maxRemotePayload are the maximum + // payload sizes of normal and extended data packets for + // receiving and sending, respectively. The wire packet will + // be 9 or 13 bytes larger (excluding encryption overhead). + maxIncomingPayload uint32 + maxRemotePayload uint32 + + mux *mux + + // decided is set to true if an accept or reject message has been sent + // (for outbound channels) or received (for inbound channels). 
+ decided bool + + // direction contains either channelOutbound, for channels created + // locally, or channelInbound, for channels created by the peer. + direction channelDirection + + // Pending internal channel messages. + msg chan interface{} + + // Since requests have no ID, there can be only one request + // with WantReply=true outstanding. This lock is held by a + // goroutine that has such an outgoing request pending. + sentRequestMu sync.Mutex + + incomingRequests chan *Request + + sentEOF bool + + // thread-safe data + remoteWin window + pending *buffer + extPending *buffer + + // windowMu protects myWindow, the flow-control window. + windowMu sync.Mutex + myWindow uint32 + + // writeMu serializes calls to mux.conn.writePacket() and + // protects sentClose and packetPool. This mutex must be + // different from windowMu, as writePacket can block if there + // is a key exchange pending. + writeMu sync.Mutex + sentClose bool + + // packetPool has a buffer for each extended channel ID to + // save allocations during writes. + packetPool map[uint32][]byte +} + +// writePacket sends a packet. If the packet is a channel close, it updates +// sentClose. This method takes the lock c.writeMu. +func (ch *channel) writePacket(packet []byte) error { + ch.writeMu.Lock() + if ch.sentClose { + ch.writeMu.Unlock() + return io.EOF + } + ch.sentClose = (packet[0] == msgChannelClose) + err := ch.mux.conn.writePacket(packet) + ch.writeMu.Unlock() + return err +} + +func (ch *channel) sendMessage(msg interface{}) error { + if debugMux { + log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) + } + + p := Marshal(msg) + binary.BigEndian.PutUint32(p[1:], ch.remoteId) + return ch.writePacket(p) +} + +// WriteExtended writes data to a specific extended stream. These streams are +// used, for example, for stderr. +func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { + if ch.sentEOF { + return 0, io.EOF + } + // 1 byte message type, 4 bytes remoteId, 4 bytes data length + opCode := byte(msgChannelData) + headerLength := uint32(9) + if extendedCode > 0 { + headerLength += 4 + opCode = msgChannelExtendedData + } + + ch.writeMu.Lock() + packet := ch.packetPool[extendedCode] + // We don't remove the buffer from packetPool, so + // WriteExtended calls from different goroutines will be + // flagged as errors by the race detector. 
+	ch.writeMu.Unlock()
+
+	for len(data) > 0 {
+		space := min(ch.maxRemotePayload, len(data))
+		if space, err = ch.remoteWin.reserve(space); err != nil {
+			return n, err
+		}
+		if want := headerLength + space; uint32(cap(packet)) < want {
+			packet = make([]byte, want)
+		} else {
+			packet = packet[:want]
+		}
+
+		todo := data[:space]
+
+		packet[0] = opCode
+		binary.BigEndian.PutUint32(packet[1:], ch.remoteId)
+		if extendedCode > 0 {
+			binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
+		}
+		binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
+		copy(packet[headerLength:], todo)
+		if err = ch.writePacket(packet); err != nil {
+			return n, err
+		}
+
+		n += len(todo)
+		data = data[len(todo):]
+	}
+
+	ch.writeMu.Lock()
+	ch.packetPool[extendedCode] = packet
+	ch.writeMu.Unlock()
+
+	return n, err
+}
+
+func (ch *channel) handleData(packet []byte) error {
+	headerLen := 9
+	isExtendedData := packet[0] == msgChannelExtendedData
+	if isExtendedData {
+		headerLen = 13
+	}
+	if len(packet) < headerLen {
+		// malformed data packet
+		return parseError(packet[0])
+	}
+
+	var extended uint32
+	if isExtendedData {
+		extended = binary.BigEndian.Uint32(packet[5:])
+	}
+
+	length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen])
+	if length == 0 {
+		return nil
+	}
+	if length > ch.maxIncomingPayload {
+		// TODO(hanwen): should send Disconnect?
+		return errors.New("ssh: incoming packet exceeds maximum payload size")
+	}
+
+	data := packet[headerLen:]
+	if length != uint32(len(data)) {
+		return errors.New("ssh: wrong packet length")
+	}
+
+	ch.windowMu.Lock()
+	if ch.myWindow < length {
+		ch.windowMu.Unlock()
+		// TODO(hanwen): should send Disconnect with reason?
+		return errors.New("ssh: remote side wrote too much")
+	}
+	ch.myWindow -= length
+	ch.windowMu.Unlock()
+
+	if extended == 1 {
+		ch.extPending.write(data)
+	} else if extended > 0 {
+		// discard other extended data.
+	} else {
+		ch.pending.write(data)
+	}
+	return nil
+}
+
+func (c *channel) adjustWindow(n uint32) error {
+	c.windowMu.Lock()
+	// Since myWindow is managed on our side, and can never exceed
+	// the initial window setting, we don't worry about overflow.
+	c.myWindow += uint32(n)
+	c.windowMu.Unlock()
+	return c.sendMessage(windowAdjustMsg{
+		AdditionalBytes: uint32(n),
+	})
+}
+
+func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) {
+	switch extended {
+	case 1:
+		n, err = c.extPending.Read(data)
+	case 0:
+		n, err = c.pending.Read(data)
+	default:
+		return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended)
+	}
+
+	if n > 0 {
+		err = c.adjustWindow(uint32(n))
+		// adjustWindow can return io.EOF if the remote
+		// peer has closed the connection, however we want to
+		// defer forwarding io.EOF to the caller of Read until
+		// the buffer has been drained.
+		if n > 0 && err == io.EOF {
+			err = nil
+		}
+	}
+
+	return n, err
+}
+
+func (c *channel) close() {
+	c.pending.eof()
+	c.extPending.eof()
+	close(c.msg)
+	close(c.incomingRequests)
+	c.writeMu.Lock()
+	// This is not necessary for a normal channel teardown, but if
+	// there was another error, it is.
+	c.sentClose = true
+	c.writeMu.Unlock()
+	// Unblock writers.
+	c.remoteWin.close()
+}
+
+// responseMessageReceived is called when a success or failure message is
+// received on a channel to check that such a message is reasonable for the
+// given channel.
+func (ch *channel) responseMessageReceived() error { + if ch.direction == channelInbound { + return errors.New("ssh: channel response message received on inbound channel") + } + if ch.decided { + return errors.New("ssh: duplicate response received for channel") + } + ch.decided = true + return nil +} + +func (ch *channel) handlePacket(packet []byte) error { + switch packet[0] { + case msgChannelData, msgChannelExtendedData: + return ch.handleData(packet) + case msgChannelClose: + ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) + ch.mux.chanList.remove(ch.localId) + ch.close() + return nil + case msgChannelEOF: + // RFC 4254 is mute on how EOF affects dataExt messages but + // it is logical to signal EOF at the same time. + ch.extPending.eof() + ch.pending.eof() + return nil + } + + decoded, err := decode(packet) + if err != nil { + return err + } + + switch msg := decoded.(type) { + case *channelOpenFailureMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + ch.mux.chanList.remove(msg.PeersID) + ch.msg <- msg + case *channelOpenConfirmMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) + } + ch.remoteId = msg.MyID + ch.maxRemotePayload = msg.MaxPacketSize + ch.remoteWin.add(msg.MyWindow) + ch.msg <- msg + case *windowAdjustMsg: + if !ch.remoteWin.add(msg.AdditionalBytes) { + return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) + } + case *channelRequestMsg: + req := Request{ + Type: msg.Request, + WantReply: msg.WantReply, + Payload: msg.RequestSpecificData, + ch: ch, + } + + ch.incomingRequests <- &req + default: + ch.msg <- msg + } + return nil +} + +func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { + ch := &channel{ + remoteWin: window{Cond: newCond()}, + myWindow: channelWindowSize, + pending: newBuffer(), + extPending: newBuffer(), + direction: direction, + incomingRequests: make(chan *Request, chanSize), + msg: make(chan interface{}, chanSize), + chanType: chanType, + extraData: extraData, + mux: m, + packetPool: make(map[uint32][]byte), + } + ch.localId = m.chanList.add(ch) + return ch +} + +var errUndecided = errors.New("ssh: must Accept or Reject channel") +var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") + +type extChannel struct { + code uint32 + ch *channel +} + +func (e *extChannel) Write(data []byte) (n int, err error) { + return e.ch.WriteExtended(data, e.code) +} + +func (e *extChannel) Read(data []byte) (n int, err error) { + return e.ch.ReadExtended(data, e.code) +} + +func (ch *channel) Accept() (Channel, <-chan *Request, error) { + if ch.decided { + return nil, nil, errDecidedAlready + } + ch.maxIncomingPayload = channelMaxPacket + confirm := channelOpenConfirmMsg{ + PeersID: ch.remoteId, + MyID: ch.localId, + MyWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + } + ch.decided = true + if err := ch.sendMessage(confirm); err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (ch *channel) Reject(reason RejectionReason, message string) error { + if ch.decided { + return errDecidedAlready + } + reject := channelOpenFailureMsg{ + PeersID: ch.remoteId, + Reason: reason, + Message: message, + Language: "en", + } + ch.decided = true + return ch.sendMessage(reject) +} + +func (ch *channel) Read(data []byte) (int, 
error) { + if !ch.decided { + return 0, errUndecided + } + return ch.ReadExtended(data, 0) +} + +func (ch *channel) Write(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.WriteExtended(data, 0) +} + +func (ch *channel) CloseWrite() error { + if !ch.decided { + return errUndecided + } + ch.sentEOF = true + return ch.sendMessage(channelEOFMsg{ + PeersID: ch.remoteId}) +} + +func (ch *channel) Close() error { + if !ch.decided { + return errUndecided + } + + return ch.sendMessage(channelCloseMsg{ + PeersID: ch.remoteId}) +} + +// Extended returns an io.ReadWriter that sends and receives data on the given, +// SSH extended stream. Such streams are used, for example, for stderr. +func (ch *channel) Extended(code uint32) io.ReadWriter { + if !ch.decided { + return nil + } + return &extChannel{code, ch} +} + +func (ch *channel) Stderr() io.ReadWriter { + return ch.Extended(1) +} + +func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + if !ch.decided { + return false, errUndecided + } + + if wantReply { + ch.sentRequestMu.Lock() + defer ch.sentRequestMu.Unlock() + } + + msg := channelRequestMsg{ + PeersID: ch.remoteId, + Request: name, + WantReply: wantReply, + RequestSpecificData: payload, + } + + if err := ch.sendMessage(msg); err != nil { + return false, err + } + + if wantReply { + m, ok := (<-ch.msg) + if !ok { + return false, io.EOF + } + switch m.(type) { + case *channelRequestFailureMsg: + return false, nil + case *channelRequestSuccessMsg: + return true, nil + default: + return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) + } + } + + return false, nil +} + +// ackRequest either sends an ack or nack to the channel request. +func (ch *channel) ackRequest(ok bool) error { + if !ch.decided { + return errUndecided + } + + var msg interface{} + if !ok { + msg = channelRequestFailureMsg{ + PeersID: ch.remoteId, + } + } else { + msg = channelRequestSuccessMsg{ + PeersID: ch.remoteId, + } + } + return ch.sendMessage(msg) +} + +func (ch *channel) ChannelType() string { + return ch.chanType +} + +func (ch *channel) ExtraData() []byte { + return ch.extraData +} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go new file mode 100644 index 00000000..a65a923b --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -0,0 +1,770 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rc4" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "math/bits" + + "golang.org/x/crypto/internal/chacha20" + "golang.org/x/crypto/poly1305" +) + +const ( + packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. + + // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations + // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC + // indicates implementations SHOULD be able to handle larger packet sizes, but then + // waffles on about reasonable limits. + // + // OpenSSH caps their maxPacket at 256kB so we choose to do + // the same. maxPacket is also used to ensure that uint32 + // length fields do not overflow, so it should remain well + // below 4G. + maxPacket = 256 * 1024 +) + +// noneCipher implements cipher.Stream and provides no encryption. 
It is used +// by the transport before the first key-exchange. +type noneCipher struct{} + +func (c noneCipher) XORKeyStream(dst, src []byte) { + copy(dst, src) +} + +func newAESCTR(key, iv []byte) (cipher.Stream, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + return cipher.NewCTR(c, iv), nil +} + +func newRC4(key, iv []byte) (cipher.Stream, error) { + return rc4.NewCipher(key) +} + +type cipherMode struct { + keySize int + ivSize int + create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) +} + +func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + stream, err := createFunc(key, iv) + if err != nil { + return nil, err + } + + var streamDump []byte + if skip > 0 { + streamDump = make([]byte, 512) + } + + for remainingToDump := skip; remainingToDump > 0; { + dumpThisTime := remainingToDump + if dumpThisTime > len(streamDump) { + dumpThisTime = len(streamDump) + } + stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) + remainingToDump -= dumpThisTime + } + + mac := macModes[algs.MAC].new(macKey) + return &streamPacketCipher{ + mac: mac, + etm: macModes[algs.MAC].etm, + macResult: make([]byte, mac.Size()), + cipher: stream, + }, nil + } +} + +// cipherModes documents properties of supported ciphers. Ciphers not included +// are not supported and will not be negotiated, even if explicitly requested in +// ClientConfig.Crypto.Ciphers. +var cipherModes = map[string]*cipherMode{ + // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms + // are defined in the order specified in the RFC. + "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + + // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. + // They are defined in the order specified in the RFC. + "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, + "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, + + // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. + // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and + // RC4) has problems with weak keys, and should be used with caution." + // RFC4345 introduces improved versions of Arcfour. + "arcfour": {16, 0, streamCipherMode(0, newRC4)}, + + // AEAD ciphers + gcmCipherID: {16, 12, newGCMCipher}, + chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, + + // CBC mode is insecure and so is not included in the default config. + // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely + // needed, it's possible to specify a custom Config to enable it. + // You should expect that an active attacker can recover plaintext if + // you do. + aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, + + // 3des-cbc is insecure and is not included in the default + // config. + tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, +} + +// prefixLen is the length of the packet prefix that contains the packet length +// and number of padding bytes. +const prefixLen = 5 + +// streamPacketCipher is a packetCipher using a stream cipher. 
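+// The etm flag selects the encrypt-then-MAC layout used by the
+// *-etm@openssh.com MAC algorithms: the packet length is sent unencrypted
+// and the MAC is computed over the ciphertext rather than the plaintext.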
+type streamPacketCipher struct {
+	mac    hash.Hash
+	cipher cipher.Stream
+	etm    bool
+
+	// The following members are to avoid per-packet allocations.
+	prefix      [prefixLen]byte
+	seqNumBytes [4]byte
+	padding     [2 * packetSizeMultiple]byte
+	packetData  []byte
+	macResult   []byte
+}
+
+// readCipherPacket reads and decrypts a single packet from the reader argument.
+func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+	if _, err := io.ReadFull(r, s.prefix[:]); err != nil {
+		return nil, err
+	}
+
+	var encryptedPaddingLength [1]byte
+	if s.mac != nil && s.etm {
+		copy(encryptedPaddingLength[:], s.prefix[4:5])
+		s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
+	} else {
+		s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+	}
+
+	length := binary.BigEndian.Uint32(s.prefix[0:4])
+	paddingLength := uint32(s.prefix[4])
+
+	var macSize uint32
+	if s.mac != nil {
+		s.mac.Reset()
+		binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
+		s.mac.Write(s.seqNumBytes[:])
+		if s.etm {
+			s.mac.Write(s.prefix[:4])
+			s.mac.Write(encryptedPaddingLength[:])
+		} else {
+			s.mac.Write(s.prefix[:])
+		}
+		macSize = uint32(s.mac.Size())
+	}
+
+	if length <= paddingLength+1 {
+		return nil, errors.New("ssh: invalid packet length, packet too small")
+	}
+
+	if length > maxPacket {
+		return nil, errors.New("ssh: invalid packet length, packet too large")
+	}
+
+	// the maxPacket check above ensures that length-1+macSize
+	// does not overflow.
+	if uint32(cap(s.packetData)) < length-1+macSize {
+		s.packetData = make([]byte, length-1+macSize)
+	} else {
+		s.packetData = s.packetData[:length-1+macSize]
+	}
+
+	if _, err := io.ReadFull(r, s.packetData); err != nil {
+		return nil, err
+	}
+	mac := s.packetData[length-1:]
+	data := s.packetData[:length-1]
+
+	if s.mac != nil && s.etm {
+		s.mac.Write(data)
+	}
+
+	s.cipher.XORKeyStream(data, data)
+
+	if s.mac != nil {
+		if !s.etm {
+			s.mac.Write(data)
+		}
+		s.macResult = s.mac.Sum(s.macResult[:0])
+		if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
+			return nil, errors.New("ssh: MAC failure")
+		}
+	}
+
+	return s.packetData[:length-paddingLength-1], nil
+}
+
+// writeCipherPacket encrypts and sends a packet of data to the writer argument.
+func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
+	if len(packet) > maxPacket {
+		return errors.New("ssh: packet too large")
+	}
+
+	aadlen := 0
+	if s.mac != nil && s.etm {
+		// packet length is not encrypted for EtM modes
+		aadlen = 4
+	}
+
+	paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple
+	if paddingLength < 4 {
+		paddingLength += packetSizeMultiple
+	}
+
+	length := len(packet) + 1 + paddingLength
+	binary.BigEndian.PutUint32(s.prefix[:], uint32(length))
+	s.prefix[4] = byte(paddingLength)
+	padding := s.padding[:paddingLength]
+	if _, err := io.ReadFull(rand, padding); err != nil {
+		return err
+	}
+
+	if s.mac != nil {
+		s.mac.Reset()
+		binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
+		s.mac.Write(s.seqNumBytes[:])
+
+		if s.etm {
+			// For EtM algorithms, the packet length must stay unencrypted,
+			// but the following data (padding length) must be encrypted
+			s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
+		}
+
+		s.mac.Write(s.prefix[:])
+
+		if !s.etm {
+			// For non-EtM algorithms, the algorithm is applied on unencrypted data
+			s.mac.Write(packet)
+			s.mac.Write(padding)
+		}
+	}
+
+	if !(s.mac != nil && s.etm) {
+		// For EtM algorithms, the padding length has already been encrypted
+
// and the packet length must remain unencrypted + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + s.cipher.XORKeyStream(packet, packet) + s.cipher.XORKeyStream(padding, padding) + + if s.mac != nil && s.etm { + // For EtM algorithms, packet and padding must be encrypted + s.mac.Write(packet) + s.mac.Write(padding) + } + + if _, err := w.Write(s.prefix[:]); err != nil { + return err + } + if _, err := w.Write(packet); err != nil { + return err + } + if _, err := w.Write(padding); err != nil { + return err + } + + if s.mac != nil { + s.macResult = s.mac.Sum(s.macResult[:0]) + if _, err := w.Write(s.macResult); err != nil { + return err + } + } + + return nil +} + +type gcmCipher struct { + aead cipher.AEAD + prefix [4]byte + iv []byte + buf []byte +} + +func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + aead, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + return &gcmCipher{ + aead: aead, + iv: iv, + }, nil +} + +const gcmTagSize = 16 + +func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + // Pad out to multiple of 16 bytes. This is different from the + // stream cipher because that encrypts the length too. + padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) + if padding < 4 { + padding += packetSizeMultiple + } + + length := uint32(len(packet) + int(padding) + 1) + binary.BigEndian.PutUint32(c.prefix[:], length) + if _, err := w.Write(c.prefix[:]); err != nil { + return err + } + + if cap(c.buf) < int(length) { + c.buf = make([]byte, length) + } else { + c.buf = c.buf[:length] + } + + c.buf[0] = padding + copy(c.buf[1:], packet) + if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { + return err + } + c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if _, err := w.Write(c.buf); err != nil { + return err + } + c.incIV() + + return nil +} + +func (c *gcmCipher) incIV() { + for i := 4 + 7; i >= 4; i-- { + c.iv[i]++ + if c.iv[i] != 0 { + break + } + } +} + +func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, c.prefix[:]); err != nil { + return nil, err + } + length := binary.BigEndian.Uint32(c.prefix[:]) + if length > maxPacket { + return nil, errors.New("ssh: max packet length exceeded") + } + + if cap(c.buf) < int(length+gcmTagSize) { + c.buf = make([]byte, length+gcmTagSize) + } else { + c.buf = c.buf[:length+gcmTagSize] + } + + if _, err := io.ReadFull(r, c.buf); err != nil { + return nil, err + } + + plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if err != nil { + return nil, err + } + c.incIV() + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding+1) >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + plain = plain[1 : length-uint32(padding)] + return plain, nil +} + +// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 +type cbcCipher struct { + mac hash.Hash + macSize uint32 + decrypter cipher.BlockMode + encrypter cipher.BlockMode + + // The following members are to avoid per-packet allocations. 
+ seqNumBytes [4]byte + packetData []byte + macResult []byte + + // Amount of data we should still read to hide which + // verification error triggered. + oracleCamouflage uint32 +} + +func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + cbc := &cbcCipher{ + mac: macModes[algs.MAC].new(macKey), + decrypter: cipher.NewCBCDecrypter(c, iv), + encrypter: cipher.NewCBCEncrypter(c, iv), + packetData: make([]byte, 1024), + } + if cbc.mac != nil { + cbc.macSize = uint32(cbc.mac.Size()) + } + + return cbc, nil +} + +func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func maxUInt32(a, b int) uint32 { + if a > b { + return uint32(a) + } + return uint32(b) +} + +const ( + cbcMinPacketSizeMultiple = 8 + cbcMinPacketSize = 16 + cbcMinPaddingSize = 4 +) + +// cbcError represents a verification error that may leak information. +type cbcError string + +func (e cbcError) Error() string { return string(e) } + +func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + p, err := c.readCipherPacketLeaky(seqNum, r) + if err != nil { + if _, ok := err.(cbcError); ok { + // Verification error: read a fixed amount of + // data, to make distinguishing between + // failing MAC and failing length check more + // difficult. + io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) + } + } + return p, err +} + +func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { + blockSize := c.decrypter.BlockSize() + + // Read the header, which will include some of the subsequent data in the + // case of block ciphers - this is copied back to the payload later. + // How many bytes of payload/padding will be read with this first read. + firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) + firstBlock := c.packetData[:firstBlockLength] + if _, err := io.ReadFull(r, firstBlock); err != nil { + return nil, err + } + + c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength + + c.decrypter.CryptBlocks(firstBlock, firstBlock) + length := binary.BigEndian.Uint32(firstBlock[:4]) + if length > maxPacket { + return nil, cbcError("ssh: packet too large") + } + if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { + // The minimum size of a packet is 16 (or the cipher block size, whichever + // is larger) bytes. + return nil, cbcError("ssh: packet too small") + } + // The length of the packet (including the length field but not the MAC) must + // be a multiple of the block size or 8, whichever is larger. 
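+	// For example, with AES (block size 16) length+4 must be a multiple
+	// of 16, while with 3DES (block size 8) it must be a multiple of 8.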
+ if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { + return nil, cbcError("ssh: invalid packet length multiple") + } + + paddingLength := uint32(firstBlock[4]) + if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { + return nil, cbcError("ssh: invalid packet length") + } + + // Positions within the c.packetData buffer: + macStart := 4 + length + paddingStart := macStart - paddingLength + + // Entire packet size, starting before length, ending at end of mac. + entirePacketSize := macStart + c.macSize + + // Ensure c.packetData is large enough for the entire packet data. + if uint32(cap(c.packetData)) < entirePacketSize { + // Still need to upsize and copy, but this should be rare at runtime, only + // on upsizing the packetData buffer. + c.packetData = make([]byte, entirePacketSize) + copy(c.packetData, firstBlock) + } else { + c.packetData = c.packetData[:entirePacketSize] + } + + n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) + if err != nil { + return nil, err + } + c.oracleCamouflage -= uint32(n) + + remainingCrypted := c.packetData[firstBlockLength:macStart] + c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) + + mac := c.packetData[macStart:] + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData[:macStart]) + c.macResult = c.mac.Sum(c.macResult[:0]) + if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { + return nil, cbcError("ssh: MAC failure") + } + } + + return c.packetData[prefixLen:paddingStart], nil +} + +func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) + + // Length of encrypted portion of the packet (header, payload, padding). + // Enforce minimum padding and packet size. + encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) + // Enforce block size. + encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize + + length := encLength - 4 + paddingLength := int(length) - (1 + len(packet)) + + // Overall buffer contains: header, payload, padding, mac. + // Space for the MAC is reserved in the capacity but not the slice length. + bufferSize := encLength + c.macSize + if uint32(cap(c.packetData)) < bufferSize { + c.packetData = make([]byte, encLength, bufferSize) + } else { + c.packetData = c.packetData[:encLength] + } + + p := c.packetData + + // Packet header. + binary.BigEndian.PutUint32(p, length) + p = p[4:] + p[0] = byte(paddingLength) + + // Payload. + p = p[1:] + copy(p, packet) + + // Padding. + p = p[len(packet):] + if _, err := io.ReadFull(rand, p); err != nil { + return err + } + + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData) + // The MAC is now appended into the capacity reserved for it earlier. 
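+		// (hash.Hash.Sum appends the digest to its argument rather than
+		// overwriting it, so this grows packetData by macSize bytes.)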
+ c.packetData = c.mac.Sum(c.packetData) + } + + c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) + + if _, err := w.Write(c.packetData); err != nil { + return err + } + + return nil +} + +const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" + +// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com +// AEAD, which is described here: +// +// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 +// +// the methods here also implement padding, which RFC4253 Section 6 +// also requires of stream ciphers. +type chacha20Poly1305Cipher struct { + lengthKey [8]uint32 + contentKey [8]uint32 + buf []byte +} + +func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + if len(key) != 64 { + panic(len(key)) + } + + c := &chacha20Poly1305Cipher{ + buf: make([]byte, 256), + } + + for i := range c.contentKey { + c.contentKey[i] = binary.LittleEndian.Uint32(key[i*4 : (i+1)*4]) + } + for i := range c.lengthKey { + c.lengthKey[i] = binary.LittleEndian.Uint32(key[(i+8)*4 : (i+9)*4]) + } + return c, nil +} + +func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} + s := chacha20.New(c.contentKey, nonce) + var polyKey [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.Advance() // skip next 32 bytes + + encryptedLength := c.buf[:4] + if _, err := io.ReadFull(r, encryptedLength); err != nil { + return nil, err + } + + var lenBytes [4]byte + chacha20.New(c.lengthKey, nonce).XORKeyStream(lenBytes[:], encryptedLength) + + length := binary.BigEndian.Uint32(lenBytes[:]) + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + contentEnd := 4 + length + packetEnd := contentEnd + poly1305.TagSize + if uint32(cap(c.buf)) < packetEnd { + c.buf = make([]byte, packetEnd) + copy(c.buf[:], encryptedLength) + } else { + c.buf = c.buf[:packetEnd] + } + + if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { + return nil, err + } + + var mac [poly1305.TagSize]byte + copy(mac[:], c.buf[contentEnd:packetEnd]) + if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { + return nil, errors.New("ssh: MAC failure") + } + + plain := c.buf[4:contentEnd] + s.XORKeyStream(plain, plain) + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding)+1 >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + + plain = plain[1 : len(plain)-int(padding)] + + return plain, nil +} + +func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { + nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} + s := chacha20.New(c.contentKey, nonce) + var polyKey [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.Advance() // skip next 32 bytes + + // There is no blocksize, so fall back to multiple of 8 byte + // padding, as described in RFC 4253, Sec 6. + const packetSizeMultiple = 8 + + padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple + if padding < 4 { + padding += packetSizeMultiple + } + + // size (4 bytes), padding (1), payload, padding, tag. 
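+	// For example, an empty payload gives 4 + 1 + 0 + 7 + 16 = 28 bytes
+	// on the wire.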
+ totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize + if cap(c.buf) < totalLength { + c.buf = make([]byte, totalLength) + } else { + c.buf = c.buf[:totalLength] + } + + binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) + chacha20.New(c.lengthKey, nonce).XORKeyStream(c.buf, c.buf[:4]) + c.buf[4] = byte(padding) + copy(c.buf[5:], payload) + packetEnd := 5 + len(payload) + padding + if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { + return err + } + + s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) + + var mac [poly1305.TagSize]byte + poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) + + copy(c.buf[packetEnd:], mac[:]) + + if _, err := w.Write(c.buf); err != nil { + return err + } + return nil +} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go new file mode 100644 index 00000000..7b00bff1 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -0,0 +1,278 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "net" + "os" + "sync" + "time" +) + +// Client implements a traditional SSH client that supports shells, +// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. +type Client struct { + Conn + + handleForwardsOnce sync.Once // guards calling (*Client).handleForwards + + forwards forwardList // forwarded tcpip connections from the remote side + mu sync.Mutex + channelHandlers map[string]chan NewChannel +} + +// HandleChannelOpen returns a channel on which NewChannel requests +// for the given type are sent. If the type already is being handled, +// nil is returned. The channel is closed when the connection is closed. +func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { + c.mu.Lock() + defer c.mu.Unlock() + if c.channelHandlers == nil { + // The SSH channel has been closed. + c := make(chan NewChannel) + close(c) + return c + } + + ch := c.channelHandlers[channelType] + if ch != nil { + return nil + } + + ch = make(chan NewChannel, chanSize) + c.channelHandlers[channelType] = ch + return ch +} + +// NewClient creates a Client on top of the given connection. +func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { + conn := &Client{ + Conn: c, + channelHandlers: make(map[string]chan NewChannel, 1), + } + + go conn.handleGlobalRequests(reqs) + go conn.handleChannelOpens(chans) + go func() { + conn.Wait() + conn.forwards.closeAll() + }() + return conn +} + +// NewClientConn establishes an authenticated SSH connection using c +// as the underlying transport. The Request and NewChannel channels +// must be serviced or the connection will hang. +func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.HostKeyCallback == nil { + c.Close() + return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") + } + + conn := &connection{ + sshConn: sshConn{conn: c}, + } + + if err := conn.clientHandshake(addr, &fullConf); err != nil { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) + } + conn.mux = newMux(conn.transport) + return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil +} + +// clientHandshake performs the client side key exchange. See RFC 4253 Section +// 7. 
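+// The exchange hash of the first key exchange becomes the session ID, which
+// clientAuthenticate later signs over during public key authentication.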
+func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error {
+	if config.ClientVersion != "" {
+		c.clientVersion = []byte(config.ClientVersion)
+	} else {
+		c.clientVersion = []byte(packageVersion)
+	}
+	var err error
+	c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion)
+	if err != nil {
+		return err
+	}
+
+	c.transport = newClientTransport(
+		newTransport(c.sshConn.conn, config.Rand, true /* is client */),
+		c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
+	if err := c.transport.waitSession(); err != nil {
+		return err
+	}
+
+	c.sessionID = c.transport.getSessionID()
+	return c.clientAuthenticate(config)
+}
+
+// verifyHostKeySignature verifies the host key obtained in the key
+// exchange.
+func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
+	sig, rest, ok := parseSignatureBody(result.Signature)
+	if len(rest) > 0 || !ok {
+		return errors.New("ssh: signature parse error")
+	}
+
+	return hostKey.Verify(result.H, sig)
+}
+
+// NewSession opens a new Session for this client. (A session is a remote
+// execution of a program.)
+func (c *Client) NewSession() (*Session, error) {
+	ch, in, err := c.OpenChannel("session", nil)
+	if err != nil {
+		return nil, err
+	}
+	return newSession(ch, in)
+}
+
+func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
+	for r := range incoming {
+		// This handles keepalive messages and matches
+		// the behaviour of OpenSSH.
+		r.Reply(false, nil)
+	}
+}
+
+// handleChannelOpens handles channel open messages from the remote side.
+func (c *Client) handleChannelOpens(in <-chan NewChannel) {
+	for ch := range in {
+		c.mu.Lock()
+		handler := c.channelHandlers[ch.ChannelType()]
+		c.mu.Unlock()
+
+		if handler != nil {
+			handler <- ch
+		} else {
+			ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
+		}
+	}
+
+	c.mu.Lock()
+	for _, ch := range c.channelHandlers {
+		close(ch)
+	}
+	c.channelHandlers = nil
+	c.mu.Unlock()
+}
+
+// Dial starts a client connection to the given SSH server. It is a
+// convenience function that connects to the given network address,
+// initiates the SSH handshake, and then sets up a Client. For access
+// to incoming channels and requests, use net.Dial with NewClientConn
+// instead.
+func Dial(network, addr string, config *ClientConfig) (*Client, error) {
+	conn, err := net.DialTimeout(network, addr, config.Timeout)
+	if err != nil {
+		return nil, err
+	}
+	c, chans, reqs, err := NewClientConn(conn, addr, config)
+	if err != nil {
+		return nil, err
+	}
+	return NewClient(c, chans, reqs), nil
+}
+
+// HostKeyCallback is the function type used for verifying server
+// keys. A HostKeyCallback must return nil if the host key is OK, or
+// an error to reject it. It receives the hostname as passed to Dial
+// or NewClientConn. The remote address is the RemoteAddr of the
+// net.Conn underlying the SSH connection.
+type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
+
+// BannerCallback is the function type used to handle the banner sent by
+// the server. A BannerCallback receives the message sent by the remote server.
+type BannerCallback func(message string) error
+
+// A ClientConfig structure is used to configure a Client. It must not be
+// modified after having been passed to an SSH function.
+type ClientConfig struct {
+	// Config contains configuration that is shared between clients and
+	// servers.
+	Config
+
+	// User contains the username to authenticate as.
+ User string + + // Auth contains possible authentication methods to use with the + // server. Only the first instance of a particular RFC 4252 method will + // be used during authentication. + Auth []AuthMethod + + // HostKeyCallback is called during the cryptographic + // handshake to validate the server's host key. The client + // configuration must supply this callback for the connection + // to succeed. The functions InsecureIgnoreHostKey or + // FixedHostKey can be used for simplistic host key checks. + HostKeyCallback HostKeyCallback + + // BannerCallback is called during the SSH dance to display a custom + // server's message. The client configuration can supply this callback to + // handle it as wished. The function BannerDisplayStderr can be used for + // simplistic display on Stderr. + BannerCallback BannerCallback + + // ClientVersion contains the version identification string that will + // be used for the connection. If empty, a reasonable default is used. + ClientVersion string + + // HostKeyAlgorithms lists the key types that the client will + // accept from the server as host key, in order of + // preference. If empty, a reasonable default is used. Any + // string returned from PublicKey.Type method may be used, or + // any of the CertAlgoXxxx and KeyAlgoXxxx constants. + HostKeyAlgorithms []string + + // Timeout is the maximum amount of time for the TCP connection to establish. + // + // A Timeout of zero means no timeout. + Timeout time.Duration +} + +// InsecureIgnoreHostKey returns a function that can be used for +// ClientConfig.HostKeyCallback to accept any host key. It should +// not be used for production code. +func InsecureIgnoreHostKey() HostKeyCallback { + return func(hostname string, remote net.Addr, key PublicKey) error { + return nil + } +} + +type fixedHostKey struct { + key PublicKey +} + +func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { + if f.key == nil { + return fmt.Errorf("ssh: required host key was nil") + } + if !bytes.Equal(key.Marshal(), f.key.Marshal()) { + return fmt.Errorf("ssh: host key mismatch") + } + return nil +} + +// FixedHostKey returns a function for use in +// ClientConfig.HostKeyCallback to accept only a specific host key. +func FixedHostKey(key PublicKey) HostKeyCallback { + hk := &fixedHostKey{key} + return hk.check +} + +// BannerDisplayStderr returns a function that can be used for +// ClientConfig.BannerCallback to display banners on os.Stderr. +func BannerDisplayStderr() BannerCallback { + return func(banner string) error { + _, err := os.Stderr.WriteString(banner) + + return err + } +} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go new file mode 100644 index 00000000..0590070e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -0,0 +1,639 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +type authResult int + +const ( + authFailure authResult = iota + authPartialSuccess + authSuccess +) + +// clientAuthenticate authenticates with the remote server. See RFC 4252. 
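clientAuthenticate, below, walks the configured AuthMethods against the methods the server advertises. A minimal sketch of assembling such a configuration with the types defined above (host, user, and password are hypothetical):

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// hostKey is assumed to have been obtained out of band, e.g. from a
	// known_hosts lookup; FixedHostKey rejects any other key.
	var hostKey ssh.PublicKey

	config := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("hunter2")},
		HostKeyCallback: ssh.FixedHostKey(hostKey),
		BannerCallback:  ssh.BannerDisplayStderr(),
	}

	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}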
+func (c *connection) clientAuthenticate(config *ClientConfig) error { + // initiate user auth session + if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { + return err + } + packet, err := c.transport.readPacket() + if err != nil { + return err + } + var serviceAccept serviceAcceptMsg + if err := Unmarshal(packet, &serviceAccept); err != nil { + return err + } + + // during the authentication phase the client first attempts the "none" method + // then any untried methods suggested by the server. + tried := make(map[string]bool) + var lastMethods []string + + sessionID := c.transport.getSessionID() + for auth := AuthMethod(new(noneAuth)); auth != nil; { + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) + if err != nil { + return err + } + if ok == authSuccess { + // success + return nil + } else if ok == authFailure { + tried[auth.method()] = true + } + if methods == nil { + methods = lastMethods + } + lastMethods = methods + + auth = nil + + findNext: + for _, a := range config.Auth { + candidateMethod := a.method() + if tried[candidateMethod] { + continue + } + for _, meth := range methods { + if meth == candidateMethod { + auth = a + break findNext + } + } + } + } + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) +} + +func keys(m map[string]bool) []string { + s := make([]string, 0, len(m)) + + for key := range m { + s = append(s, key) + } + return s +} + +// An AuthMethod represents an instance of an RFC 4252 authentication method. +type AuthMethod interface { + // auth authenticates user over transport t. + // Returns true if authentication is successful. + // If authentication is not successful, a []string of alternative + // method names is returned. If the slice is nil, it will be ignored + // and the previous set of possible methods will be reused. + auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) + + // method returns the RFC 4252 method name. + method() string +} + +// "none" authentication, RFC 4252 section 5.2. +type noneAuth int + +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + if err := c.writePacket(Marshal(&userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: "none", + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (n *noneAuth) method() string { + return "none" +} + +// passwordCallback is an AuthMethod that fetches the password through +// a function call, e.g. by prompting the user. +type passwordCallback func() (password string, err error) + +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + type passwordAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + Reply bool + Password string + } + + pw, err := cb() + // REVIEW NOTE: is there a need to support skipping a password attempt? + // The program may only find out that the user doesn't have a password + // when prompting. 
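+	// An error from the callback aborts the connection attempt outright; it
+	// is not reported to the server as a failed password.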
+ if err != nil { + return authFailure, nil, err + } + + if err := c.writePacket(Marshal(&passwordAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + Reply: false, + Password: pw, + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (cb passwordCallback) method() string { + return "password" +} + +// Password returns an AuthMethod using the given password. +func Password(secret string) AuthMethod { + return passwordCallback(func() (string, error) { return secret, nil }) +} + +// PasswordCallback returns an AuthMethod that uses a callback for +// fetching a password. +func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { + return passwordCallback(prompt) +} + +type publickeyAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + // HasSig indicates to the receiver packet that the auth request is signed and + // should be used for authentication of the request. + HasSig bool + Algoname string + PubKey []byte + // Sig is tagged with "rest" so Marshal will exclude it during + // validateKey + Sig []byte `ssh:"rest"` +} + +// publicKeyCallback is an AuthMethod that uses a set of key +// pairs for authentication. +type publicKeyCallback func() ([]Signer, error) + +func (cb publicKeyCallback) method() string { + return "publickey" +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + // Authentication is performed by sending an enquiry to test if a key is + // acceptable to the remote. If the key is acceptable, the client will + // attempt to authenticate with the valid key. If not the client will repeat + // the process with the remaining keys. + + signers, err := cb() + if err != nil { + return authFailure, nil, err + } + var methods []string + for _, signer := range signers { + ok, err := validateKey(signer.PublicKey(), user, c) + if err != nil { + return authFailure, nil, err + } + if !ok { + continue + } + + pub := signer.PublicKey() + pubKey := pub.Marshal() + sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + }, []byte(pub.Type()), pubKey)) + if err != nil { + return authFailure, nil, err + } + + // manually wrap the serialized signature in a string + s := Marshal(sign) + sig := make([]byte, stringLength(len(s))) + marshalString(sig, s) + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + HasSig: true, + Algoname: pub.Type(), + PubKey: pubKey, + Sig: sig, + } + p := Marshal(&msg) + if err := c.writePacket(p); err != nil { + return authFailure, nil, err + } + var success authResult + success, methods, err = handleAuthResponse(c) + if err != nil { + return authFailure, nil, err + } + + // If authentication succeeds or the list of available methods does not + // contain the "publickey" method, do not attempt to authenticate with any + // other keys. According to RFC 4252 Section 7, the latter can occur when + // additional authentication methods are required. + if success == authSuccess || !containsMethod(methods, cb.method()) { + return success, methods, err + } + } + + return authFailure, methods, nil +} + +func containsMethod(methods []string, method string) bool { + for _, m := range methods { + if m == method { + return true + } + } + + return false +} + +// validateKey validates the key provided is acceptable to the server. 
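validateKey, below, implements the enquiry step described above. A sketch of feeding key pairs into this flow (the key path is hypothetical; ioutil matches the Go 1.13 era of this vendored code):

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	pem, err := ioutil.ReadFile("/home/demo/.ssh/id_ed25519") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(pem)
	if err != nil {
		log.Fatal(err)
	}
	config := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // example only
	}
	_ = config
}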
+func validateKey(key PublicKey, user string, c packetConn) (bool, error) { + pubKey := key.Marshal() + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: "publickey", + HasSig: false, + Algoname: key.Type(), + PubKey: pubKey, + } + if err := c.writePacket(Marshal(&msg)); err != nil { + return false, err + } + + return confirmKeyAck(key, c) +} + +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { + pubKey := key.Marshal() + algoname := key.Type() + + for { + packet, err := c.readPacket() + if err != nil { + return false, err + } + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return false, err + } + case msgUserAuthPubKeyOk: + var msg userAuthPubKeyOkMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, err + } + if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { + return false, nil + } + return true, nil + case msgUserAuthFailure: + return false, nil + default: + return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +// PublicKeys returns an AuthMethod that uses the given key +// pairs. +func PublicKeys(signers ...Signer) AuthMethod { + return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) +} + +// PublicKeysCallback returns an AuthMethod that runs the given +// function to obtain a list of key pairs. +func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { + return publicKeyCallback(getSigners) +} + +// handleAuthResponse returns whether the preceding authentication request succeeded +// along with a list of remaining authentication methods to try next and +// an error if an unexpected response was received. +func handleAuthResponse(c packetConn) (authResult, []string, error) { + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +func handleBannerResponse(c packetConn, packet []byte) error { + var msg userAuthBannerMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + transport, ok := c.(*handshakeTransport) + if !ok { + return nil + } + + if transport.bannerCallback != nil { + return transport.bannerCallback(msg.Message) + } + + return nil +} + +// KeyboardInteractiveChallenge should print questions, optionally +// disabling echoing (e.g. for passwords), and return all the answers. +// Challenge may be called multiple times in a single session. After +// successful authentication, the server may send a challenge with no +// questions, for which the user and instruction messages should be +// printed. RFC 4256 section 3.3 details how the UI should behave for +// both CLI and GUI environments. +type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) + +// KeyboardInteractive returns an AuthMethod using a prompt/response +// sequence controlled by the server. 
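A sketch of a KeyboardInteractiveChallenge that prompts on the terminal; a real client would suppress echo for any prompt whose echos[i] is false:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"golang.org/x/crypto/ssh"
)

func main() {
	challenge := func(user, instruction string, questions []string, echos []bool) ([]string, error) {
		if instruction != "" {
			fmt.Println(instruction)
		}
		in := bufio.NewReader(os.Stdin)
		answers := make([]string, len(questions))
		for i, q := range questions {
			fmt.Print(q)
			line, err := in.ReadString('\n')
			if err != nil {
				return nil, err
			}
			answers[i] = strings.TrimRight(line, "\r\n")
		}
		return answers, nil
	}

	config := &ssh.ClientConfig{
		User:            "demo", // hypothetical
		Auth:            []ssh.AuthMethod{ssh.KeyboardInteractive(challenge)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // example only
	}
	_ = config
}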
+func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { + return challenge +} + +func (cb KeyboardInteractiveChallenge) method() string { + return "keyboard-interactive" +} + +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + type initiateMsg struct { + User string `sshtype:"50"` + Service string + Method string + Language string + Submethods string + } + + if err := c.writePacket(Marshal(&initiateMsg{ + User: user, + Service: serviceSSH, + Method: "keyboard-interactive", + })); err != nil { + return authFailure, nil, err + } + + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + // like handleAuthResponse, but with less options. + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + continue + case msgUserAuthInfoRequest: + // OK + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + + var msg userAuthInfoRequestMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + + // Manually unpack the prompt/echo pairs. + rest := msg.Prompts + var prompts []string + var echos []bool + for i := 0; i < int(msg.NumPrompts); i++ { + prompt, r, ok := parseString(rest) + if !ok || len(r) == 0 { + return authFailure, nil, errors.New("ssh: prompt format error") + } + prompts = append(prompts, string(prompt)) + echos = append(echos, r[0] != 0) + rest = r[1:] + } + + if len(rest) != 0 { + return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") + } + + answers, err := cb(msg.User, msg.Instruction, prompts, echos) + if err != nil { + return authFailure, nil, err + } + + if len(answers) != len(prompts) { + return authFailure, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") + } + responseLength := 1 + 4 + for _, a := range answers { + responseLength += stringLength(len(a)) + } + serialized := make([]byte, responseLength) + p := serialized + p[0] = msgUserAuthInfoResponse + p = p[1:] + p = marshalUint32(p, uint32(len(answers))) + for _, a := range answers { + p = marshalString(p, []byte(a)) + } + + if err := c.writePacket(serialized); err != nil { + return authFailure, nil, err + } + } +} + +type retryableAuthMethod struct { + authMethod AuthMethod + maxTries int +} + +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { + for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { + ok, methods, err = r.authMethod.auth(session, user, c, rand) + if ok != authFailure || err != nil { // either success, partial success or error terminate + return ok, methods, err + } + } + return ok, methods, err +} + +func (r *retryableAuthMethod) method() string { + return r.authMethod.method() +} + +// RetryableAuthMethod is a decorator for other auth methods enabling them to +// be retried up to maxTries before considering that AuthMethod itself failed. 
+// If maxTries is <= 0, the method will retry indefinitely.
+//
+// This is useful for interactive clients using challenge/response type
+// authentication (e.g. Keyboard-Interactive, Password, etc) where the user
+// could mistype their response resulting in the server issuing a
+// SSH_MSG_USERAUTH_FAILURE (RFC 4252 section 8 [password] and RFC 4256
+// section 3.4 [keyboard-interactive]). Without this decorator, the
+// non-retryable AuthMethod would be removed from future consideration, and
+// never tried again (and so the user would never be able to retry their
+// entry).
+func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod {
+	return &retryableAuthMethod{authMethod: auth, maxTries: maxTries}
+}
+
+// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication.
+// See RFC 4462 section 3.
+// gssAPIClient is an implementation of the GSSAPIClient interface; see the
+// definition of the interface for details.
+// target is the server host you want to log in to.
+func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod {
+	if gssAPIClient == nil {
+		panic("gss-api client must not be nil when gssapi-with-mic is enabled")
+	}
+	return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target}
+}
+
+type gssAPIWithMICCallback struct {
+	gssAPIClient GSSAPIClient
+	target       string
+}
+
+func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
+	m := &userAuthRequestMsg{
+		User:    user,
+		Service: serviceSSH,
+		Method:  g.method(),
+	}
+	// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST.
+	// See RFC 4462 section 3.2.
+	m.Payload = appendU32(m.Payload, 1)
+	m.Payload = appendString(m.Payload, string(krb5OID))
+	if err := c.writePacket(Marshal(m)); err != nil {
+		return authFailure, nil, err
+	}
+	// The server responds to the SSH_MSG_USERAUTH_REQUEST with either an
+	// SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or
+	// with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE.
+	// See RFC 4462 section 3.3.
+	// OpenSSH supports only the Kerberos V5 mechanism for GSS-API
+	// authentication, so the selected mechanism is not validated here.
+	packet, err := c.readPacket()
+	if err != nil {
+		return authFailure, nil, err
+	}
+	userAuthGSSAPIResp := &userAuthGSSAPIResponse{}
+	if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil {
+		return authFailure, nil, err
+	}
+	// Start the token exchange loop.
+	// See RFC 4462 section 3.4.
+	var token []byte
+	defer g.gssAPIClient.DeleteSecContext()
+	for {
+		// Initiates the establishment of a security context between the application and a remote peer.
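+		// token is nil on the first iteration; later iterations feed back
+		// the reply token received from the server below.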
+ nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) + if err != nil { + return authFailure, nil, err + } + if len(nextToken) > 0 { + if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ + Token: nextToken, + })); err != nil { + return authFailure, nil, err + } + } + if !needContinue { + break + } + packet, err = c.readPacket() + if err != nil { + return authFailure, nil, err + } + switch packet[0] { + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthGSSAPIError: + userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} + if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { + return authFailure, nil, err + } + return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ + "Major Status: %d\n"+ + "Minor Status: %d\n"+ + "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, + userAuthGSSAPIErrorResp.Message) + case msgUserAuthGSSAPIToken: + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return authFailure, nil, err + } + token = userAuthGSSAPITokenReq.Token + } + } + // Binding Encryption Keys. + // See RFC 4462 section 3.5. + micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") + micToken, err := g.gssAPIClient.GetMIC(micField) + if err != nil { + return authFailure, nil, err + } + if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ + MIC: micToken, + })); err != nil { + return authFailure, nil, err + } + return handleAuthResponse(c) +} + +func (g *gssAPIWithMICCallback) method() string { + return "gssapi-with-mic" +} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go new file mode 100644 index 00000000..e55fe0ad --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -0,0 +1,396 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" + "math" + "sync" + + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +// These are string constants in the SSH protocol. +const ( + compressionNone = "none" + serviceUserAuth = "ssh-userauth" + serviceSSH = "ssh-connection" +) + +// supportedCiphers lists ciphers we support but might not recommend. +var supportedCiphers = []string{ + "aes128-ctr", "aes192-ctr", "aes256-ctr", + "aes128-gcm@openssh.com", + chacha20Poly1305ID, + "arcfour256", "arcfour128", "arcfour", + aes128cbcID, + tripledescbcID, +} + +// preferredCiphers specifies the default preference for ciphers. +var preferredCiphers = []string{ + "aes128-gcm@openssh.com", + chacha20Poly1305ID, + "aes128-ctr", "aes192-ctr", "aes256-ctr", +} + +// supportedKexAlgos specifies the supported key-exchange algorithms in +// preference order. +var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, + // P384 and P521 are not constant-time yet, but since we don't + // reuse ephemeral keys, using them for ECDH should be OK. + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA1, kexAlgoDH1SHA1, +} + +// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden +// for the server half. 
+var serverForbiddenKexAlgos = map[string]struct{}{
+	kexAlgoDHGEXSHA1:   {}, // server half implementation is only minimal to satisfy the automated tests
+	kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests
+}
+
+// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
+// of authenticating servers) in preference order.
+var supportedHostKeyAlgos = []string{
+	CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
+	CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
+
+	KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
+	KeyAlgoRSA, KeyAlgoDSA,
+
+	KeyAlgoED25519,
+}
+
+// supportedMACs specifies a default set of MAC algorithms in preference order.
+// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
+// because they have reached the end of their useful life.
+var supportedMACs = []string{
+	"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
+}
+
+var supportedCompressions = []string{compressionNone}
+
+// hashFuncs keeps the mapping of supported algorithms to their respective
+// hashes needed for signature verification.
+var hashFuncs = map[string]crypto.Hash{
+	KeyAlgoRSA:          crypto.SHA1,
+	KeyAlgoDSA:          crypto.SHA1,
+	KeyAlgoECDSA256:     crypto.SHA256,
+	KeyAlgoECDSA384:     crypto.SHA384,
+	KeyAlgoECDSA521:     crypto.SHA512,
+	CertAlgoRSAv01:      crypto.SHA1,
+	CertAlgoDSAv01:      crypto.SHA1,
+	CertAlgoECDSA256v01: crypto.SHA256,
+	CertAlgoECDSA384v01: crypto.SHA384,
+	CertAlgoECDSA521v01: crypto.SHA512,
+}
+
+// unexpectedMessageError results when the SSH message that we received didn't
+// match what we wanted.
+func unexpectedMessageError(expected, got uint8) error {
+	return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
+}
+
+// parseError results from a malformed SSH message.
+func parseError(tag uint8) error {
+	return fmt.Errorf("ssh: parse error in message type %d", tag)
+}
+
+func findCommon(what string, client []string, server []string) (common string, err error) {
+	for _, c := range client {
+		for _, s := range server {
+			if c == s {
+				return c, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
+}
+
+// directionAlgorithms records algorithm choices in one direction (either read or write)
+type directionAlgorithms struct {
+	Cipher      string
+	MAC         string
+	Compression string
+}
+
+// rekeyBytes returns the rekeying interval in bytes.
+func (a *directionAlgorithms) rekeyBytes() int64 {
+	// According to RFC 4344, block ciphers should rekey after
+	// 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
+	// 128.
+	switch a.Cipher {
+	case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
+		return 16 * (1 << 32)
+
+	}
+
+	// For others, stick with the RFC 4253 recommendation to rekey after 1 GB of data.
+	return 1 << 30
+}
+
+type algorithms struct {
+	kex     string
+	hostKey string
+	w       directionAlgorithms
+	r       directionAlgorithms
+}
+
+func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
+	result := &algorithms{}
+
+	result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
+	if err != nil {
+		return
+	}
+
+	result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
+	if err != nil {
+		return
+	}
+
+	stoc, ctos := &result.w, &result.r
+	if isClient {
+		ctos, stoc = stoc, ctos
+	}
+
+	ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
+	if err != nil {
+		return
+	}
+
+	stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
+	if err != nil {
+		return
+	}
+
+	ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
+	if err != nil {
+		return
+	}
+
+	stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
+	if err != nil {
+		return
+	}
+
+	ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
+	if err != nil {
+		return
+	}
+
+	stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
+	if err != nil {
+		return
+	}
+
+	return result, nil
+}
+
+// If RekeyThreshold is too small, we can't make any progress sending
+// data.
+const minRekeyThreshold uint64 = 256
+
+// Config contains configuration data common to both ServerConfig and
+// ClientConfig.
+type Config struct {
+	// Rand provides the source of entropy for cryptographic
+	// primitives. If Rand is nil, the cryptographic random reader
+	// in package crypto/rand will be used.
+	Rand io.Reader
+
+	// The maximum number of bytes sent or received after which a
+	// new key is negotiated. It must be at least 256. If
+	// unspecified, a size suitable for the chosen cipher is used.
+	RekeyThreshold uint64
+
+	// The allowed key exchange algorithms. If unspecified then a
+	// default set of algorithms is used.
+	KeyExchanges []string
+
+	// The allowed cipher algorithms. If unspecified then a sensible
+	// default is used.
+	Ciphers []string
+
+	// The allowed MAC algorithms. If unspecified then a sensible default
+	// is used.
+	MACs []string
+}
+
+// SetDefaults sets sensible values for unset fields in config. This is
+// exported for testing: Configs passed to SSH functions are copied and have
+// default values set automatically.
+func (c *Config) SetDefaults() {
+	if c.Rand == nil {
+		c.Rand = rand.Reader
+	}
+	if c.Ciphers == nil {
+		c.Ciphers = preferredCiphers
+	}
+	var ciphers []string
+	for _, c := range c.Ciphers {
+		if cipherModes[c] != nil {
+			// reject the cipher if we have no cipherModes definition
+			ciphers = append(ciphers, c)
+		}
+	}
+	c.Ciphers = ciphers
+
+	if c.KeyExchanges == nil {
+		c.KeyExchanges = supportedKexAlgos
+	}
+
+	if c.MACs == nil {
+		c.MACs = supportedMACs
+	}
+
+	if c.RekeyThreshold == 0 {
+		// cipher specific default
+	} else if c.RekeyThreshold < minRekeyThreshold {
+		c.RekeyThreshold = minRekeyThreshold
+	} else if c.RekeyThreshold >= math.MaxInt64 {
+		// Avoid weirdness if somebody uses -1 as a threshold.
+ c.RekeyThreshold = math.MaxInt64 + } +} + +// buildDataSignedForAuth returns the data that is signed in order to prove +// possession of a private key. See RFC 4252, section 7. +func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { + data := struct { + Session []byte + Type byte + User string + Service string + Method string + Sign bool + Algo []byte + PubKey []byte + }{ + sessionID, + msgUserAuthRequest, + req.User, + req.Service, + req.Method, + true, + algo, + pubKey, + } + return Marshal(data) +} + +func appendU16(buf []byte, n uint16) []byte { + return append(buf, byte(n>>8), byte(n)) +} + +func appendU32(buf []byte, n uint32) []byte { + return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendU64(buf []byte, n uint64) []byte { + return append(buf, + byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), + byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendInt(buf []byte, n int) []byte { + return appendU32(buf, uint32(n)) +} + +func appendString(buf []byte, s string) []byte { + buf = appendU32(buf, uint32(len(s))) + buf = append(buf, s...) + return buf +} + +func appendBool(buf []byte, b bool) []byte { + if b { + return append(buf, 1) + } + return append(buf, 0) +} + +// newCond is a helper to hide the fact that there is no usable zero +// value for sync.Cond. +func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } + +// window represents the buffer available to clients +// wishing to write to a channel. +type window struct { + *sync.Cond + win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 + writeWaiters int + closed bool +} + +// add adds win to the amount of window available +// for consumers. +func (w *window) add(win uint32) bool { + // a zero sized window adjust is a noop. + if win == 0 { + return true + } + w.L.Lock() + if w.win+win < win { + w.L.Unlock() + return false + } + w.win += win + // It is unusual that multiple goroutines would be attempting to reserve + // window space, but not guaranteed. Use broadcast to notify all waiters + // that additional window is available. + w.Broadcast() + w.L.Unlock() + return true +} + +// close sets the window to closed, so all reservations fail +// immediately. +func (w *window) close() { + w.L.Lock() + w.closed = true + w.Broadcast() + w.L.Unlock() +} + +// reserve reserves win from the available window capacity. +// If no capacity remains, reserve will block. reserve may +// return less than requested. +func (w *window) reserve(win uint32) (uint32, error) { + var err error + w.L.Lock() + w.writeWaiters++ + w.Broadcast() + for w.win == 0 && !w.closed { + w.Wait() + } + w.writeWaiters-- + if w.win < win { + win = w.win + } + w.win -= win + if w.closed { + err = io.EOF + } + w.L.Unlock() + return win, err +} + +// waitWriterBlocked waits until some goroutine is blocked for further +// writes. It is used in tests only. +func (w *window) waitWriterBlocked() { + w.Cond.L.Lock() + for w.writeWaiters == 0 { + w.Cond.Wait() + } + w.Cond.L.Unlock() +} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go new file mode 100644 index 00000000..fd6b0681 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/connection.go @@ -0,0 +1,143 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package ssh
+
+import (
+	"fmt"
+	"net"
+)
+
+// OpenChannelError is returned if the other side rejects an
+// OpenChannel request.
+type OpenChannelError struct {
+	Reason  RejectionReason
+	Message string
+}
+
+func (e *OpenChannelError) Error() string {
+	return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
+}
+
+// ConnMetadata holds metadata for the connection.
+type ConnMetadata interface {
+	// User returns the user ID for this connection.
+	User() string
+
+	// SessionID returns the session hash, also denoted by H.
+	SessionID() []byte
+
+	// ClientVersion returns the client's version string as hashed
+	// into the session ID.
+	ClientVersion() []byte
+
+	// ServerVersion returns the server's version string as hashed
+	// into the session ID.
+	ServerVersion() []byte
+
+	// RemoteAddr returns the remote address for this connection.
+	RemoteAddr() net.Addr
+
+	// LocalAddr returns the local address for this connection.
+	LocalAddr() net.Addr
+}
+
+// Conn represents an SSH connection for both server and client roles.
+// Conn is the basis for implementing an application layer, such
+// as ClientConn, which implements the traditional shell access for
+// clients.
+type Conn interface {
+	ConnMetadata
+
+	// SendRequest sends a global request, and returns the
+	// reply. If wantReply is true, it returns the response status
+	// and payload. See also RFC4254, section 4.
+	SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
+
+	// OpenChannel tries to open a channel. If the request is
+	// rejected, it returns *OpenChannelError. On success it returns
+	// the SSH Channel and a Go channel for incoming, out-of-band
+	// requests. The Go channel must be serviced, or the
+	// connection will hang.
+	OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
+
+	// Close closes the underlying network connection.
+	Close() error
+
+	// Wait blocks until the connection has shut down, and returns the
+	// error causing the shutdown.
+	Wait() error
+
+	// TODO(hanwen): consider exposing:
+	// RequestKeyChange
+	// Disconnect
+}
+
+// DiscardRequests consumes and rejects all requests from the
+// passed-in channel.
+func DiscardRequests(in <-chan *Request) {
+	for req := range in {
+		if req.WantReply {
+			req.Reply(false, nil)
+		}
+	}
+}
+
+// A connection represents an incoming connection.
+type connection struct {
+	transport *handshakeTransport
+	sshConn
+
+	// The connection protocol.
+	*mux
+}
+
+func (c *connection) Close() error {
+	return c.sshConn.conn.Close()
+}
+
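A sketch of driving the Conn interface above: open a raw channel and keep its request channel serviced, as the documentation requires (the channel type is illustrative):

package sshutil

import "golang.org/x/crypto/ssh"

// openRawSession opens a "session" channel and discards its out-of-band
// requests so the connection cannot stall.
func openRawSession(conn ssh.Conn) (ssh.Channel, error) {
	ch, reqs, err := conn.OpenChannel("session", nil)
	if err != nil {
		return nil, err // *ssh.OpenChannelError if the peer rejected it
	}
	go ssh.DiscardRequests(reqs)
	return ch, nil
}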
+// sshConn provides net.Conn metadata, but disallows direct reads and
+// writes.
+type sshConn struct {
+	conn net.Conn
+
+	user          string
+	sessionID     []byte
+	clientVersion []byte
+	serverVersion []byte
+}
+
+func dup(src []byte) []byte {
+	dst := make([]byte, len(src))
+	copy(dst, src)
+	return dst
+}
+
+func (c *sshConn) User() string {
+	return c.user
+}
+
+func (c *sshConn) RemoteAddr() net.Addr {
+	return c.conn.RemoteAddr()
+}
+
+func (c *sshConn) Close() error {
+	return c.conn.Close()
+}
+
+func (c *sshConn) LocalAddr() net.Addr {
+	return c.conn.LocalAddr()
+}
+
+func (c *sshConn) SessionID() []byte {
+	return dup(c.sessionID)
+}
+
+func (c *sshConn) ClientVersion() []byte {
+	return dup(c.clientVersion)
+}
+
+func (c *sshConn) ServerVersion() []byte {
+	return dup(c.serverVersion)
+}
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
new file mode 100644
index 00000000..67b7322c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/doc.go
@@ -0,0 +1,21 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package ssh implements an SSH client and server.
+
+SSH is a transport security protocol, an authentication protocol and a
+family of application protocols. The most typical application level
+protocol is a remote shell and this is specifically implemented. However,
+the multiplexed nature of SSH is exposed to users who wish to support
+other protocols.
+
+References:
+  [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
+  [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
+
+This package does not fall under the stability promise of the Go language itself,
+so its API may be changed when pressing needs arise.
+*/
+package ssh // import "golang.org/x/crypto/ssh"
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
new file mode 100644
index 00000000..2b10b05a
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -0,0 +1,647 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"sync"
+)
+
+// debugHandshake, if set, prints messages sent and received. Key
+// exchange messages are printed as if DH were used, so the debug
+// messages are wrong when using ECDH.
+const debugHandshake = false
+
+// chanSize sets the amount of buffering for SSH connections. This is
+// primarily for testing: setting chanSize=0 uncovers deadlocks more
+// quickly.
+const chanSize = 16
+
+// keyingTransport is a packet-based transport that supports key
+// changes. It need not be thread-safe. It should pass through
+// msgNewKeys in both directions.
+type keyingTransport interface {
+	packetConn
+
+	// prepareKeyChange sets up a key change. The key change for a
+	// direction will be effected if a msgNewKeys message is sent
+	// or received.
+	prepareKeyChange(*algorithms, *kexResult) error
+}
+
+// handshakeTransport implements rekeying on top of a keyingTransport
+// and offers a thread-safe writePacket() interface.
+type handshakeTransport struct {
+	conn   keyingTransport
+	config *Config
+
+	serverVersion []byte
+	clientVersion []byte
+
+	// hostKeys is non-empty if we are the server. In that case,
+	// it contains all host keys that can be used to sign the
+	// connection.
+ hostKeys []Signer + + // hostKeyAlgorithms is non-empty if we are the client. In that case, + // we accept these key types from the server as host key. + hostKeyAlgorithms []string + + // On read error, incoming is closed, and readError is set. + incoming chan []byte + readError error + + mu sync.Mutex + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + pendingPackets [][]byte // Used when a key exchange is in progress. + + // If the read loop wants to schedule a kex, it pings this + // channel, and the write loop will send out a kex + // message. + requestKex chan struct{} + + // If the other side requests or confirms a kex, its kexInit + // packet is sent here for the write loop to find it. + startKex chan *pendingKex + + // data for host key checking + hostKeyCallback HostKeyCallback + dialAddress string + remoteAddr net.Addr + + // bannerCallback is non-empty if we are the client and it has been set in + // ClientConfig. In that case it is called during the user authentication + // dance to handle a custom server's message. + bannerCallback BannerCallback + + // Algorithms agreed in the last key exchange. + algorithms *algorithms + + readPacketsLeft uint32 + readBytesLeft int64 + + writePacketsLeft uint32 + writeBytesLeft int64 + + // The session ID or nil if first kex did not complete yet. + sessionID []byte +} + +type pendingKex struct { + otherInit []byte + done chan error +} + +func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { + t := &handshakeTransport{ + conn: conn, + serverVersion: serverVersion, + clientVersion: clientVersion, + incoming: make(chan []byte, chanSize), + requestKex: make(chan struct{}, 1), + startKex: make(chan *pendingKex, 1), + + config: config, + } + t.resetReadThresholds() + t.resetWriteThresholds() + + // We always start with a mandatory key exchange. + t.requestKex <- struct{}{} + return t +} + +func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.dialAddress = dialAddr + t.remoteAddr = addr + t.hostKeyCallback = config.HostKeyCallback + t.bannerCallback = config.BannerCallback + if config.HostKeyAlgorithms != nil { + t.hostKeyAlgorithms = config.HostKeyAlgorithms + } else { + t.hostKeyAlgorithms = supportedHostKeyAlgos + } + go t.readLoop() + go t.kexLoop() + return t +} + +func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.hostKeys = config.hostKeys + go t.readLoop() + go t.kexLoop() + return t +} + +func (t *handshakeTransport) getSessionID() []byte { + return t.sessionID +} + +// waitSession waits for the session to be established. This should be +// the first thing to call after instantiating handshakeTransport. 
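+// It blocks until the first key exchange has completed, which readOnePacket
+// surfaces as a msgNewKeys packet.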
+func (t *handshakeTransport) waitSession() error { + p, err := t.readPacket() + if err != nil { + return err + } + if p[0] != msgNewKeys { + return fmt.Errorf("ssh: first packet should be msgNewKeys") + } + + return nil +} + +func (t *handshakeTransport) id() string { + if len(t.hostKeys) > 0 { + return "server" + } + return "client" +} + +func (t *handshakeTransport) printPacket(p []byte, write bool) { + action := "got" + if write { + action = "sent" + } + + if p[0] == msgChannelData || p[0] == msgChannelExtendedData { + log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) + } else { + msg, err := decode(p) + log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) + } +} + +func (t *handshakeTransport) readPacket() ([]byte, error) { + p, ok := <-t.incoming + if !ok { + return nil, t.readError + } + return p, nil +} + +func (t *handshakeTransport) readLoop() { + first := true + for { + p, err := t.readOnePacket(first) + first = false + if err != nil { + t.readError = err + close(t.incoming) + break + } + if p[0] == msgIgnore || p[0] == msgDebug { + continue + } + t.incoming <- p + } + + // Stop writers too. + t.recordWriteError(t.readError) + + // Unblock the writer should it wait for this. + close(t.startKex) + + // Don't close t.requestKex; it's also written to from writePacket. +} + +func (t *handshakeTransport) pushPacket(p []byte) error { + if debugHandshake { + t.printPacket(p, true) + } + return t.conn.writePacket(p) +} + +func (t *handshakeTransport) getWriteError() error { + t.mu.Lock() + defer t.mu.Unlock() + return t.writeError +} + +func (t *handshakeTransport) recordWriteError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError == nil && err != nil { + t.writeError = err + } +} + +func (t *handshakeTransport) requestKeyExchange() { + select { + case t.requestKex <- struct{}{}: + default: + // something already requested a kex, so do nothing. + } +} + +func (t *handshakeTransport) resetWriteThresholds() { + t.writePacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.writeBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.writeBytesLeft = t.algorithms.w.rekeyBytes() + } else { + t.writeBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) kexLoop() { + +write: + for t.getWriteError() == nil { + var request *pendingKex + var sent bool + + for request == nil || !sent { + var ok bool + select { + case request, ok = <-t.startKex: + if !ok { + break write + } + case <-t.requestKex: + break + } + + if !sent { + if err := t.sendKexInit(); err != nil { + t.recordWriteError(err) + break + } + sent = true + } + } + + if err := t.getWriteError(); err != nil { + if request != nil { + request.done <- err + } + break + } + + // We're not servicing t.requestKex, but that is OK: + // we never block on sending to t.requestKex. + + // We're not servicing t.startKex, but the remote end + // has just sent us a kexInitMsg, so it can't send + // another key change request, until we close the done + // channel on the pendingKex request. + + err := t.enterKeyExchange(request.otherInit) + + t.mu.Lock() + t.writeError = err + t.sentInitPacket = nil + t.sentInitMsg = nil + + t.resetWriteThresholds() + + // we have completed the key exchange. Since the + // reader is still blocked, it is safe to clear out + // the requestKex channel. 
This avoids the situation + // where: 1) we consumed our own request for the + // initial kex, and 2) the kex from the remote side + // caused another send on the requestKex channel, + clear: + for { + select { + case <-t.requestKex: + // + default: + break clear + } + } + + request.done <- t.writeError + + // kex finished. Push packets that we received while + // the kex was in progress. Don't look at t.startKex + // and don't increment writtenSinceKex: if we trigger + // another kex while we are still busy with the last + // one, things will become very confusing. + for _, p := range t.pendingPackets { + t.writeError = t.pushPacket(p) + if t.writeError != nil { + break + } + } + t.pendingPackets = t.pendingPackets[:0] + t.mu.Unlock() + } + + // drain startKex channel. We don't service t.requestKex + // because nobody does blocking sends there. + go func() { + for init := range t.startKex { + init.done <- t.writeError + } + }() + + // Unblock reader. + t.conn.Close() +} + +// The protocol uses uint32 for packet counters, so we can't let them +// reach 1<<32. We will actually read and write more packets than +// this, though: the other side may send more packets, and after we +// hit this limit on writing we will send a few more packets for the +// key exchange itself. +const packetRekeyThreshold = (1 << 31) + +func (t *handshakeTransport) resetReadThresholds() { + t.readPacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.readBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.readBytesLeft = t.algorithms.r.rekeyBytes() + } else { + t.readBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { + p, err := t.conn.readPacket() + if err != nil { + return nil, err + } + + if t.readPacketsLeft > 0 { + t.readPacketsLeft-- + } else { + t.requestKeyExchange() + } + + if t.readBytesLeft > 0 { + t.readBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if debugHandshake { + t.printPacket(p, false) + } + + if first && p[0] != msgKexInit { + return nil, fmt.Errorf("ssh: first packet should be msgKexInit") + } + + if p[0] != msgKexInit { + return p, nil + } + + firstKex := t.sessionID == nil + + kex := pendingKex{ + done: make(chan error, 1), + otherInit: p, + } + t.startKex <- &kex + err = <-kex.done + + if debugHandshake { + log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) + } + + if err != nil { + return nil, err + } + + t.resetReadThresholds() + + // By default, a key exchange is hidden from higher layers by + // translating it into msgIgnore. + successPacket := []byte{msgIgnore} + if firstKex { + // sendKexInit() for the first kex waits for + // msgNewKeys so the authentication process is + // guaranteed to happen over an encrypted transport. + successPacket = []byte{msgNewKeys} + } + + return successPacket, nil +} + +// sendKexInit sends a key change message. +func (t *handshakeTransport) sendKexInit() error { + t.mu.Lock() + defer t.mu.Unlock() + if t.sentInitMsg != nil { + // kexInits may be sent either in response to the other side, + // or because our side wants to initiate a key change, so we + // may have already sent a kexInit. In that case, don't send a + // second kexInit. 
+ return nil + } + + msg := &kexInitMsg{ + KexAlgos: t.config.KeyExchanges, + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, + MACsServerClient: t.config.MACs, + CompressionClientServer: supportedCompressions, + CompressionServerClient: supportedCompressions, + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + + if len(t.hostKeys) > 0 { + for _, k := range t.hostKeys { + msg.ServerHostKeyAlgos = append( + msg.ServerHostKeyAlgos, k.PublicKey().Type()) + } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + } + packet := Marshal(msg) + + // writePacket destroys the contents, so save a copy. + packetCopy := make([]byte, len(packet)) + copy(packetCopy, packet) + + if err := t.pushPacket(packetCopy); err != nil { + return err + } + + t.sentInitMsg = msg + t.sentInitPacket = packet + + return nil +} + +func (t *handshakeTransport) writePacket(p []byte) error { + switch p[0] { + case msgKexInit: + return errors.New("ssh: only handshakeTransport can send kexInit") + case msgNewKeys: + return errors.New("ssh: only handshakeTransport can send newKeys") + } + + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError != nil { + return t.writeError + } + + if t.sentInitMsg != nil { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + + if t.writeBytesLeft > 0 { + t.writeBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if t.writePacketsLeft > 0 { + t.writePacketsLeft-- + } else { + t.requestKeyExchange() + } + + if err := t.pushPacket(p); err != nil { + t.writeError = err + } + + return nil +} + +func (t *handshakeTransport) Close() error { + return t.conn.Close() +} + +func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + if debugHandshake { + log.Printf("%s entered key exchange", t.id()) + } + + otherInit := &kexInitMsg{} + if err := Unmarshal(otherInitPacket, otherInit); err != nil { + return err + } + + magics := handshakeMagics{ + clientVersion: t.clientVersion, + serverVersion: t.serverVersion, + clientKexInit: otherInitPacket, + serverKexInit: t.sentInitPacket, + } + + clientInit := otherInit + serverInit := t.sentInitMsg + isClient := len(t.hostKeys) == 0 + if isClient { + clientInit, serverInit = serverInit, clientInit + + magics.clientKexInit = t.sentInitPacket + magics.serverKexInit = otherInitPacket + } + + var err error + t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) + if err != nil { + return err + } + + // We don't send FirstKexFollows, but we handle receiving it. + // + // RFC 4253 section 7 defines the kex and the agreement method for + // first_kex_packet_follows. It states that the guessed packet + // should be ignored if the "kex algorithm and/or the host + // key algorithm is guessed wrong (server and client have + // different preferred algorithm), or if any of the other + // algorithms cannot be agreed upon". The other algorithms have + // already been checked above so the kex algorithm and host key + // algorithm are checked here. + if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { + // other side sent a kex message for the wrong algorithm, + // which we have to ignore. 
+ if _, err := t.conn.readPacket(); err != nil { + return err + } + } + + kex, ok := kexAlgoMap[t.algorithms.kex] + if !ok { + return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) + } + + var result *kexResult + if len(t.hostKeys) > 0 { + result, err = t.server(kex, t.algorithms, &magics) + } else { + result, err = t.client(kex, t.algorithms, &magics) + } + + if err != nil { + return err + } + + if t.sessionID == nil { + t.sessionID = result.H + } + result.SessionID = t.sessionID + + if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { + return err + } + if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { + return err + } + if packet, err := t.conn.readPacket(); err != nil { + return err + } else if packet[0] != msgNewKeys { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + var hostKey Signer + for _, k := range t.hostKeys { + if algs.hostKey == k.PublicKey().Type() { + hostKey = k + } + } + + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) + return r, err +} + +func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + result, err := kex.Client(t.conn, t.config.Rand, magics) + if err != nil { + return nil, err + } + + hostKey, err := ParsePublicKey(result.HostKey) + if err != nil { + return nil, err + } + + if err := verifyHostKeySignature(hostKey, result); err != nil { + return nil, err + } + + err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) + if err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go new file mode 100644 index 00000000..16072004 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -0,0 +1,789 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + + "golang.org/x/crypto/curve25519" +) + +const ( + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" + + // For the following kex only the client half contains a production + // ready implementation. The server half only consists of a minimal + // implementation to satisfy the automated tests. + kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" + kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" +) + +// kexResult captures the outcome of a key exchange. +type kexResult struct { + // Session hash. See also RFC 4253, section 8. + H []byte + + // Shared secret. See also RFC 4253, section 8. + K []byte + + // Host key as hashed into H. + HostKey []byte + + // Signature of H. + Signature []byte + + // A cryptographic hash function that matches the security + // level of the key exchange algorithm. It is used for + // calculating H, and for deriving keys from H and K. + Hash crypto.Hash + + // The session ID, which is the first H computed. This is used + // to derive key material inside the transport. 
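How H, K and the session ID become cipher and MAC keys is specified in RFC 4253, section 7.2. A standalone sketch of that expansion, assuming k is already mpint-encoded the way kexResult.K is ('letter' is 'A' through 'F' depending on direction and key purpose):

    package main

    import (
        "crypto"
        _ "crypto/sha256"
        "fmt"
    )

    // deriveKey computes HASH(K || H || letter || session_id), then
    // extends with HASH(K || H || output-so-far) until length bytes
    // are available, per RFC 4253 section 7.2.
    func deriveKey(h crypto.Hash, k, exH, sessionID []byte, letter byte, length int) []byte {
        d := h.New()
        d.Write(k)
        d.Write(exH)
        d.Write([]byte{letter})
        d.Write(sessionID)
        out := d.Sum(nil)
        for len(out) < length {
            d.Reset()
            d.Write(k)
            d.Write(exH)
            d.Write(out)
            out = d.Sum(out) // append next hash block to the key material
        }
        return out[:length]
    }

    func main() {
        k := []byte{0, 0, 0, 1, 0x42}    // toy mpint-encoded shared secret
        exH := []byte("exchange hash H") // toy exchange hash
        iv := deriveKey(crypto.SHA256, k, exH, exH, 'A', 16) // session id = first H
        fmt.Printf("client-to-server IV: %x\n", iv)
    }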
+ SessionID []byte +} + +// handshakeMagics contains data that is always included in the +// session hash. +type handshakeMagics struct { + clientVersion, serverVersion []byte + clientKexInit, serverKexInit []byte +} + +func (m *handshakeMagics) write(w io.Writer) { + writeString(w, m.clientVersion) + writeString(w, m.serverVersion) + writeString(w, m.clientKexInit) + writeString(w, m.serverKexInit) +} + +// kexAlgorithm abstracts different key exchange algorithms. +type kexAlgorithm interface { + // Server runs server-side key agreement, signing the result + // with a hostkey. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + + // Client runs the client-side key agreement. Caller is + // responsible for verifying the host key signature. + Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) +} + +// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. +type dhGroup struct { + g, p, pMinus1 *big.Int +} + +func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil +} + +func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { + hashFunc := crypto.SHA1 + + var x *big.Int + for { + var err error + if x, err = rand.Int(randSource, group.pMinus1); err != nil { + return nil, err + } + if x.Sign() > 0 { + break + } + } + + X := new(big.Int).Exp(group.g, x, group.p) + kexDHInit := kexDHInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHInit)); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexDHReply kexDHReplyMsg + if err = Unmarshal(packet, &kexDHReply); err != nil { + return nil, err + } + + ki, err := group.diffieHellman(kexDHReply.Y, x) + if err != nil { + return nil, err + } + + h := hashFunc.New() + magics.write(h) + writeString(h, kexDHReply.HostKey) + writeInt(h, X) + writeInt(h, kexDHReply.Y) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHReply.HostKey, + Signature: kexDHReply.Signature, + Hash: crypto.SHA1, + }, nil +} + +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + hashFunc := crypto.SHA1 + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHInit kexDHInitMsg + if err = Unmarshal(packet, &kexDHInit); err != nil { + return + } + + var y *big.Int + for { + if y, err = rand.Int(randSource, group.pMinus1); err != nil { + return + } + if y.Sign() > 0 { + break + } + } + + Y := new(big.Int).Exp(group.g, y, group.p) + ki, err := group.diffieHellman(kexDHInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeInt(h, kexDHInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. 
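Both calls to diffieHellman above arrive at the same value because (g^x)^y = (g^y)^x (mod p). A toy demonstration with deliberately tiny numbers (real exchanges use the 1024- and 2048-bit primes registered in kexAlgoMap later in this file):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        p := big.NewInt(23) // toy modulus; never use outside examples
        g := big.NewInt(5)
        x, y := big.NewInt(6), big.NewInt(15) // client and server secrets
        X := new(big.Int).Exp(g, x, p)        // client public value
        Y := new(big.Int).Exp(g, y, p)        // server public value
        kClient := new(big.Int).Exp(Y, x, p)
        kServer := new(big.Int).Exp(X, y, p)
        fmt.Println(kClient, kServer) // 2 2
    }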
+ sig, err := signAndMarshal(priv, randSource, H) + if err != nil { + return nil, err + } + + kexDHReply := kexDHReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHReply) + + err = c.writePacket(packet) + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA1, + }, nil +} + +// ecdh performs Elliptic Curve Diffie-Hellman key exchange as +// described in RFC 5656, section 4. +type ecdh struct { + curve elliptic.Curve +} + +func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + kexInit := kexECDHInitMsg{ + ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), + } + + serialized := Marshal(&kexInit) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + + x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) + if err != nil { + return nil, err + } + + // generate shared secret + secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kexInit.ClientPubKey) + writeString(h, reply.EphemeralPubKey) + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: ecHash(kex.curve), + }, nil +} + +// unmarshalECKey parses and checks an EC key. +func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { + x, y = elliptic.Unmarshal(curve, pubkey) + if x == nil { + return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") + } + if !validateECPublicKey(curve, x, y) { + return nil, nil, errors.New("ssh: public key not on curve") + } + return x, y, nil +} + +// validateECPublicKey checks that the point is a valid public key for +// the given curve. See [SEC1], 3.2.2 +func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { + if x.Sign() == 0 && y.Sign() == 0 { + return false + } + + if x.Cmp(curve.Params().P) >= 0 { + return false + } + + if y.Cmp(curve.Params().P) >= 0 { + return false + } + + if !curve.IsOnCurve(x, y) { + return false + } + + // We don't check if N * PubKey == 0, since + // + // - the NIST curves have cofactor = 1, so this is implicit. + // (We don't foresee an implementation that supports non NIST + // curves) + // + // - for ephemeral keys, we don't need to worry about small + // subgroup attacks. + return true +} + +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexECDHInit kexECDHInitMsg + if err = Unmarshal(packet, &kexECDHInit); err != nil { + return nil, err + } + + clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) + if err != nil { + return nil, err + } + + // We could cache this key across multiple users/multiple + // connection attempts, but the benefit is small. OpenSSH + // generates a new key for each incoming connection. 
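The NIST-curve exchange has the same shape: each side multiplies the peer's point by its own scalar, and the X coordinates agree. A self-contained sketch using the same crypto/elliptic primitives:

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"
        "log"
    )

    func main() {
        curve := elliptic.P256()
        client, err := ecdsa.GenerateKey(curve, rand.Reader)
        if err != nil {
            log.Fatal(err)
        }
        server, err := ecdsa.GenerateKey(curve, rand.Reader)
        if err != nil {
            log.Fatal(err)
        }
        // Each side computes peerPublic * ownScalar; the X coordinates
        // match and become the shared secret that is hashed into K.
        xc, _ := curve.ScalarMult(server.PublicKey.X, server.PublicKey.Y, client.D.Bytes())
        xs, _ := curve.ScalarMult(client.PublicKey.X, client.PublicKey.Y, server.D.Bytes())
        fmt.Println(xc.Cmp(xs) == 0) // true
    }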
+ ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) + + // generate shared secret + secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexECDHInit.ClientPubKey) + writeString(h, serializedEphKey) + + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: serializedEphKey, + HostKey: hostKeyBytes, + Signature: sig, + } + + serialized := Marshal(&reply) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + return &kexResult{ + H: H, + K: K, + HostKey: reply.HostKey, + Signature: sig, + Hash: ecHash(kex.curve), + }, nil +} + +var kexAlgoMap = map[string]kexAlgorithm{} + +func init() { + // This is the group called diffie-hellman-group1-sha1 in RFC + // 4253 and Oakley Group 2 in RFC 2409. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) + kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} + kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} + kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} + kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} + kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} + kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} +} + +// curve25519sha256 implements the curve25519-sha256@libssh.org key +// agreement protocol, as described in +// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +type curve25519sha256 struct{} + +type curve25519KeyPair struct { + priv [32]byte + pub [32]byte +} + +func (kp *curve25519KeyPair) generate(rand io.Reader) error { + if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { + return err + } + curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + return nil +} + +// curve25519Zeros is just an array of 32 zero bytes so that we have something +// convenient to compare against in order to reject curve25519 points with the +// wrong order. 
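A standalone sketch of the same agreement via the x/crypto/curve25519 package; recent versions of that package export an X25519 helper which performs the low-order (all-zeros) rejection itself, the check this file does by hand against curve25519Zeros:

    package main

    import (
        "bytes"
        "crypto/rand"
        "fmt"
        "log"

        "golang.org/x/crypto/curve25519"
    )

    func main() {
        var aPriv, bPriv [32]byte
        if _, err := rand.Read(aPriv[:]); err != nil {
            log.Fatal(err)
        }
        if _, err := rand.Read(bPriv[:]); err != nil {
            log.Fatal(err)
        }
        aPub, _ := curve25519.X25519(aPriv[:], curve25519.Basepoint)
        bPub, _ := curve25519.X25519(bPriv[:], curve25519.Basepoint)
        s1, err := curve25519.X25519(aPriv[:], bPub) // errors on an all-zero secret
        if err != nil {
            log.Fatal(err)
        }
        s2, _ := curve25519.X25519(bPriv[:], aPub)
        fmt.Println(bytes.Equal(s1, s2)) // true
    }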
+var curve25519Zeros [32]byte + +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + if len(reply.EphemeralPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var servPub, secret [32]byte + copy(servPub[:], reply.EphemeralPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &servPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kp.pub[:]) + writeString(h, reply.EphemeralPubKey) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return + } + + if len(kexInit.ClientPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + + var clientPub, secret [32]byte + copy(clientPub[:], kexInit.ClientPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &clientPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, kp.pub[:]) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: kp.pub[:], + HostKey: hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil +} + +// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and +// diffie-hellman-group-exchange-sha256 key agreement protocols, +// as described in RFC 4419 +type dhGEXSHA struct { + g, p *big.Int + hashFunc crypto.Hash +} + +const numMRTests = 64 + +const ( + dhGroupExchangeMinimumBits = 2048 + dhGroupExchangePreferredBits = 2048 + dhGroupExchangeMaximumBits = 8192 +) + +func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 { + return nil, fmt.Errorf("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil +} + +func (gex *dhGEXSHA) Client(c packetConn, randSource 
io.Reader, magics *handshakeMagics) (*kexResult, error) { + // Send GexRequest + kexDHGexRequest := kexDHGexRequestMsg{ + MinBits: dhGroupExchangeMinimumBits, + PreferedBits: dhGroupExchangePreferredBits, + MaxBits: dhGroupExchangeMaximumBits, + } + if err := c.writePacket(Marshal(&kexDHGexRequest)); err != nil { + return nil, err + } + + // Receive GexGroup + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexDHGexGroup kexDHGexGroupMsg + if err = Unmarshal(packet, &kexDHGexGroup); err != nil { + return nil, err + } + + // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits + if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits { + return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen()) + } + + gex.p = kexDHGexGroup.P + gex.g = kexDHGexGroup.G + + // Check if p is safe by verifing that p and (p-1)/2 are primes + one := big.NewInt(1) + var pHalf = &big.Int{} + pHalf.Rsh(gex.p, 1) + if !gex.p.ProbablyPrime(numMRTests) || !pHalf.ProbablyPrime(numMRTests) { + return nil, fmt.Errorf("ssh: server provided gex p is not safe") + } + + // Check if g is safe by verifing that g > 1 and g < p - 1 + var pMinusOne = &big.Int{} + pMinusOne.Sub(gex.p, one) + if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 { + return nil, fmt.Errorf("ssh: server provided gex g is not safe") + } + + // Send GexInit + x, err := rand.Int(randSource, pHalf) + if err != nil { + return nil, err + } + X := new(big.Int).Exp(gex.g, x, gex.p) + kexDHGexInit := kexDHGexInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil { + return nil, err + } + + // Receive GexReply + packet, err = c.readPacket() + if err != nil { + return nil, err + } + + var kexDHGexReply kexDHGexReplyMsg + if err = Unmarshal(packet, &kexDHGexReply); err != nil { + return nil, err + } + + kInt, err := gex.diffieHellman(kexDHGexReply.Y, x) + if err != nil { + return nil, err + } + + // Check if k is safe by verifing that k > 1 and k < p - 1 + if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 { + return nil, fmt.Errorf("ssh: derived k is not safe") + } + + h := gex.hashFunc.New() + magics.write(h) + writeString(h, kexDHGexReply.HostKey) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) + writeInt(h, gex.p) + writeInt(h, gex.g) + writeInt(h, X) + writeInt(h, kexDHGexReply.Y) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHGexReply.HostKey, + Signature: kexDHGexReply.Signature, + Hash: gex.hashFunc, + }, nil +} + +// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. +// +// This is a minimal implementation to satisfy the automated tests. 
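The client above insists on a safe prime, i.e. p = 2q+1 with q also prime, so the group has no small subgroups for an attacker to steer the exchange into. That test in isolation, using the same 64 Miller-Rabin rounds as numMRTests:

    package main

    import (
        "fmt"
        "math/big"
    )

    // isSafePrime reports whether p and (p-1)/2 both pass 64 rounds of
    // Miller-Rabin, matching the client-side gex group check.
    func isSafePrime(p *big.Int) bool {
        q := new(big.Int).Rsh(p, 1) // (p-1)/2 for odd p
        return p.ProbablyPrime(64) && q.ProbablyPrime(64)
    }

    func main() {
        fmt.Println(isSafePrime(big.NewInt(23))) // true: 23 = 2*11 + 1
        fmt.Println(isSafePrime(big.NewInt(29))) // false: (29-1)/2 = 14 is composite
    }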
+func (gex *dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + // Receive GexRequest + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHGexRequest kexDHGexRequestMsg + if err = Unmarshal(packet, &kexDHGexRequest); err != nil { + return + } + + // smoosh the user's preferred size into our own limits + if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits { + kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits + } + if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits { + kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits + } + // fix min/max if they're inconsistent. technically, we could just pout + // and hang up, but there's no harm in giving them the benefit of the + // doubt and just picking a bitsize for them. + if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits { + kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits + } + if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits { + kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits + } + + // Send GexGroup + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + gex.p = p + gex.g = big.NewInt(2) + + kexDHGexGroup := kexDHGexGroupMsg{ + P: gex.p, + G: gex.g, + } + if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil { + return nil, err + } + + // Receive GexInit + packet, err = c.readPacket() + if err != nil { + return + } + var kexDHGexInit kexDHGexInitMsg + if err = Unmarshal(packet, &kexDHGexInit); err != nil { + return + } + + var pHalf = &big.Int{} + pHalf.Rsh(gex.p, 1) + + y, err := rand.Int(randSource, pHalf) + if err != nil { + return + } + + Y := new(big.Int).Exp(gex.g, y, gex.p) + kInt, err := gex.diffieHellman(kexDHGexInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := gex.hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) + writeInt(h, gex.p) + writeInt(h, gex.g) + writeInt(h, kexDHGexInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. 
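The bounds-fixing at the top of this server half reads naturally as a small pure function; an illustrative restatement (not the package's code):

    package main

    import "fmt"

    // clampGexRequest forces the client's preferred group size into the
    // server's supported window, then makes min/max consistent with it,
    // as the server half above does with the RFC 4419 request fields.
    func clampGexRequest(min, preferred, max uint32) (uint32, uint32, uint32) {
        const lo, hi = 2048, 8192 // dhGroupExchange{Minimum,Maximum}Bits
        if preferred > hi {
            preferred = hi
        }
        if preferred < lo {
            preferred = lo
        }
        if min > preferred {
            min = preferred
        }
        if max < preferred {
            max = preferred
        }
        return min, preferred, max
    }

    func main() {
        fmt.Println(clampGexRequest(1024, 1536, 4096)) // 1024 2048 4096
    }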
+ sig, err := signAndMarshal(priv, randSource, H) + if err != nil { + return nil, err + } + + kexDHGexReply := kexDHGexReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHGexReply) + + err = c.writePacket(packet) + + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: gex.hashFunc, + }, err +} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go new file mode 100644 index 00000000..96980479 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -0,0 +1,1100 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/md5" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "strings" + + "golang.org/x/crypto/ed25519" +) + +// These constants represent the algorithm names for key types supported by this +// package. +const ( + KeyAlgoRSA = "ssh-rsa" + KeyAlgoDSA = "ssh-dss" + KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" + KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" + KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" +) + +// These constants represent non-default signature algorithms that are supported +// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See +// [PROTOCOL.agent] section 4.5.1 and +// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 +const ( + SigAlgoRSA = "ssh-rsa" + SigAlgoRSASHA2256 = "rsa-sha2-256" + SigAlgoRSASHA2512 = "rsa-sha2-512" +) + +// parsePubKey parses a public key of the given algorithm. +// Use ParsePublicKey for keys with prepended algorithm. +func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { + switch algo { + case KeyAlgoRSA: + return parseRSA(in) + case KeyAlgoDSA: + return parseDSA(in) + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + return parseECDSA(in) + case KeyAlgoED25519: + return parseED25519(in) + case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: + cert, err := parseCert(in, certToPrivAlgo(algo)) + if err != nil { + return nil, nil, err + } + return cert, nil, nil + } + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) +} + +// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format +// (see sshd(8) manual page) once the options and key type fields have been +// removed. +func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { + in = bytes.TrimSpace(in) + + i := bytes.IndexAny(in, " \t") + if i == -1 { + i = len(in) + } + base64Key := in[:i] + + key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) + n, err := base64.StdEncoding.Decode(key, base64Key) + if err != nil { + return nil, "", err + } + key = key[:n] + out, err = ParsePublicKey(key) + if err != nil { + return nil, "", err + } + comment = string(bytes.TrimSpace(in[i:])) + return out, comment, nil +} + +// ParseKnownHosts parses an entry in the format of the known_hosts file. +// +// The known_hosts format is documented in the sshd(8) manual page. This +// function will parse a single entry from in. On successful return, marker +// will contain the optional marker value (i.e. 
"cert-authority" or "revoked") +// or else be empty, hosts will contain the hosts that this entry matches, +// pubKey will contain the public key and comment will contain any trailing +// comment at the end of the line. See the sshd(8) manual page for the various +// forms that a host string can take. +// +// The unparsed remainder of the input will be returned in rest. This function +// can be called repeatedly to parse multiple entries. +// +// If no entries were found in the input then err will be io.EOF. Otherwise a +// non-nil err value indicates a parse error. +func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + // Strip out the beginning of the known_host key. + // This is either an optional marker or a (set of) hostname(s). + keyFields := bytes.Fields(in) + if len(keyFields) < 3 || len(keyFields) > 5 { + return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") + } + + // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated + // list of hosts + marker := "" + if keyFields[0][0] == '@' { + marker = string(keyFields[0][1:]) + keyFields = keyFields[1:] + } + + hosts := string(keyFields[0]) + // keyFields[1] contains the key type (e.g. “ssh-rsa”). + // However, that information is duplicated inside the + // base64-encoded key and so is ignored here. + + key := bytes.Join(keyFields[2:], []byte(" ")) + if pubKey, comment, err = parseAuthorizedKey(key); err != nil { + return "", nil, nil, "", nil, err + } + + return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil + } + + return "", nil, nil, "", nil, io.EOF +} + +// ParseAuthorizedKeys parses a public key from an authorized_keys +// file used in OpenSSH according to the sshd(8) manual page. +func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + return out, comment, options, rest, nil + } + + // No key type recognised. Maybe there's an options field at + // the beginning. 
+ var b byte + inQuote := false + var candidateOptions []string + optionStart := 0 + for i, b = range in { + isEnd := !inQuote && (b == ' ' || b == '\t') + if (b == ',' && !inQuote) || isEnd { + if i-optionStart > 0 { + candidateOptions = append(candidateOptions, string(in[optionStart:i])) + } + optionStart = i + 1 + } + if isEnd { + break + } + if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { + inQuote = !inQuote + } + } + for i < len(in) && (in[i] == ' ' || in[i] == '\t') { + i++ + } + if i == len(in) { + // Invalid line: unmatched quote + in = rest + continue + } + + in = in[i:] + i = bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + options = candidateOptions + return out, comment, options, rest, nil + } + + in = rest + continue + } + + return nil, "", nil, nil, errors.New("ssh: no key found") +} + +// ParsePublicKey parses an SSH public key formatted for use in +// the SSH wire protocol according to RFC 4253, section 6.6. +func ParsePublicKey(in []byte) (out PublicKey, err error) { + algo, in, ok := parseString(in) + if !ok { + return nil, errShortRead + } + var rest []byte + out, rest, err = parsePubKey(in, string(algo)) + if len(rest) > 0 { + return nil, errors.New("ssh: trailing junk in public key") + } + + return out, err +} + +// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH +// authorized_keys file. The return value ends with newline. +func MarshalAuthorizedKey(key PublicKey) []byte { + b := &bytes.Buffer{} + b.WriteString(key.Type()) + b.WriteByte(' ') + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(key.Marshal()) + e.Close() + b.WriteByte('\n') + return b.Bytes() +} + +// PublicKey is an abstraction of different types of public keys. +type PublicKey interface { + // Type returns the key's type, e.g. "ssh-rsa". + Type() string + + // Marshal returns the serialized key data in SSH wire format, + // with the name prefix. To unmarshal the returned data, use + // the ParsePublicKey function. + Marshal() []byte + + // Verify that sig is a signature on the given data using this + // key. This function will hash the data appropriately first. + Verify(data []byte, sig *Signature) error +} + +// CryptoPublicKey, if implemented by a PublicKey, +// returns the underlying crypto.PublicKey form of the key. +type CryptoPublicKey interface { + CryptoPublicKey() crypto.PublicKey +} + +// A Signer can create signatures that verify against a public key. +type Signer interface { + // PublicKey returns an associated PublicKey instance. + PublicKey() PublicKey + + // Sign returns raw signature for the given data. This method + // will apply the hash specified for the keytype to the data. + Sign(rand io.Reader, data []byte) (*Signature, error) +} + +// A AlgorithmSigner is a Signer that also supports specifying a specific +// algorithm to use for signing. +type AlgorithmSigner interface { + Signer + + // SignWithAlgorithm is like Signer.Sign, but allows specification of a + // non-default signing algorithm. See the SigAlgo* constants in this + // package for signature algorithms supported by this package. Callers may + // pass an empty string for the algorithm in which case the AlgorithmSigner + // will use its default algorithm. 
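A short sketch of that interface in use: RSA signers produced by this package also satisfy AlgorithmSigner, so a caller can request the SHA-256 variant explicitly:

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "fmt"
        "log"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            log.Fatal(err)
        }
        signer, err := ssh.NewSignerFromKey(rsaKey)
        if err != nil {
            log.Fatal(err)
        }
        algSigner, ok := signer.(ssh.AlgorithmSigner)
        if !ok {
            log.Fatal("signer does not support algorithm selection")
        }
        data := []byte("payload")
        sig, err := algSigner.SignWithAlgorithm(rand.Reader, data, ssh.SigAlgoRSASHA2256)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(sig.Format) // rsa-sha2-256
        // Verify picks the hash from sig.Format, as shown further down.
        fmt.Println(signer.PublicKey().Verify(data, sig)) // <nil>
    }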
+ SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) +} + +type rsaPublicKey rsa.PublicKey + +func (r *rsaPublicKey) Type() string { + return "ssh-rsa" +} + +// parseRSA parses an RSA key according to RFC 4253, section 6.6. +func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + E *big.Int + N *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if w.E.BitLen() > 24 { + return nil, nil, errors.New("ssh: exponent too large") + } + e := w.E.Int64() + if e < 3 || e&1 == 0 { + return nil, nil, errors.New("ssh: incorrect exponent") + } + + var key rsa.PublicKey + key.E = int(e) + key.N = w.N + return (*rsaPublicKey)(&key), w.Rest, nil +} + +func (r *rsaPublicKey) Marshal() []byte { + e := new(big.Int).SetInt64(int64(r.E)) + // RSA publickey struct layout should match the struct used by + // parseRSACert in the x/crypto/ssh/agent package. + wirekey := struct { + Name string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + e, + r.N, + } + return Marshal(&wirekey) +} + +func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { + var hash crypto.Hash + switch sig.Format { + case SigAlgoRSA: + hash = crypto.SHA1 + case SigAlgoRSASHA2256: + hash = crypto.SHA256 + case SigAlgoRSASHA2512: + hash = crypto.SHA512 + default: + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) + } + h := hash.New() + h.Write(data) + digest := h.Sum(nil) + return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob) +} + +func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*rsa.PublicKey)(r) +} + +type dsaPublicKey dsa.PublicKey + +func (k *dsaPublicKey) Type() string { + return "ssh-dss" +} + +func checkDSAParams(param *dsa.Parameters) error { + // SSH specifies FIPS 186-2, which only provided a single size + // (1024 bits) DSA key. FIPS 186-3 allows for larger key + // sizes, which would confuse SSH. + if l := param.P.BitLen(); l != 1024 { + return fmt.Errorf("ssh: unsupported DSA key size %d", l) + } + + return nil +} + +// parseDSA parses an DSA key according to RFC 4253, section 6.6. +func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + P, Q, G, Y *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + param := dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + } + if err := checkDSAParams(¶m); err != nil { + return nil, nil, err + } + + key := &dsaPublicKey{ + Parameters: param, + Y: w.Y, + } + return key, w.Rest, nil +} + +func (k *dsaPublicKey) Marshal() []byte { + // DSA publickey struct layout should match the struct used by + // parseDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + P, Q, G, Y *big.Int + }{ + k.Type(), + k.P, + k.Q, + k.G, + k.Y, + } + + return Marshal(&w) +} + +func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 4253, section 6.6, + // The value for 'dss_signature_blob' is encoded as a string containing + // r, followed by s (which are 160-bit integers, without lengths or + // padding, unsigned, and in network byte order). + // For DSS purposes, sig.Blob should be exactly 40 bytes in length. 
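Because the blob has no framing, left-padding matters: big.Int.Bytes() drops leading zeros, so a 19-byte r must land in blob[1:20], not blob[0:19]. The encoding rule in isolation (it mirrors dsaPrivateKey.SignWithAlgorithm below):

    package main

    import (
        "fmt"
        "math/big"
    )

    // marshalDSSBlob packs r and s as 160-bit unsigned big-endian
    // values, right-aligned within their 20-byte halves.
    func marshalDSSBlob(r, s *big.Int) []byte {
        blob := make([]byte, 40)
        rb, sb := r.Bytes(), s.Bytes()
        copy(blob[20-len(rb):20], rb)
        copy(blob[40-len(sb):], sb)
        return blob
    }

    func main() {
        blob := marshalDSSBlob(big.NewInt(0x0102), big.NewInt(0x03))
        fmt.Printf("%x\n", blob) // 18 zero bytes, 0102, 19 zero bytes, 03
    }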
+ if len(sig.Blob) != 40 { + return errors.New("ssh: DSA signature parse error") + } + r := new(big.Int).SetBytes(sig.Blob[:20]) + s := new(big.Int).SetBytes(sig.Blob[20:]) + if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*dsa.PublicKey)(k) +} + +type dsaPrivateKey struct { + *dsa.PrivateKey +} + +func (k *dsaPrivateKey) PublicKey() PublicKey { + return (*dsaPublicKey)(&k.PrivateKey.PublicKey) +} + +func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { + return k.SignWithAlgorithm(rand, data, "") +} + +func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != "" && algorithm != k.PublicKey().Type() { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + r, s, err := dsa.Sign(rand, k.PrivateKey, digest) + if err != nil { + return nil, err + } + + sig := make([]byte, 40) + rb := r.Bytes() + sb := s.Bytes() + + copy(sig[20-len(rb):20], rb) + copy(sig[40-len(sb):], sb) + + return &Signature{ + Format: k.PublicKey().Type(), + Blob: sig, + }, nil +} + +type ecdsaPublicKey ecdsa.PublicKey + +func (k *ecdsaPublicKey) Type() string { + return "ecdsa-sha2-" + k.nistID() +} + +func (k *ecdsaPublicKey) nistID() string { + switch k.Params().BitSize { + case 256: + return "nistp256" + case 384: + return "nistp384" + case 521: + return "nistp521" + } + panic("ssh: unsupported ecdsa key size") +} + +type ed25519PublicKey ed25519.PublicKey + +func (k ed25519PublicKey) Type() string { + return KeyAlgoED25519 +} + +func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := ed25519.PublicKey(w.KeyBytes) + + return (ed25519PublicKey)(key), w.Rest, nil +} + +func (k ed25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + }{ + KeyAlgoED25519, + []byte(k), + } + return Marshal(&w) +} + +func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + edKey := (ed25519.PublicKey)(k) + if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return ed25519.PublicKey(k) +} + +func supportedEllipticCurve(curve elliptic.Curve) bool { + return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() +} + +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + +// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. 
+func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(ecdsa.PublicKey) + + switch w.Curve { + case "nistp256": + key.Curve = elliptic.P256() + case "nistp384": + key.Curve = elliptic.P384() + case "nistp521": + key.Curve = elliptic.P521() + default: + return nil, nil, errors.New("ssh: unsupported curve") + } + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + return (*ecdsaPublicKey)(key), w.Rest, nil +} + +func (k *ecdsaPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) + // ECDSA publickey struct layout should match the struct used by + // parseECDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + ID string + Key []byte + }{ + k.Type(), + k.nistID(), + keyBytes, + } + + return Marshal(&w) +} + +func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := ecHash(k.Curve).New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 5656, section 3.1.2, + // The ecdsa_signature_blob value has the following specific encoding: + // mpint r + // mpint s + var ecSig struct { + R *big.Int + S *big.Int + } + + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*ecdsa.PublicKey)(k) +} + +// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, +// *ecdsa.PrivateKey or any other crypto.Signer and returns a +// corresponding Signer instance. ECDSA keys must use P-256, P-384 or +// P-521. DSA keys must use parameter size L1024N160. +func NewSignerFromKey(key interface{}) (Signer, error) { + switch key := key.(type) { + case crypto.Signer: + return NewSignerFromSigner(key) + case *dsa.PrivateKey: + return newDSAPrivateKey(key) + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { + if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { + return nil, err + } + + return &dsaPrivateKey{key}, nil +} + +type wrappedSigner struct { + signer crypto.Signer + pubKey PublicKey +} + +// NewSignerFromSigner takes any crypto.Signer implementation and +// returns a corresponding Signer interface. This can be used, for +// example, with keys kept in hardware modules. 
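A minimal sketch of wrapping an external crypto.Signer; an in-memory ECDSA key stands in here for, say, a PKCS#11- or TPM-backed implementation:

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"
        "log"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        // *ecdsa.PrivateKey satisfies crypto.Signer; a hardware-backed
        // signer would be passed in exactly the same way.
        priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            log.Fatal(err)
        }
        signer, err := ssh.NewSignerFromSigner(priv)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(signer.PublicKey().Type()) // ecdsa-sha2-nistp256
    }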
+func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { + pubKey, err := NewPublicKey(signer.Public()) + if err != nil { + return nil, err + } + + return &wrappedSigner{signer, pubKey}, nil +} + +func (s *wrappedSigner) PublicKey() PublicKey { + return s.pubKey +} + +func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.SignWithAlgorithm(rand, data, "") +} + +func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + var hashFunc crypto.Hash + + if _, ok := s.pubKey.(*rsaPublicKey); ok { + // RSA keys support a few hash functions determined by the requested signature algorithm + switch algorithm { + case "", SigAlgoRSA: + algorithm = SigAlgoRSA + hashFunc = crypto.SHA1 + case SigAlgoRSASHA2256: + hashFunc = crypto.SHA256 + case SigAlgoRSASHA2512: + hashFunc = crypto.SHA512 + default: + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + } else { + // The only supported algorithm for all other key types is the same as the type of the key + if algorithm == "" { + algorithm = s.pubKey.Type() + } else if algorithm != s.pubKey.Type() { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + + switch key := s.pubKey.(type) { + case *dsaPublicKey: + hashFunc = crypto.SHA1 + case *ecdsaPublicKey: + hashFunc = ecHash(key.Curve) + case ed25519PublicKey: + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } + } + + var digest []byte + if hashFunc != 0 { + h := hashFunc.New() + h.Write(data) + digest = h.Sum(nil) + } else { + digest = data + } + + signature, err := s.signer.Sign(rand, digest, hashFunc) + if err != nil { + return nil, err + } + + // crypto.Signer.Sign is expected to return an ASN.1-encoded signature + // for ECDSA and DSA, but that's not the encoding expected by SSH, so + // re-encode. + switch s.pubKey.(type) { + case *ecdsaPublicKey, *dsaPublicKey: + type asn1Signature struct { + R, S *big.Int + } + asn1Sig := new(asn1Signature) + _, err := asn1.Unmarshal(signature, asn1Sig) + if err != nil { + return nil, err + } + + switch s.pubKey.(type) { + case *ecdsaPublicKey: + signature = Marshal(asn1Sig) + + case *dsaPublicKey: + signature = make([]byte, 40) + r := asn1Sig.R.Bytes() + s := asn1Sig.S.Bytes() + copy(signature[20-len(r):20], r) + copy(signature[40-len(s):40], s) + } + } + + return &Signature{ + Format: algorithm, + Blob: signature, + }, nil +} + +// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, +// or ed25519.PublicKey returns a corresponding PublicKey instance. +// ECDSA keys must use P-256, P-384 or P-521. +func NewPublicKey(key interface{}) (PublicKey, error) { + switch key := key.(type) { + case *rsa.PublicKey: + return (*rsaPublicKey)(key), nil + case *ecdsa.PublicKey: + if !supportedEllipticCurve(key.Curve) { + return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") + } + return (*ecdsaPublicKey)(key), nil + case *dsa.PublicKey: + return (*dsaPublicKey)(key), nil + case ed25519.PublicKey: + return (ed25519PublicKey)(key), nil + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports +// the same keys as ParseRawPrivateKey. 
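Typical use of ParsePrivateKey, assuming a hypothetical unencrypted key file on disk:

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        pemBytes, err := ioutil.ReadFile("/home/demo/.ssh/id_rsa") // hypothetical path
        if err != nil {
            log.Fatal(err)
        }
        signer, err := ssh.ParsePrivateKey(pemBytes)
        if err != nil {
            log.Fatal(err)
        }
        // FingerprintSHA256 is defined near the end of this file.
        fmt.Println(ssh.FingerprintSHA256(signer.PublicKey()))
    }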
+func ParsePrivateKey(pemBytes []byte) (Signer, error) { + key, err := ParseRawPrivateKey(pemBytes) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private +// key and passphrase. It supports the same keys as +// ParseRawPrivateKeyWithPassphrase. +func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) { + key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// encryptedBlock tells whether a private key is +// encrypted by examining its Proc-Type header +// for a mention of ENCRYPTED +// according to RFC 1421 Section 4.6.1.1. +func encryptedBlock(block *pem.Block) bool { + return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") +} + +// ParseRawPrivateKey returns a private key from a PEM encoded private key. It +// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. +func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + + if encryptedBlock(block) { + return nil, errors.New("ssh: cannot decode encrypted private keys") + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(block.Bytes) + // RFC5208 - https://tools.ietf.org/html/rfc5208 + case "PRIVATE KEY": + return x509.ParsePKCS8PrivateKey(block.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(block.Bytes) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(block.Bytes) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with +// passphrase from a PEM encoded private key. If wrong passphrase, return +// x509.IncorrectPasswordError. +func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + buf := block.Bytes + + if encryptedBlock(block) { + if x509.IsEncryptedPEMBlock(block) { + var err error + buf, err = x509.DecryptPEMBlock(block, passPhrase) + if err != nil { + if err == x509.IncorrectPasswordError { + return nil, err + } + return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) + } + } + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(buf) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(buf) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(buf) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(buf) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as +// specified by the OpenSSL DSA man page. 
+func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { + var k struct { + Version int + P *big.Int + Q *big.Int + G *big.Int + Pub *big.Int + Priv *big.Int + } + rest, err := asn1.Unmarshal(der, &k) + if err != nil { + return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) + } + if len(rest) > 0 { + return nil, errors.New("ssh: garbage after DSA key") + } + + return &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Pub, + }, + X: k.Priv, + }, nil +} + +// Implemented based on the documentation at +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key +func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { + const magic = "openssh-key-v1\x00" + if len(key) < len(magic) || string(key[:len(magic)]) != magic { + return nil, errors.New("ssh: invalid openssh private key format") + } + remaining := key[len(magic):] + + var w struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte + } + + if err := Unmarshal(remaining, &w); err != nil { + return nil, err + } + + if w.KdfName != "none" || w.CipherName != "none" { + return nil, errors.New("ssh: cannot decode encrypted private keys") + } + + pk1 := struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` + }{} + + if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { + return nil, err + } + + if pk1.Check1 != pk1.Check2 { + return nil, errors.New("ssh: checkint mismatch") + } + + // we only handle ed25519 and rsa keys currently + switch pk1.Keytype { + case KeyAlgoRSA: + // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 + key := struct { + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int + P *big.Int + Q *big.Int + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + for i, b := range key.Pad { + if int(b) != i+1 { + return nil, errors.New("ssh: padding not as expected") + } + } + + pk := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: key.N, + E: int(key.E.Int64()), + }, + D: key.D, + Primes: []*big.Int{key.P, key.Q}, + } + + if err := pk.Validate(); err != nil { + return nil, err + } + + pk.Precompute() + + return pk, nil + case KeyAlgoED25519: + key := struct { + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if len(key.Priv) != ed25519.PrivateKeySize { + return nil, errors.New("ssh: private key unexpected length") + } + + for i, b := range key.Pad { + if int(b) != i+1 { + return nil, errors.New("ssh: padding not as expected") + } + } + + pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) + copy(pk, key.Priv) + return &pk, nil + default: + return nil, errors.New("ssh: unhandled key type") + } +} + +// FingerprintLegacyMD5 returns the user presentation of the key's +// fingerprint as described by RFC 4716 section 4. +func FingerprintLegacyMD5(pubKey PublicKey) string { + md5sum := md5.Sum(pubKey.Marshal()) + hexarray := make([]string, len(md5sum)) + for i, c := range md5sum { + hexarray[i] = hex.EncodeToString([]byte{c}) + } + return strings.Join(hexarray, ":") +} + +// FingerprintSHA256 returns the user presentation of the key's +// fingerprint as unpadded base64 encoded sha256 hash. +// This format was introduced from OpenSSH 6.8. 
+// https://www.openssh.com/txt/release-6.8 +// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) +func FingerprintSHA256(pubKey PublicKey) string { + sha256sum := sha256.Sum256(pubKey.Marshal()) + hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) + return "SHA256:" + hash +} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go new file mode 100644 index 00000000..c07a0628 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -0,0 +1,61 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Message authentication support + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "hash" +) + +type macMode struct { + keySize int + etm bool + new func(key []byte) hash.Hash +} + +// truncatingMAC wraps around a hash.Hash and truncates the output digest to +// a given size. +type truncatingMAC struct { + length int + hmac hash.Hash +} + +func (t truncatingMAC) Write(data []byte) (int, error) { + return t.hmac.Write(data) +} + +func (t truncatingMAC) Sum(in []byte) []byte { + out := t.hmac.Sum(in) + return out[:len(in)+t.length] +} + +func (t truncatingMAC) Reset() { + t.hmac.Reset() +} + +func (t truncatingMAC) Size() int { + return t.length +} + +func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } + +var macModes = map[string]*macMode{ + "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha1": {20, false, func(key []byte) hash.Hash { + return hmac.New(sha1.New, key) + }}, + "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { + return truncatingMAC{12, hmac.New(sha1.New, key)} + }}, +} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go new file mode 100644 index 00000000..ac41a416 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -0,0 +1,866 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "strconv" + "strings" +) + +// These are SSH message type numbers. They are scattered around several +// documents but many were taken from [SSH-PARAMETERS]. +const ( + msgIgnore = 2 + msgUnimplemented = 3 + msgDebug = 4 + msgNewKeys = 21 +) + +// SSH messages: +// +// These structures mirror the wire format of the corresponding SSH messages. +// They are marshaled using reflection with the marshal and unmarshal functions +// in this file. The only wrinkle is that a final member of type []byte with a +// ssh tag of "rest" receives the remainder of a packet when unmarshaling. + +// See RFC 4253, section 11.1. +const msgDisconnect = 1 + +// disconnectMsg is the message that signals a disconnect. It is also +// the error type returned from mux.Wait() +type disconnectMsg struct { + Reason uint32 `sshtype:"1"` + Message string + Language string +} + +func (d *disconnectMsg) Error() string { + return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) +} + +// See RFC 4253, section 7.1. 
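The hmac-sha1-96 mode in mac.go computes a full HMAC-SHA-1 and transmits only the first 12 bytes, which is all truncatingMAC does; the same tag computed directly:

    package main

    import (
        "crypto/hmac"
        "crypto/sha1"
        "fmt"
    )

    func main() {
        mac := hmac.New(sha1.New, []byte("demo key")) // toy key
        mac.Write([]byte("packet payload"))
        full := mac.Sum(nil)          // 20-byte HMAC-SHA-1
        fmt.Printf("%x\n", full[:12]) // the 96-bit tag that goes on the wire
    }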
+const msgKexInit = 20 + +type kexInitMsg struct { + Cookie [16]byte `sshtype:"20"` + KexAlgos []string + ServerHostKeyAlgos []string + CiphersClientServer []string + CiphersServerClient []string + MACsClientServer []string + MACsServerClient []string + CompressionClientServer []string + CompressionServerClient []string + LanguagesClientServer []string + LanguagesServerClient []string + FirstKexFollows bool + Reserved uint32 +} + +// See RFC 4253, section 8. + +// Diffie-Helman +const msgKexDHInit = 30 + +type kexDHInitMsg struct { + X *big.Int `sshtype:"30"` +} + +const msgKexECDHInit = 30 + +type kexECDHInitMsg struct { + ClientPubKey []byte `sshtype:"30"` +} + +const msgKexECDHReply = 31 + +type kexECDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + EphemeralPubKey []byte + Signature []byte +} + +const msgKexDHReply = 31 + +type kexDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + Y *big.Int + Signature []byte +} + +// See RFC 4419, section 5. +const msgKexDHGexGroup = 31 + +type kexDHGexGroupMsg struct { + P *big.Int `sshtype:"31"` + G *big.Int +} + +const msgKexDHGexInit = 32 + +type kexDHGexInitMsg struct { + X *big.Int `sshtype:"32"` +} + +const msgKexDHGexReply = 33 + +type kexDHGexReplyMsg struct { + HostKey []byte `sshtype:"33"` + Y *big.Int + Signature []byte +} + +const msgKexDHGexRequest = 34 + +type kexDHGexRequestMsg struct { + MinBits uint32 `sshtype:"34"` + PreferedBits uint32 + MaxBits uint32 +} + +// See RFC 4253, section 10. +const msgServiceRequest = 5 + +type serviceRequestMsg struct { + Service string `sshtype:"5"` +} + +// See RFC 4253, section 10. +const msgServiceAccept = 6 + +type serviceAcceptMsg struct { + Service string `sshtype:"6"` +} + +// See RFC 4252, section 5. +const msgUserAuthRequest = 50 + +type userAuthRequestMsg struct { + User string `sshtype:"50"` + Service string + Method string + Payload []byte `ssh:"rest"` +} + +// Used for debug printouts of packets. +type userAuthSuccessMsg struct { +} + +// See RFC 4252, section 5.1 +const msgUserAuthFailure = 51 + +type userAuthFailureMsg struct { + Methods []string `sshtype:"51"` + PartialSuccess bool +} + +// See RFC 4252, section 5.1 +const msgUserAuthSuccess = 52 + +// See RFC 4252, section 5.4 +const msgUserAuthBanner = 53 + +type userAuthBannerMsg struct { + Message string `sshtype:"53"` + // unused, but required to allow message parsing + Language string +} + +// See RFC 4256, section 3.2 +const msgUserAuthInfoRequest = 60 +const msgUserAuthInfoResponse = 61 + +type userAuthInfoRequestMsg struct { + User string `sshtype:"60"` + Instruction string + DeprecatedLanguage string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpen = 90 + +type channelOpenMsg struct { + ChanType string `sshtype:"90"` + PeersID uint32 + PeersWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +const msgChannelExtendedData = 95 +const msgChannelData = 94 + +// Used for debug print outs of packets. +type channelDataMsg struct { + PeersID uint32 `sshtype:"94"` + Length uint32 + Rest []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenConfirm = 91 + +type channelOpenConfirmMsg struct { + PeersID uint32 `sshtype:"91"` + MyID uint32 + MyWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. 
+const msgChannelOpenFailure = 92 + +type channelOpenFailureMsg struct { + PeersID uint32 `sshtype:"92"` + Reason RejectionReason + Message string + Language string +} + +const msgChannelRequest = 98 + +type channelRequestMsg struct { + PeersID uint32 `sshtype:"98"` + Request string + WantReply bool + RequestSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.4. +const msgChannelSuccess = 99 + +type channelRequestSuccessMsg struct { + PeersID uint32 `sshtype:"99"` +} + +// See RFC 4254, section 5.4. +const msgChannelFailure = 100 + +type channelRequestFailureMsg struct { + PeersID uint32 `sshtype:"100"` +} + +// See RFC 4254, section 5.3 +const msgChannelClose = 97 + +type channelCloseMsg struct { + PeersID uint32 `sshtype:"97"` +} + +// See RFC 4254, section 5.3 +const msgChannelEOF = 96 + +type channelEOFMsg struct { + PeersID uint32 `sshtype:"96"` +} + +// See RFC 4254, section 4 +const msgGlobalRequest = 80 + +type globalRequestMsg struct { + Type string `sshtype:"80"` + WantReply bool + Data []byte `ssh:"rest"` +} + +// See RFC 4254, section 4 +const msgRequestSuccess = 81 + +type globalRequestSuccessMsg struct { + Data []byte `ssh:"rest" sshtype:"81"` +} + +// See RFC 4254, section 4 +const msgRequestFailure = 82 + +type globalRequestFailureMsg struct { + Data []byte `ssh:"rest" sshtype:"82"` +} + +// See RFC 4254, section 5.2 +const msgChannelWindowAdjust = 93 + +type windowAdjustMsg struct { + PeersID uint32 `sshtype:"93"` + AdditionalBytes uint32 +} + +// See RFC 4252, section 7 +const msgUserAuthPubKeyOk = 60 + +type userAuthPubKeyOkMsg struct { + Algo string `sshtype:"60"` + PubKey []byte +} + +// See RFC 4462, section 3 +const msgUserAuthGSSAPIResponse = 60 + +type userAuthGSSAPIResponse struct { + SupportMech []byte `sshtype:"60"` +} + +const msgUserAuthGSSAPIToken = 61 + +type userAuthGSSAPIToken struct { + Token []byte `sshtype:"61"` +} + +const msgUserAuthGSSAPIMIC = 66 + +type userAuthGSSAPIMIC struct { + MIC []byte `sshtype:"66"` +} + +// See RFC 4462, section 3.9 +const msgUserAuthGSSAPIErrTok = 64 + +type userAuthGSSAPIErrTok struct { + ErrorToken []byte `sshtype:"64"` +} + +// See RFC 4462, section 3.8 +const msgUserAuthGSSAPIError = 65 + +type userAuthGSSAPIError struct { + MajorStatus uint32 `sshtype:"65"` + MinorStatus uint32 + Message string + LanguageTag string +} + +// typeTags returns the possible type bytes for the given reflect.Type, which +// should be a struct. The possible values are separated by a '|' character. +func typeTags(structType reflect.Type) (tags []byte) { + tagStr := structType.Field(0).Tag.Get("sshtype") + + for _, tag := range strings.Split(tagStr, "|") { + i, err := strconv.Atoi(tag) + if err == nil { + tags = append(tags, byte(i)) + } + } + + return tags +} + +func fieldError(t reflect.Type, field int, problem string) error { + if problem != "" { + problem = ": " + problem + } + return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) +} + +var errShortRead = errors.New("ssh: short read") + +// Unmarshal parses data in SSH wire format into a structure. The out +// argument should be a pointer to struct. If the first member of the +// struct has the "sshtype" tag set to a '|'-separated set of numbers +// in decimal, the packet must start with one of those numbers. In +// case of error, Unmarshal returns a ParseError or +// UnexpectedMessageError. 
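Since Marshal and Unmarshal (both defined below) are exported, the reflection-driven wire format can be exercised directly. A round-trip sketch; demoMsg and its tag value 42 are invented for illustration and do not correspond to any real SSH message:

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

// demoMsg is a made-up message type; 42 is not a real SSH message number.
type demoMsg struct {
	Name  string `sshtype:"42"`
	Flags uint32
	Algos []string
	Rest  []byte `ssh:"rest"`
}

func main() {
	in := demoMsg{Name: "demo", Flags: 7, Algos: []string{"aes128-ctr", "aes256-ctr"}, Rest: []byte{1, 2, 3}}
	wire := ssh.Marshal(&in) // wire[0] is the sshtype tag, 42

	var out demoMsg
	if err := ssh.Unmarshal(wire, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("type=%d %+v\n", wire[0], out)
}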
+func Unmarshal(data []byte, out interface{}) error { + v := reflect.ValueOf(out).Elem() + structType := v.Type() + expectedTypes := typeTags(structType) + + var expectedType byte + if len(expectedTypes) > 0 { + expectedType = expectedTypes[0] + } + + if len(data) == 0 { + return parseError(expectedType) + } + + if len(expectedTypes) > 0 { + goodType := false + for _, e := range expectedTypes { + if e > 0 && data[0] == e { + goodType = true + break + } + } + if !goodType { + return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) + } + data = data[1:] + } + + var ok bool + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + t := field.Type() + switch t.Kind() { + case reflect.Bool: + if len(data) < 1 { + return errShortRead + } + field.SetBool(data[0] != 0) + data = data[1:] + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + return fieldError(structType, i, "array of unsupported type") + } + if len(data) < t.Len() { + return errShortRead + } + for j, n := 0, t.Len(); j < n; j++ { + field.Index(j).Set(reflect.ValueOf(data[j])) + } + data = data[t.Len():] + case reflect.Uint64: + var u64 uint64 + if u64, data, ok = parseUint64(data); !ok { + return errShortRead + } + field.SetUint(u64) + case reflect.Uint32: + var u32 uint32 + if u32, data, ok = parseUint32(data); !ok { + return errShortRead + } + field.SetUint(uint64(u32)) + case reflect.Uint8: + if len(data) < 1 { + return errShortRead + } + field.SetUint(uint64(data[0])) + data = data[1:] + case reflect.String: + var s []byte + if s, data, ok = parseString(data); !ok { + return fieldError(structType, i, "") + } + field.SetString(string(s)) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if structType.Field(i).Tag.Get("ssh") == "rest" { + field.Set(reflect.ValueOf(data)) + data = nil + } else { + var s []byte + if s, data, ok = parseString(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(s)) + } + case reflect.String: + var nl []string + if nl, data, ok = parseNameList(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(nl)) + default: + return fieldError(structType, i, "slice of unsupported type") + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + if n, data, ok = parseInt(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(n)) + } else { + return fieldError(structType, i, "pointer to unsupported type") + } + default: + return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) + } + } + + if len(data) != 0 { + return parseError(expectedType) + } + + return nil +} + +// Marshal serializes the message in msg to SSH wire format. The msg +// argument should be a struct or pointer to struct. If the first +// member has the "sshtype" tag set to a number in decimal, that +// number is prepended to the result. If the last of member has the +// "ssh" tag set to "rest", its contents are appended to the output. 
+func Marshal(msg interface{}) []byte { + out := make([]byte, 0, 64) + return marshalStruct(out, msg) +} + +func marshalStruct(out []byte, msg interface{}) []byte { + v := reflect.Indirect(reflect.ValueOf(msg)) + msgTypes := typeTags(v.Type()) + if len(msgTypes) > 0 { + out = append(out, msgTypes[0]) + } + + for i, n := 0, v.NumField(); i < n; i++ { + field := v.Field(i) + switch t := field.Type(); t.Kind() { + case reflect.Bool: + var v uint8 + if field.Bool() { + v = 1 + } + out = append(out, v) + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) + } + for j, l := 0, t.Len(); j < l; j++ { + out = append(out, uint8(field.Index(j).Uint())) + } + case reflect.Uint32: + out = appendU32(out, uint32(field.Uint())) + case reflect.Uint64: + out = appendU64(out, uint64(field.Uint())) + case reflect.Uint8: + out = append(out, uint8(field.Uint())) + case reflect.String: + s := field.String() + out = appendInt(out, len(s)) + out = append(out, s...) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if v.Type().Field(i).Tag.Get("ssh") != "rest" { + out = appendInt(out, field.Len()) + } + out = append(out, field.Bytes()...) + case reflect.String: + offset := len(out) + out = appendU32(out, 0) + if n := field.Len(); n > 0 { + for j := 0; j < n; j++ { + f := field.Index(j) + if j != 0 { + out = append(out, ',') + } + out = append(out, f.String()...) + } + // overwrite length value + binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) + } + default: + panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + nValue := reflect.ValueOf(&n) + nValue.Elem().Set(field) + needed := intLength(n) + oldLength := len(out) + + if cap(out)-len(out) < needed { + newOut := make([]byte, len(out), 2*(len(out)+needed)) + copy(newOut, out) + out = newOut + } + out = out[:oldLength+needed] + marshalInt(out[oldLength:], n) + } else { + panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) + } + } + } + + return out +} + +var bigOne = big.NewInt(1) + +func parseString(in []byte) (out, rest []byte, ok bool) { + if len(in) < 4 { + return + } + length := binary.BigEndian.Uint32(in) + in = in[4:] + if uint32(len(in)) < length { + return + } + out = in[:length] + rest = in[length:] + ok = true + return +} + +var ( + comma = []byte{','} + emptyNameList = []string{} +) + +func parseNameList(in []byte) (out []string, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + if len(contents) == 0 { + out = emptyNameList + return + } + parts := bytes.Split(contents, comma) + out = make([]string, len(parts)) + for i, part := range parts { + out[i] = string(part) + } + return +} + +func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + out = new(big.Int) + + if len(contents) > 0 && contents[0]&0x80 == 0x80 { + // This is a negative number + notBytes := make([]byte, len(contents)) + for i := range notBytes { + notBytes[i] = ^contents[i] + } + out.SetBytes(notBytes) + out.Add(out, bigOne) + out.Neg(out) + } else { + // Positive number + out.SetBytes(contents) + } + ok = true + return +} + +func parseUint32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, nil, false + } + return binary.BigEndian.Uint32(in), in[4:], true +} + +func parseUint64(in []byte) (uint64, []byte, 
bool) { + if len(in) < 8 { + return 0, nil, false + } + return binary.BigEndian.Uint64(in), in[8:], true +} + +func intLength(n *big.Int) int { + length := 4 /* length bytes */ + if n.Sign() < 0 { + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bitLen := nMinus1.BitLen() + if bitLen%8 == 0 { + // The number will need 0xff padding + length++ + } + length += (bitLen + 7) / 8 + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bitLen := n.BitLen() + if bitLen%8 == 0 { + // The number will need 0x00 padding + length++ + } + length += (bitLen + 7) / 8 + } + + return length +} + +func marshalUint32(to []byte, n uint32) []byte { + binary.BigEndian.PutUint32(to, n) + return to[4:] +} + +func marshalUint64(to []byte, n uint64) []byte { + binary.BigEndian.PutUint64(to, n) + return to[8:] +} + +func marshalInt(to []byte, n *big.Int) []byte { + lengthBytes := to + to = to[4:] + length := 0 + + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + to[0] = 0xff + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with a 0x00 in order to + // stop it looking like a negative number. + to[0] = 0 + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } + + lengthBytes[0] = byte(length >> 24) + lengthBytes[1] = byte(length >> 16) + lengthBytes[2] = byte(length >> 8) + lengthBytes[3] = byte(length) + return to +} + +func writeInt(w io.Writer, n *big.Int) { + length := intLength(n) + buf := make([]byte, length) + marshalInt(buf, n) + w.Write(buf) +} + +func writeString(w io.Writer, s []byte) { + var lengthBytes [4]byte + lengthBytes[0] = byte(len(s) >> 24) + lengthBytes[1] = byte(len(s) >> 16) + lengthBytes[2] = byte(len(s) >> 8) + lengthBytes[3] = byte(len(s)) + w.Write(lengthBytes[:]) + w.Write(s) +} + +func stringLength(n int) int { + return 4 + n +} + +func marshalString(to []byte, s []byte) []byte { + to[0] = byte(len(s) >> 24) + to[1] = byte(len(s) >> 16) + to[2] = byte(len(s) >> 8) + to[3] = byte(len(s)) + to = to[4:] + copy(to, s) + return to[len(s):] +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)) + +// Decode a packet into its corresponding message. 
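intLength and marshalInt above implement the RFC 4251 mpint encoding. They are unexported, so this standalone sketch re-derives only the positive-number rule they apply: a magnitude whose high bit is set gains a leading zero byte so it cannot be read back as negative.

package main

import (
	"fmt"
	"math/big"
)

// mpint encodes a non-negative big.Int the way marshalInt does:
// a uint32 length prefix, then the magnitude, zero-padded if the
// most significant bit would otherwise be set.
func mpint(n *big.Int) []byte {
	b := n.Bytes()
	if len(b) > 0 && b[0]&0x80 != 0 {
		b = append([]byte{0x00}, b...)
	}
	out := make([]byte, 4, 4+len(b))
	out[0] = byte(len(b) >> 24)
	out[1] = byte(len(b) >> 16)
	out[2] = byte(len(b) >> 8)
	out[3] = byte(len(b))
	return append(out, b...)
}

func main() {
	fmt.Printf("% x\n", mpint(big.NewInt(0x7f))) // 00 00 00 01 7f
	fmt.Printf("% x\n", mpint(big.NewInt(0x80))) // 00 00 00 02 00 80
}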
+func decode(packet []byte) (interface{}, error) { + var msg interface{} + switch packet[0] { + case msgDisconnect: + msg = new(disconnectMsg) + case msgServiceRequest: + msg = new(serviceRequestMsg) + case msgServiceAccept: + msg = new(serviceAcceptMsg) + case msgKexInit: + msg = new(kexInitMsg) + case msgKexDHInit: + msg = new(kexDHInitMsg) + case msgKexDHReply: + msg = new(kexDHReplyMsg) + case msgUserAuthRequest: + msg = new(userAuthRequestMsg) + case msgUserAuthSuccess: + return new(userAuthSuccessMsg), nil + case msgUserAuthFailure: + msg = new(userAuthFailureMsg) + case msgUserAuthPubKeyOk: + msg = new(userAuthPubKeyOkMsg) + case msgGlobalRequest: + msg = new(globalRequestMsg) + case msgRequestSuccess: + msg = new(globalRequestSuccessMsg) + case msgRequestFailure: + msg = new(globalRequestFailureMsg) + case msgChannelOpen: + msg = new(channelOpenMsg) + case msgChannelData: + msg = new(channelDataMsg) + case msgChannelOpenConfirm: + msg = new(channelOpenConfirmMsg) + case msgChannelOpenFailure: + msg = new(channelOpenFailureMsg) + case msgChannelWindowAdjust: + msg = new(windowAdjustMsg) + case msgChannelEOF: + msg = new(channelEOFMsg) + case msgChannelClose: + msg = new(channelCloseMsg) + case msgChannelRequest: + msg = new(channelRequestMsg) + case msgChannelSuccess: + msg = new(channelRequestSuccessMsg) + case msgChannelFailure: + msg = new(channelRequestFailureMsg) + case msgUserAuthGSSAPIToken: + msg = new(userAuthGSSAPIToken) + case msgUserAuthGSSAPIMIC: + msg = new(userAuthGSSAPIMIC) + case msgUserAuthGSSAPIErrTok: + msg = new(userAuthGSSAPIErrTok) + case msgUserAuthGSSAPIError: + msg = new(userAuthGSSAPIError) + default: + return nil, unexpectedMessageError(0, packet[0]) + } + if err := Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} + +var packetTypeNames = map[byte]string{ + msgDisconnect: "disconnectMsg", + msgServiceRequest: "serviceRequestMsg", + msgServiceAccept: "serviceAcceptMsg", + msgKexInit: "kexInitMsg", + msgKexDHInit: "kexDHInitMsg", + msgKexDHReply: "kexDHReplyMsg", + msgUserAuthRequest: "userAuthRequestMsg", + msgUserAuthSuccess: "userAuthSuccessMsg", + msgUserAuthFailure: "userAuthFailureMsg", + msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", + msgGlobalRequest: "globalRequestMsg", + msgRequestSuccess: "globalRequestSuccessMsg", + msgRequestFailure: "globalRequestFailureMsg", + msgChannelOpen: "channelOpenMsg", + msgChannelData: "channelDataMsg", + msgChannelOpenConfirm: "channelOpenConfirmMsg", + msgChannelOpenFailure: "channelOpenFailureMsg", + msgChannelWindowAdjust: "windowAdjustMsg", + msgChannelEOF: "channelEOFMsg", + msgChannelClose: "channelCloseMsg", + msgChannelRequest: "channelRequestMsg", + msgChannelSuccess: "channelRequestSuccessMsg", + msgChannelFailure: "channelRequestFailureMsg", +} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go new file mode 100644 index 00000000..f1901627 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -0,0 +1,330 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "fmt" + "io" + "log" + "sync" + "sync/atomic" +) + +// debugMux, if set, causes messages in the connection protocol to be +// logged. +const debugMux = false + +// chanList is a thread safe channel list. 
+type chanList struct { + // protects concurrent access to chans + sync.Mutex + + // chans are indexed by the local id of the channel, which the + // other side should send in the PeersId field. + chans []*channel + + // This is a debugging aid: it offsets all IDs by this + // amount. This helps distinguish otherwise identical + // server/client muxes + offset uint32 +} + +// Assigns a channel ID to the given channel. +func (c *chanList) add(ch *channel) uint32 { + c.Lock() + defer c.Unlock() + for i := range c.chans { + if c.chans[i] == nil { + c.chans[i] = ch + return uint32(i) + c.offset + } + } + c.chans = append(c.chans, ch) + return uint32(len(c.chans)-1) + c.offset +} + +// getChan returns the channel for the given ID. +func (c *chanList) getChan(id uint32) *channel { + id -= c.offset + + c.Lock() + defer c.Unlock() + if id < uint32(len(c.chans)) { + return c.chans[id] + } + return nil +} + +func (c *chanList) remove(id uint32) { + id -= c.offset + c.Lock() + if id < uint32(len(c.chans)) { + c.chans[id] = nil + } + c.Unlock() +} + +// dropAll forgets all channels it knows, returning them in a slice. +func (c *chanList) dropAll() []*channel { + c.Lock() + defer c.Unlock() + var r []*channel + + for _, ch := range c.chans { + if ch == nil { + continue + } + r = append(r, ch) + } + c.chans = nil + return r +} + +// mux represents the state for the SSH connection protocol, which +// multiplexes many channels onto a single packet transport. +type mux struct { + conn packetConn + chanList chanList + + incomingChannels chan NewChannel + + globalSentMu sync.Mutex + globalResponses chan interface{} + incomingRequests chan *Request + + errCond *sync.Cond + err error +} + +// When debugging, each new chanList instantiation has a different +// offset. +var globalOff uint32 + +func (m *mux) Wait() error { + m.errCond.L.Lock() + defer m.errCond.L.Unlock() + for m.err == nil { + m.errCond.Wait() + } + return m.err +} + +// newMux returns a mux that runs over the given connection. +func newMux(p packetConn) *mux { + m := &mux{ + conn: p, + incomingChannels: make(chan NewChannel, chanSize), + globalResponses: make(chan interface{}, 1), + incomingRequests: make(chan *Request, chanSize), + errCond: newCond(), + } + if debugMux { + m.chanList.offset = atomic.AddUint32(&globalOff, 1) + } + + go m.loop() + return m +} + +func (m *mux) sendMessage(msg interface{}) error { + p := Marshal(msg) + if debugMux { + log.Printf("send global(%d): %#v", m.chanList.offset, msg) + } + return m.conn.writePacket(p) +} + +func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { + if wantReply { + m.globalSentMu.Lock() + defer m.globalSentMu.Unlock() + } + + if err := m.sendMessage(globalRequestMsg{ + Type: name, + WantReply: wantReply, + Data: payload, + }); err != nil { + return false, nil, err + } + + if !wantReply { + return false, nil, nil + } + + msg, ok := <-m.globalResponses + if !ok { + return false, nil, io.EOF + } + switch msg := msg.(type) { + case *globalRequestFailureMsg: + return false, msg.Data, nil + case *globalRequestSuccessMsg: + return true, msg.Data, nil + default: + return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) + } +} + +// ackRequest must be called after processing a global request that +// has WantReply set. 
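mux.SendRequest above is the machinery behind the exported Conn.SendRequest. A hedged client-side sketch using an OpenSSH-style keepalive; the address and credentials are placeholders, and OpenSSH servers intentionally answer this request with a failure, which still proves the connection is alive:

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User: "demo",
		Auth: []ssh.AuthMethod{ssh.Password("demo")},
		// Placeholder only: real code must verify host keys.
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	client, err := ssh.Dial("tcp", "example.com:22", config) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// wantReply=true makes the reply flow back through
	// mux.globalResponses as shown above.
	ok, _, err := client.SendRequest("keepalive@openssh.com", true, nil)
	log.Printf("keepalive ok=%v err=%v", ok, err)
}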
+func (m *mux) ackRequest(ok bool, data []byte) error { + if ok { + return m.sendMessage(globalRequestSuccessMsg{Data: data}) + } + return m.sendMessage(globalRequestFailureMsg{Data: data}) +} + +func (m *mux) Close() error { + return m.conn.Close() +} + +// loop runs the connection machine. It will process packets until an +// error is encountered. To synchronize on loop exit, use mux.Wait. +func (m *mux) loop() { + var err error + for err == nil { + err = m.onePacket() + } + + for _, ch := range m.chanList.dropAll() { + ch.close() + } + + close(m.incomingChannels) + close(m.incomingRequests) + close(m.globalResponses) + + m.conn.Close() + + m.errCond.L.Lock() + m.err = err + m.errCond.Broadcast() + m.errCond.L.Unlock() + + if debugMux { + log.Println("loop exit", err) + } +} + +// onePacket reads and processes one packet. +func (m *mux) onePacket() error { + packet, err := m.conn.readPacket() + if err != nil { + return err + } + + if debugMux { + if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { + log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) + } else { + p, _ := decode(packet) + log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) + } + } + + switch packet[0] { + case msgChannelOpen: + return m.handleChannelOpen(packet) + case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: + return m.handleGlobalPacket(packet) + } + + // assume a channel packet. + if len(packet) < 5 { + return parseError(packet[0]) + } + id := binary.BigEndian.Uint32(packet[1:]) + ch := m.chanList.getChan(id) + if ch == nil { + return fmt.Errorf("ssh: invalid channel %d", id) + } + + return ch.handlePacket(packet) +} + +func (m *mux) handleGlobalPacket(packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + case *globalRequestMsg: + m.incomingRequests <- &Request{ + Type: msg.Type, + WantReply: msg.WantReply, + Payload: msg.Data, + mux: m, + } + case *globalRequestSuccessMsg, *globalRequestFailureMsg: + m.globalResponses <- msg + default: + panic(fmt.Sprintf("not a global message %#v", msg)) + } + + return nil +} + +// handleChannelOpen schedules a channel to be Accept()ed. 
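The channel-open path below (handleChannelOpen on the receiving side, openChannel on the sending side) is reachable through the exported Conn.OpenChannel. A sketch with an invented extension channel type; a server that does not recognize it responds with channelOpenFailureMsg, surfaced to the caller as *OpenChannelError:

package main

import "golang.org/x/crypto/ssh"

// openDemoChannel assumes an already-established ssh.Conn, such as
// the *ssh.Client from ssh.Dial; dialing is elided. The channel type
// "demo@example.com" is made up.
func openDemoChannel(conn ssh.Conn) error {
	ch, reqs, err := conn.OpenChannel("demo@example.com", nil)
	if err != nil {
		return err // typically *ssh.OpenChannelError for unknown types
	}
	// Requests on the channel must be serviced or the peer may stall.
	go ssh.DiscardRequests(reqs)
	defer ch.Close()
	_, err = ch.Write([]byte("hello"))
	return err
}

func main() {} // connection setup elided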
+func (m *mux) handleChannelOpen(packet []byte) error { + var msg channelOpenMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + failMsg := channelOpenFailureMsg{ + PeersID: msg.PeersID, + Reason: ConnectionFailed, + Message: "invalid request", + Language: "en_US.UTF-8", + } + return m.sendMessage(failMsg) + } + + c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) + c.remoteId = msg.PeersID + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.PeersWindow) + m.incomingChannels <- c + return nil +} + +func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { + ch, err := m.openChannel(chanType, extra) + if err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { + ch := m.newChannel(chanType, channelOutbound, extra) + + ch.maxIncomingPayload = channelMaxPacket + + open := channelOpenMsg{ + ChanType: chanType, + PeersWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + TypeSpecificData: extra, + PeersID: ch.localId, + } + if err := m.sendMessage(open); err != nil { + return nil, err + } + + switch msg := (<-ch.msg).(type) { + case *channelOpenConfirmMsg: + return ch, nil + case *channelOpenFailureMsg: + return nil, &OpenChannelError{msg.Reason, msg.Message} + default: + return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go new file mode 100644 index 00000000..7a5a1d7a --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -0,0 +1,716 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "strings" +) + +// The Permissions type holds fine-grained permissions that are +// specific to a user or a specific authentication method for a user. +// The Permissions value for a successful authentication attempt is +// available in ServerConn, so it can be used to pass information from +// the user-authentication phase to the application layer. +type Permissions struct { + // CriticalOptions indicate restrictions to the default + // permissions, and are typically used in conjunction with + // user certificates. The standard for SSH certificates + // defines "force-command" (only allow the given command to + // execute) and "source-address" (only allow connections from + // the given address). The SSH package currently only enforces + // the "source-address" critical option. It is up to server + // implementations to enforce other critical options, such as + // "force-command", by checking them after the SSH handshake + // is successful. In general, SSH servers should reject + // connections that specify critical options that are unknown + // or not supported. + CriticalOptions map[string]string + + // Extensions are extra functionality that the server may + // offer on authenticated connections. Lack of support for an + // extension does not preclude authenticating a user. Common + // extensions are "permit-agent-forwarding", + // "permit-X11-forwarding". The Go SSH library currently does + // not act on any extension, and it is up to server + // implementations to honor them. 
Extensions can be used to + // pass data from the authentication callbacks to the server + // application layer. + Extensions map[string]string +} + +type GSSAPIWithMICConfig struct { + // AllowLogin, must be set, is called when gssapi-with-mic + // authentication is selected (RFC 4462 section 3). The srcName is from the + // results of the GSS-API authentication. The format is username@DOMAIN. + // GSSAPI just guarantees to the server who the user is, but not if they can log in, and with what permissions. + // This callback is called after the user identity is established with GSSAPI to decide if the user can login with + // which permissions. If the user is allowed to login, it should return a nil error. + AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error) + + // Server must be set. It's the implementation + // of the GSSAPIServer interface. See GSSAPIServer interface for details. + Server GSSAPIServer +} + +// ServerConfig holds server specific configuration data. +type ServerConfig struct { + // Config contains configuration shared between client and server. + Config + + hostKeys []Signer + + // NoClientAuth is true if clients are allowed to connect without + // authenticating. + NoClientAuth bool + + // MaxAuthTries specifies the maximum number of authentication attempts + // permitted per connection. If set to a negative number, the number of + // attempts are unlimited. If set to zero, the number of attempts are limited + // to 6. + MaxAuthTries int + + // PasswordCallback, if non-nil, is called when a user + // attempts to authenticate using a password. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback, if non-nil, is called when a client + // offers a public key for authentication. It must return a nil error + // if the given public key can be used to authenticate the + // given user. For example, see CertChecker.Authenticate. A + // call to this function does not guarantee that the key + // offered is in fact used to authenticate. To record any data + // depending on the public key, store it inside a + // Permissions.Extensions entry. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback, if non-nil, is called when + // keyboard-interactive authentication is selected (RFC + // 4256). The client object's Challenge function should be + // used to query the user. The callback may offer multiple + // Challenge rounds. To avoid information leaks, the client + // should be presented a challenge even if the user is + // unknown. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // AuthLogCallback, if non-nil, is called to log all authentication + // attempts. + AuthLogCallback func(conn ConnMetadata, method string, err error) + + // ServerVersion is the version identification string to announce in + // the public handshake. + // If empty, a reasonable default is used. + // Note that RFC 4253 section 4.2 requires that this string start with + // "SSH-2.0-". + ServerVersion string + + // BannerCallback, if present, is called and the return string is sent to + // the client after key exchange completed but before authentication. + BannerCallback func(conn ConnMetadata) string + + // GSSAPIWithMICConfig includes gssapi server and callback, which if both non-nil, is used + // when gssapi-with-mic authentication is selected (RFC 4462 section 3). 
+ GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// AddHostKey adds a private key as a host key. If an existing host +// key exists with the same algorithm, it is overwritten. Each server +// config must have at least one host key. +func (s *ServerConfig) AddHostKey(key Signer) { + for i, k := range s.hostKeys { + if k.PublicKey().Type() == key.PublicKey().Type() { + s.hostKeys[i] = key + return + } + } + + s.hostKeys = append(s.hostKeys, key) +} + +// cachedPubKey contains the results of querying whether a public key is +// acceptable for a user. +type cachedPubKey struct { + user string + pubKeyData []byte + result error + perms *Permissions +} + +const maxCachedPubKeys = 16 + +// pubKeyCache caches tests for public keys. Since SSH clients +// will query whether a public key is acceptable before attempting to +// authenticate with it, we end up with duplicate queries for public +// key validity. The cache only applies to a single ServerConn. +type pubKeyCache struct { + keys []cachedPubKey +} + +// get returns the result for a given user/algo/key tuple. +func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { + for _, k := range c.keys { + if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { + return k, true + } + } + return cachedPubKey{}, false +} + +// add adds the given tuple to the cache. +func (c *pubKeyCache) add(candidate cachedPubKey) { + if len(c.keys) < maxCachedPubKeys { + c.keys = append(c.keys, candidate) + } +} + +// ServerConn is an authenticated SSH connection, as seen from the +// server +type ServerConn struct { + Conn + + // If the succeeding authentication callback returned a + // non-nil Permissions pointer, it is stored here. + Permissions *Permissions +} + +// NewServerConn starts a new SSH server with c as the underlying +// transport. It starts with a handshake and, if the handshake is +// unsuccessful, it closes the connection and returns an error. The +// Request and NewChannel channels must be serviced, or the connection +// will hang. +// +// The returned error may be of type *ServerAuthError for +// authentication errors. +func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.MaxAuthTries == 0 { + fullConf.MaxAuthTries = 6 + } + // Check if the config contains any unsupported key exchanges + for _, kex := range fullConf.KeyExchanges { + if _, ok := serverForbiddenKexAlgos[kex]; ok { + return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) + } + } + + s := &connection{ + sshConn: sshConn{conn: c}, + } + perms, err := s.serverHandshake(&fullConf) + if err != nil { + c.Close() + return nil, nil, nil, err + } + return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil +} + +// signAndMarshal signs the data with the appropriate algorithm, +// and serializes the result in SSH wire format. +func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { + sig, err := k.Sign(rand, data) + if err != nil { + return nil, err + } + + return Marshal(sig), nil +} + +// handshake performs key exchange and user authentication. 
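A condensed sketch of wiring ServerConfig, AddHostKey, and NewServerConn together, adapted from the usual package-documentation pattern; the host-key path, listen address, and credentials are placeholders:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			if c.User() == "demo" && string(pass) == "demo" {
				return nil, nil
			}
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
	}

	keyBytes, err := ioutil.ReadFile("host_key") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	hostKey, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	config.AddHostKey(hostKey)

	ln, err := net.Listen("tcp", "127.0.0.1:2022")
	if err != nil {
		log.Fatal(err)
	}
	nConn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}

	// NewServerConn runs the serverHandshake/serverAuthenticate
	// machinery that follows in this file.
	_, chans, reqs, err := ssh.NewServerConn(nConn, config)
	if err != nil {
		log.Fatal(err)
	}
	go ssh.DiscardRequests(reqs)
	for newChan := range chans {
		newChan.Reject(ssh.UnknownChannelType, "demo server accepts no channels")
	}
}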
+func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { + if len(config.hostKeys) == 0 { + return nil, errors.New("ssh: server has no host keys") + } + + if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && + config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || + config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if config.ServerVersion != "" { + s.serverVersion = []byte(config.ServerVersion) + } else { + s.serverVersion = []byte(packageVersion) + } + var err error + s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) + if err != nil { + return nil, err + } + + tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) + s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) + + if err := s.transport.waitSession(); err != nil { + return nil, err + } + + // We just did the key change, so the session ID is established. + s.sessionID = s.transport.getSessionID() + + var packet []byte + if packet, err = s.transport.readPacket(); err != nil { + return nil, err + } + + var serviceRequest serviceRequestMsg + if err = Unmarshal(packet, &serviceRequest); err != nil { + return nil, err + } + if serviceRequest.Service != serviceUserAuth { + return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") + } + serviceAccept := serviceAcceptMsg{ + Service: serviceUserAuth, + } + if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { + return nil, err + } + + perms, err := s.serverAuthenticate(config) + if err != nil { + return nil, err + } + s.mux = newMux(s.transport) + return perms, err +} + +func isAcceptableAlgo(algo string) bool { + switch algo { + case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: + return true + } + return false +} + +func checkSourceAddress(addr net.Addr, sourceAddrs string) error { + if addr == nil { + return errors.New("ssh: no address known for client, but source-address match required") + } + + tcpAddr, ok := addr.(*net.TCPAddr) + if !ok { + return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) + } + + for _, sourceAddr := range strings.Split(sourceAddrs, ",") { + if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { + if allowedIP.Equal(tcpAddr.IP) { + return nil + } + } else { + _, ipNet, err := net.ParseCIDR(sourceAddr) + if err != nil { + return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) + } + + if ipNet.Contains(tcpAddr.IP) { + return nil + } + } + } + + return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) +} + +func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection, + sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms *Permissions, err error) { + gssAPIServer := gssapiConfig.Server + defer gssAPIServer.DeleteSecContext() + var srcName string + for { + var ( + outToken []byte + needContinue bool + ) + outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(firstToken) + if err != nil { + return err, nil, nil + } + if len(outToken) != 0 
{ + if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ + Token: outToken, + })); err != nil { + return nil, nil, err + } + } + if !needContinue { + break + } + packet, err := s.transport.readPacket() + if err != nil { + return nil, nil, err + } + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return nil, nil, err + } + } + packet, err := s.transport.readPacket() + if err != nil { + return nil, nil, err + } + userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} + if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { + return nil, nil, err + } + mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) + if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { + return err, nil, nil + } + perms, authErr = gssapiConfig.AllowLogin(s, srcName) + return authErr, perms, nil +} + +// ServerAuthError represents server authentication errors and is +// sometimes returned by NewServerConn. It appends any authentication +// errors that may occur, and is returned if all of the authentication +// methods provided by the user failed to authenticate. +type ServerAuthError struct { + // Errors contains authentication errors returned by the authentication + // callback methods. The first entry is typically ErrNoAuth. + Errors []error +} + +func (l ServerAuthError) Error() string { + var errs []string + for _, err := range l.Errors { + errs = append(errs, err.Error()) + } + return "[" + strings.Join(errs, ", ") + "]" +} + +// ErrNoAuth is the error value returned if no +// authentication method has been passed yet. This happens as a normal +// part of the authentication loop, since the client first tries +// 'none' authentication to discover available methods. +// It is returned in ServerAuthError.Errors from NewServerConn. 
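The publickey branch of serverAuthenticate below calls PublicKeyCallback and caches the result in pubKeyCache. A sketch of a callback that allow-lists keys by fingerprint and uses Permissions.Extensions to hand the fingerprint to the application layer; the allowed map and its contents are placeholders:

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// allowed maps SHA256 fingerprints to user names; placeholder data.
var allowed = map[string]string{
	"SHA256:placeholder-fingerprint": "demo",
}

func publicKeyCallback(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
	fp := ssh.FingerprintSHA256(key)
	if allowed[fp] != c.User() {
		return nil, fmt.Errorf("unknown public key for %q", c.User())
	}
	// Extensions survive into ServerConn.Permissions, so the
	// application layer can see which key actually authenticated.
	return &ssh.Permissions{
		Extensions: map[string]string{"pubkey-fp": fp},
	}, nil
}

func main() {
	_ = &ssh.ServerConfig{PublicKeyCallback: publicKeyCallback}
}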
+var ErrNoAuth = errors.New("ssh: no auth passed yet") + +func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + sessionID := s.transport.getSessionID() + var cache pubKeyCache + var perms *Permissions + + authFailures := 0 + var authErrs []error + var displayedBanner bool + +userAuthLoop: + for { + if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { + discMsg := &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + } + + if err := s.transport.writePacket(Marshal(discMsg)); err != nil { + return nil, err + } + + return nil, discMsg + } + + var userAuthReq userAuthRequestMsg + if packet, err := s.transport.readPacket(); err != nil { + if err == io.EOF { + return nil, &ServerAuthError{Errors: authErrs} + } + return nil, err + } else if err = Unmarshal(packet, &userAuthReq); err != nil { + return nil, err + } + + if userAuthReq.Service != serviceSSH { + return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) + } + + s.user = userAuthReq.User + + if !displayedBanner && config.BannerCallback != nil { + displayedBanner = true + msg := config.BannerCallback(s) + if msg != "" { + bannerMsg := &userAuthBannerMsg{ + Message: msg, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + return nil, err + } + } + } + + perms = nil + authErr := ErrNoAuth + + switch userAuthReq.Method { + case "none": + if config.NoClientAuth { + authErr = nil + } + + // allow initial attempt of 'none' without penalty + if authFailures == 0 { + authFailures-- + } + case "password": + if config.PasswordCallback == nil { + authErr = errors.New("ssh: password auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 || payload[0] != 0 { + return nil, parseError(msgUserAuthRequest) + } + payload = payload[1:] + password, payload, ok := parseString(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + perms, authErr = config.PasswordCallback(s, password) + case "keyboard-interactive": + if config.KeyboardInteractiveCallback == nil { + authErr = errors.New("ssh: keyboard-interactive auth not configured") + break + } + + prompter := &sshClientKeyboardInteractive{s} + perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + case "publickey": + if config.PublicKeyCallback == nil { + authErr = errors.New("ssh: publickey auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 { + return nil, parseError(msgUserAuthRequest) + } + isQuery := payload[0] == 0 + payload = payload[1:] + algoBytes, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + algo := string(algoBytes) + if !isAcceptableAlgo(algo) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) + break + } + + pubKeyData, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + + pubKey, err := ParsePublicKey(pubKeyData) + if err != nil { + return nil, err + } + + candidate, ok := cache.get(s.user, pubKeyData) + if !ok { + candidate.user = s.user + candidate.pubKeyData = pubKeyData + candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) + if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + candidate.result = checkSourceAddress( + s.RemoteAddr(), + 
candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + } + cache.add(candidate) + } + + if isQuery { + // The client can query if the given public key + // would be okay. + + if len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + if candidate.result == nil { + okMsg := userAuthPubKeyOkMsg{ + Algo: algo, + PubKey: pubKeyData, + } + if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { + return nil, err + } + continue userAuthLoop + } + authErr = candidate.result + } else { + sig, payload, ok := parseSignature(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + // Ensure the public key algo and signature algo + // are supported. Compare the private key + // algorithm name that corresponds to algo with + // sig.Format. This is usually the same, but + // for certs, the names differ. + if !isAcceptableAlgo(sig.Format) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) + break + } + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) + + if err := pubKey.Verify(signedData, sig); err != nil { + return nil, err + } + + authErr = candidate.result + perms = candidate.perms + } + case "gssapi-with-mic": + gssapiConfig := config.GSSAPIWithMICConfig + userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) + if err != nil { + return nil, parseError(msgUserAuthRequest) + } + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. + if userAuthRequestGSSAPI.N == 0 { + authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") + break + } + var i uint32 + present := false + for i = 0; i < userAuthRequestGSSAPI.N; i++ { + if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { + present = true + break + } + } + if !present { + authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") + break + } + // Initial server response, see RFC 4462 section 3.3. + if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ + SupportMech: krb5OID, + })); err != nil { + return nil, err + } + // Exchange token, see RFC 4462 section 3.4. 
+ packet, err := s.transport.readPacket() + if err != nil { + return nil, err + } + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return nil, err + } + authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, + userAuthReq) + if err != nil { + return nil, err + } + default: + authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) + } + + authErrs = append(authErrs, authErr) + + if config.AuthLogCallback != nil { + config.AuthLogCallback(s, userAuthReq.Method, authErr) + } + + if authErr == nil { + break userAuthLoop + } + + authFailures++ + + var failureMsg userAuthFailureMsg + if config.PasswordCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "password") + } + if config.PublicKeyCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "publickey") + } + if config.KeyboardInteractiveCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") + } + if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && + config.GSSAPIWithMICConfig.AllowLogin != nil { + failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") + } + + if len(failureMsg.Methods) == 0 { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { + return nil, err + } + } + + if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { + return nil, err + } + return perms, nil +} + +// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by +// asking the client on the other side of a ServerConn. +type sshClientKeyboardInteractive struct { + *connection +} + +func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + if len(questions) != len(echos) { + return nil, errors.New("ssh: echos and questions must have equal length") + } + + var prompts []byte + for i := range questions { + prompts = appendString(prompts, questions[i]) + prompts = appendBool(prompts, echos[i]) + } + + if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Instruction: instruction, + NumPrompts: uint32(len(questions)), + Prompts: prompts, + })); err != nil { + return nil, err + } + + packet, err := c.transport.readPacket() + if err != nil { + return nil, err + } + if packet[0] != msgUserAuthInfoResponse { + return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) + } + packet = packet[1:] + + n, packet, ok := parseUint32(packet) + if !ok || int(n) != len(questions) { + return nil, parseError(msgUserAuthInfoResponse) + } + + for i := uint32(0); i < n; i++ { + ans, rest, ok := parseString(packet) + if !ok { + return nil, parseError(msgUserAuthInfoResponse) + } + + answers = append(answers, string(ans)) + packet = rest + } + if len(packet) != 0 { + return nil, errors.New("ssh: junk at end of message") + } + + return answers, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go new file mode 100644 index 00000000..d3321f6b --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -0,0 +1,647 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +// Session implements an interactive session described in +// "RFC 4254, section 6". + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "sync" +) + +type Signal string + +// POSIX signals as listed in RFC 4254 Section 6.10. +const ( + SIGABRT Signal = "ABRT" + SIGALRM Signal = "ALRM" + SIGFPE Signal = "FPE" + SIGHUP Signal = "HUP" + SIGILL Signal = "ILL" + SIGINT Signal = "INT" + SIGKILL Signal = "KILL" + SIGPIPE Signal = "PIPE" + SIGQUIT Signal = "QUIT" + SIGSEGV Signal = "SEGV" + SIGTERM Signal = "TERM" + SIGUSR1 Signal = "USR1" + SIGUSR2 Signal = "USR2" +) + +var signals = map[Signal]int{ + SIGABRT: 6, + SIGALRM: 14, + SIGFPE: 8, + SIGHUP: 1, + SIGILL: 4, + SIGINT: 2, + SIGKILL: 9, + SIGPIPE: 13, + SIGQUIT: 3, + SIGSEGV: 11, + SIGTERM: 15, +} + +type TerminalModes map[uint8]uint32 + +// POSIX terminal mode flags as listed in RFC 4254 Section 8. +const ( + tty_OP_END = 0 + VINTR = 1 + VQUIT = 2 + VERASE = 3 + VKILL = 4 + VEOF = 5 + VEOL = 6 + VEOL2 = 7 + VSTART = 8 + VSTOP = 9 + VSUSP = 10 + VDSUSP = 11 + VREPRINT = 12 + VWERASE = 13 + VLNEXT = 14 + VFLUSH = 15 + VSWTCH = 16 + VSTATUS = 17 + VDISCARD = 18 + IGNPAR = 30 + PARMRK = 31 + INPCK = 32 + ISTRIP = 33 + INLCR = 34 + IGNCR = 35 + ICRNL = 36 + IUCLC = 37 + IXON = 38 + IXANY = 39 + IXOFF = 40 + IMAXBEL = 41 + ISIG = 50 + ICANON = 51 + XCASE = 52 + ECHO = 53 + ECHOE = 54 + ECHOK = 55 + ECHONL = 56 + NOFLSH = 57 + TOSTOP = 58 + IEXTEN = 59 + ECHOCTL = 60 + ECHOKE = 61 + PENDIN = 62 + OPOST = 70 + OLCUC = 71 + ONLCR = 72 + OCRNL = 73 + ONOCR = 74 + ONLRET = 75 + CS7 = 90 + CS8 = 91 + PARENB = 92 + PARODD = 93 + TTY_OP_ISPEED = 128 + TTY_OP_OSPEED = 129 +) + +// A Session represents a connection to a remote command or shell. +type Session struct { + // Stdin specifies the remote process's standard input. + // If Stdin is nil, the remote process reads from an empty + // bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr specify the remote process's standard + // output and error. + // + // If either is nil, Run connects the corresponding file + // descriptor to an instance of ioutil.Discard. There is a + // fixed amount of buffering that is shared for the two streams. + // If either blocks it may eventually cause the remote + // command to block. + Stdout io.Writer + Stderr io.Writer + + ch Channel // the channel backing this session + started bool // true once Start, Run or Shell is invoked. + copyFuncs []func() error + errors chan error // one send per copyFunc + + // true if pipe method is active + stdinpipe, stdoutpipe, stderrpipe bool + + // stdinPipeWriter is non-nil if StdinPipe has not been called + // and Stdin was specified by the user; it is the write end of + // a pipe connecting Session.Stdin to the stdin channel. + stdinPipeWriter io.WriteCloser + + exitStatus chan error +} + +// SendRequest sends an out-of-band channel request on the SSH channel +// underlying the session. +func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + return s.ch.SendRequest(name, wantReply, payload) +} + +func (s *Session) Close() error { + return s.ch.Close() +} + +// RFC 4254 Section 6.4. +type setenvRequest struct { + Name string + Value string +} + +// Setenv sets an environment variable that will be applied to any +// command executed by Shell or Run. 
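A sketch tying the request helpers below together: allocate a pty with explicit terminal modes, set an environment variable, and capture a command's output. It assumes an already-dialed *ssh.Client, and the mode values and command are illustrative:

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

// runDemo assumes an established *ssh.Client; dialing is elided.
func runDemo(client *ssh.Client) error {
	session, err := client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()

	modes := ssh.TerminalModes{
		ssh.ECHO:          0,     // disable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4k baud
		ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4k baud
	}
	if err := session.RequestPty("xterm", 40, 80, modes); err != nil {
		return err
	}
	// Servers commonly restrict variable names via AcceptEnv, so
	// this can fail even when the rest of the session works.
	if err := session.Setenv("LC_DEMO", "1"); err != nil {
		return err
	}
	out, err := session.Output("uname -a")
	if err != nil {
		return err
	}
	fmt.Println(string(out))
	return nil
}

func main() { log.Println("dialing elided; see the client sketch earlier") }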
+func (s *Session) Setenv(name, value string) error { + msg := setenvRequest{ + Name: name, + Value: value, + } + ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: setenv failed") + } + return err +} + +// RFC 4254 Section 6.2. +type ptyRequestMsg struct { + Term string + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 + Modelist string +} + +// RequestPty requests the association of a pty with the session on the remote host. +func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { + var tm []byte + for k, v := range termmodes { + kv := struct { + Key byte + Val uint32 + }{k, v} + + tm = append(tm, Marshal(&kv)...) + } + tm = append(tm, tty_OP_END) + req := ptyRequestMsg{ + Term: term, + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + Modelist: string(tm), + } + ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) + if err == nil && !ok { + err = errors.New("ssh: pty-req failed") + } + return err +} + +// RFC 4254 Section 6.5. +type subsystemRequestMsg struct { + Subsystem string +} + +// RequestSubsystem requests the association of a subsystem with the session on the remote host. +// A subsystem is a predefined command that runs in the background when the ssh session is initiated +func (s *Session) RequestSubsystem(subsystem string) error { + msg := subsystemRequestMsg{ + Subsystem: subsystem, + } + ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: subsystem request failed") + } + return err +} + +// RFC 4254 Section 6.7. +type ptyWindowChangeMsg struct { + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 +} + +// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. +func (s *Session) WindowChange(h, w int) error { + req := ptyWindowChangeMsg{ + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + } + _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) + return err +} + +// RFC 4254 Section 6.9. +type signalMsg struct { + Signal string +} + +// Signal sends the given signal to the remote process. +// sig is one of the SIG* constants. +func (s *Session) Signal(sig Signal) error { + msg := signalMsg{ + Signal: string(sig), + } + + _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) + return err +} + +// RFC 4254 Section 6.5. +type execMsg struct { + Command string +} + +// Start runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start or Shell. +func (s *Session) Start(cmd string) error { + if s.started { + return errors.New("ssh: session already started") + } + req := execMsg{ + Command: cmd, + } + + ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) + if err == nil && !ok { + err = fmt.Errorf("ssh: command %v failed", cmd) + } + if err != nil { + return err + } + return s.start() +} + +// Run runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start, Shell, Output, +// or CombinedOutput. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. 
If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Run(cmd string) error { + err := s.Start(cmd) + if err != nil { + return err + } + return s.Wait() +} + +// Output runs cmd on the remote host and returns its standard output. +func (s *Session) Output(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + var b bytes.Buffer + s.Stdout = &b + err := s.Run(cmd) + return b.Bytes(), err +} + +type singleWriter struct { + b bytes.Buffer + mu sync.Mutex +} + +func (w *singleWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + return w.b.Write(p) +} + +// CombinedOutput runs cmd on the remote host and returns its combined +// standard output and standard error. +func (s *Session) CombinedOutput(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + var b singleWriter + s.Stdout = &b + s.Stderr = &b + err := s.Run(cmd) + return b.b.Bytes(), err +} + +// Shell starts a login shell on the remote host. A Session only +// accepts one call to Run, Start, Shell, Output, or CombinedOutput. +func (s *Session) Shell() error { + if s.started { + return errors.New("ssh: session already started") + } + + ok, err := s.ch.SendRequest("shell", true, nil) + if err == nil && !ok { + return errors.New("ssh: could not start shell") + } + if err != nil { + return err + } + return s.start() +} + +func (s *Session) start() error { + s.started = true + + type F func(*Session) + for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { + setupFd(s) + } + + s.errors = make(chan error, len(s.copyFuncs)) + for _, fn := range s.copyFuncs { + go func(fn func() error) { + s.errors <- fn() + }(fn) + } + return nil +} + +// Wait waits for the remote command to exit. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Wait() error { + if !s.started { + return errors.New("ssh: session not started") + } + waitErr := <-s.exitStatus + + if s.stdinPipeWriter != nil { + s.stdinPipeWriter.Close() + } + var copyError error + for range s.copyFuncs { + if err := <-s.errors; err != nil && copyError == nil { + copyError = err + } + } + if waitErr != nil { + return waitErr + } + return copyError +} + +func (s *Session) wait(reqs <-chan *Request) error { + wm := Waitmsg{status: -1} + // Wait for msg channel to be closed before returning. + for msg := range reqs { + switch msg.Type { + case "exit-status": + wm.status = int(binary.BigEndian.Uint32(msg.Payload)) + case "exit-signal": + var sigval struct { + Signal string + CoreDumped bool + Error string + Lang string + } + if err := Unmarshal(msg.Payload, &sigval); err != nil { + return err + } + + // Must sanitize strings? + wm.signal = sigval.Signal + wm.msg = sigval.Error + wm.lang = sigval.Lang + default: + // This handles keepalives and matches + // OpenSSH's behaviour. 
+ if msg.WantReply { + msg.Reply(false, nil) + } + } + } + if wm.status == 0 { + return nil + } + if wm.status == -1 { + // exit-status was never sent from server + if wm.signal == "" { + // signal was not sent either. RFC 4254 + // section 6.10 recommends against this + // behavior, but it is allowed, so we let + // clients handle it. + return &ExitMissingError{} + } + wm.status = 128 + if _, ok := signals[Signal(wm.signal)]; ok { + wm.status += signals[Signal(wm.signal)] + } + } + + return &ExitError{wm} +} + +// ExitMissingError is returned if a session is torn down cleanly, but +// the server sends no confirmation of the exit status. +type ExitMissingError struct{} + +func (e *ExitMissingError) Error() string { + return "wait: remote command exited without exit status or exit signal" +} + +func (s *Session) stdin() { + if s.stdinpipe { + return + } + var stdin io.Reader + if s.Stdin == nil { + stdin = new(bytes.Buffer) + } else { + r, w := io.Pipe() + go func() { + _, err := io.Copy(w, s.Stdin) + w.CloseWithError(err) + }() + stdin, s.stdinPipeWriter = r, w + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.ch, stdin) + if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { + err = err1 + } + return err + }) +} + +func (s *Session) stdout() { + if s.stdoutpipe { + return + } + if s.Stdout == nil { + s.Stdout = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stdout, s.ch) + return err + }) +} + +func (s *Session) stderr() { + if s.stderrpipe { + return + } + if s.Stderr == nil { + s.Stderr = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stderr, s.ch.Stderr()) + return err + }) +} + +// sessionStdin reroutes Close to CloseWrite. +type sessionStdin struct { + io.Writer + ch Channel +} + +func (s *sessionStdin) Close() error { + return s.ch.CloseWrite() +} + +// StdinPipe returns a pipe that will be connected to the +// remote command's standard input when the command starts. +func (s *Session) StdinPipe() (io.WriteCloser, error) { + if s.Stdin != nil { + return nil, errors.New("ssh: Stdin already set") + } + if s.started { + return nil, errors.New("ssh: StdinPipe after process started") + } + s.stdinpipe = true + return &sessionStdin{s.ch, s.ch}, nil +} + +// StdoutPipe returns a pipe that will be connected to the +// remote command's standard output when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StdoutPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StdoutPipe() (io.Reader, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.started { + return nil, errors.New("ssh: StdoutPipe after process started") + } + s.stdoutpipe = true + return s.ch, nil +} + +// StderrPipe returns a pipe that will be connected to the +// remote command's standard error when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StderrPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. 
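+//
+// A minimal usage sketch, not taken from this package (it assumes a
+// connected *Session named session and that the caller imports "io" and
+// "os"; error handling is elided). The pipe is drained concurrently so
+// the remote command cannot block on a full stderr buffer:
+//
+//	stderr, _ := session.StderrPipe()
+//	go io.Copy(os.Stderr, stderr)
+//	_ = session.Run("make")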
+func (s *Session) StderrPipe() (io.Reader, error) {
+	if s.Stderr != nil {
+		return nil, errors.New("ssh: Stderr already set")
+	}
+	if s.started {
+		return nil, errors.New("ssh: StderrPipe after process started")
+	}
+	s.stderrpipe = true
+	return s.ch.Stderr(), nil
+}
+
+// newSession returns a new interactive session on the remote host.
+func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
+	s := &Session{
+		ch: ch,
+	}
+	s.exitStatus = make(chan error, 1)
+	go func() {
+		s.exitStatus <- s.wait(reqs)
+	}()
+
+	return s, nil
+}
+
+// An ExitError reports unsuccessful completion of a remote command.
+type ExitError struct {
+	Waitmsg
+}
+
+func (e *ExitError) Error() string {
+	return e.Waitmsg.String()
+}
+
+// Waitmsg stores the information about an exited remote command
+// as reported by Wait.
+type Waitmsg struct {
+	status int
+	signal string
+	msg    string
+	lang   string
+}
+
+// ExitStatus returns the exit status of the remote command.
+func (w Waitmsg) ExitStatus() int {
+	return w.status
+}
+
+// Signal returns the exit signal of the remote command if
+// it was terminated violently.
+func (w Waitmsg) Signal() string {
+	return w.signal
+}
+
+// Msg returns the exit message given by the remote command.
+func (w Waitmsg) Msg() string {
+	return w.msg
+}
+
+// Lang returns the language tag. See RFC 3066.
+func (w Waitmsg) Lang() string {
+	return w.lang
+}
+
+func (w Waitmsg) String() string {
+	str := fmt.Sprintf("Process exited with status %v", w.status)
+	if w.signal != "" {
+		str += fmt.Sprintf(" from signal %v", w.signal)
+	}
+	if w.msg != "" {
+		str += fmt.Sprintf(". Reason was: %v", w.msg)
+	}
+	return str
+}
diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go
new file mode 100644
index 00000000..24bd7c8e
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/ssh_gss.go
@@ -0,0 +1,139 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"encoding/asn1"
+	"errors"
+)
+
+var krb5OID []byte
+
+func init() {
+	krb5OID, _ = asn1.Marshal(krb5Mesh)
+}
+
+// GSSAPIClient provides the API to plug in GSSAPI authentication for client logins.
+type GSSAPIClient interface {
+	// InitSecContext initiates the establishment of a security context for GSS-API between the
+	// ssh client and ssh server. Initially the token parameter should be specified as nil.
+	// The routine may return an outputToken which should be transferred to
+	// the ssh server, where the ssh server will present it to
+	// AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting
+	// needContinue to false. To complete the context
+	// establishment, one or more reply tokens may be required from the ssh
+	// server; if so, InitSecContext will return a needContinue which is true.
+	// In this case, InitSecContext should be called again when the
+	// reply token is received from the ssh server, passing the reply
+	// token to InitSecContext via the token parameter.
+	// See RFC 2743 section 2.2.1 and RFC 4462 section 3.4.
+	InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error)
+	// GetMIC generates a cryptographic MIC for the SSH2 message, and places
+	// the MIC in a token for transfer to the ssh server.
+	// The contents of the MIC field are obtained by calling GSS_GetMIC()
+	// over the following, using the GSS-API context that was just
+	// established:
+	//
+	//	string	session identifier
+	//	byte	SSH_MSG_USERAUTH_REQUEST
+	//	string	user name
+	//	string	service
+	//	string	"gssapi-with-mic"
+	//
+	// See RFC 2743 section 2.3.1 and RFC 4462 section 3.5.
+	GetMIC(micField []byte) ([]byte, error)
+	// DeleteSecContext deletes the security context.
+	// Whenever possible, it should be possible for
+	// DeleteSecContext() calls to be successfully processed even
+	// if other calls cannot succeed, thereby enabling context-related
+	// resources to be released.
+	// In addition to deleting established security contexts,
+	// gss_delete_sec_context must also be able to delete "half-built"
+	// security contexts resulting from an incomplete sequence of
+	// InitSecContext()/AcceptSecContext() calls.
+	// See RFC 2743 section 2.2.3.
+	DeleteSecContext() error
+}
+
+// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins.
+type GSSAPIServer interface {
+	// AcceptSecContext allows a remotely initiated security context between the application
+	// and a remote peer to be established by the ssh client. The routine may return an
+	// outputToken which should be transferred to the ssh client,
+	// where the ssh client will present it to InitSecContext.
+	// If no token need be sent, AcceptSecContext will indicate this
+	// by setting needContinue to false. To
+	// complete the context establishment, one or more reply tokens may be
+	// required from the ssh client; if so, AcceptSecContext
+	// will return a needContinue which is true, in which case it
+	// should be called again when the reply token is received from the ssh
+	// client, passing the token to AcceptSecContext via the
+	// token parameter.
+	// The srcName return value is the authenticated username.
+	// See RFC 2743 section 2.2.2 and RFC 4462 section 3.4.
+	AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error)
+	// VerifyMIC verifies that the cryptographic MIC contained in micToken
+	// matches the supplied micField message received from the ssh client.
+	// See RFC 2743 section 2.3.2.
+	VerifyMIC(micField []byte, micToken []byte) error
+	// DeleteSecContext deletes the security context.
+	// Whenever possible, it should be possible for
+	// DeleteSecContext() calls to be successfully processed even
+	// if other calls cannot succeed, thereby enabling context-related
+	// resources to be released.
+	// In addition to deleting established security contexts,
+	// gss_delete_sec_context must also be able to delete "half-built"
+	// security contexts resulting from an incomplete sequence of
+	// InitSecContext()/AcceptSecContext() calls.
+	// See RFC 2743 section 2.2.3.
+	DeleteSecContext() error
+}
+
+var (
+	// OpenSSH supports only the Kerberos V5 mechanism for GSS-API
+	// authentication, so we likewise support only the krb5 mechanism.
+	// See RFC 1964 section 1.
+	krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2}
+)
+
+// The GSS-API authentication method is initiated when the client sends an
+// SSH_MSG_USERAUTH_REQUEST message.
+// See RFC 4462 section 3.2.
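+//
+// The portion of the request payload parsed by parseGSSAPIPayload below
+// has this wire layout (RFC 4462 section 3.2):
+//
+//	uint32	n, the number of mechanism OIDs the client supports
+//	string	mechanism OID in DER encoding (repeated n times)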
+type userAuthRequestGSSAPI struct { + N uint32 + OIDS []asn1.ObjectIdentifier +} + +func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { + n, rest, ok := parseUint32(payload) + if !ok { + return nil, errors.New("parse uint32 failed") + } + s := &userAuthRequestGSSAPI{ + N: n, + OIDS: make([]asn1.ObjectIdentifier, n), + } + for i := 0; i < int(n); i++ { + var ( + desiredMech []byte + err error + ) + desiredMech, rest, ok = parseString(rest) + if !ok { + return nil, errors.New("parse string failed") + } + if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { + return nil, err + } + + } + return s, nil +} + +// See RFC 4462 section 3.6. +func buildMIC(sessionID string, username string, service string, authMethod string) []byte { + out := make([]byte, 0, 0) + out = appendString(out, sessionID) + out = append(out, msgUserAuthRequest) + out = appendString(out, username) + out = appendString(out, service) + out = appendString(out, authMethod) + return out +} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go new file mode 100644 index 00000000..b171b330 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/streamlocal.go @@ -0,0 +1,116 @@ +package ssh + +import ( + "errors" + "io" + "net" +) + +// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "direct-streamlocal@openssh.com" string. +// +// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 +type streamLocalChannelOpenDirectMsg struct { + socketPath string + reserved0 string + reserved1 uint32 +} + +// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "forwarded-streamlocal@openssh.com" string. +type forwardedStreamLocalPayload struct { + SocketPath string + Reserved0 string +} + +// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message +// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. +type streamLocalChannelForwardMsg struct { + socketPath string +} + +// ListenUnix is similar to ListenTCP but uses a Unix domain socket. +func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + m := streamLocalChannelForwardMsg{ + socketPath, + } + // send message + ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") + } + ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) + + return &unixListener{socketPath, c, ch}, nil +} + +func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { + msg := streamLocalChannelOpenDirectMsg{ + socketPath: socketPath, + } + ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type unixListener struct { + socketPath string + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. 
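+//
+// A hedged usage sketch (client is an assumed, already-connected *Client;
+// handle is a hypothetical handler; error handling elided). The returned
+// listener behaves like any net.Listener, backed by the remote socket:
+//
+//	ln, _ := client.ListenUnix("/var/run/example.sock")
+//	for {
+//		conn, _ := ln.Accept()
+//		go handle(conn)
+//	}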
+func (l *unixListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + }, nil +} + +// Close closes the listener. +func (l *unixListener) Close() error { + // this also closes the listener. + l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) + m := streamLocalChannelForwardMsg{ + l.socketPath, + } + ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *unixListener) Addr() net.Addr { + return &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + } +} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go new file mode 100644 index 00000000..80d35f5e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -0,0 +1,474 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Listen requests the remote peer open a listening socket on +// addr. Incoming connections will be available by calling Accept on +// the returned net.Listener. The listener must be serviced, or the +// SSH connection may hang. +// N must be "tcp", "tcp4", "tcp6", or "unix". +func (c *Client) Listen(n, addr string) (net.Listener, error) { + switch n { + case "tcp", "tcp4", "tcp6": + laddr, err := net.ResolveTCPAddr(n, addr) + if err != nil { + return nil, err + } + return c.ListenTCP(laddr) + case "unix": + return c.ListenUnix(addr) + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// Automatic port allocation is broken with OpenSSH before 6.0. See +// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In +// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, +// rather than the actual port number. This means you can never open +// two different listeners with auto allocated ports. We work around +// this by trying explicit ports until we succeed. + +const openSSHPrefix = "OpenSSH_" + +var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) + +// isBrokenOpenSSHVersion returns true if the given version string +// specifies a version of OpenSSH that is known to have a bug in port +// forwarding. +func isBrokenOpenSSHVersion(versionStr string) bool { + i := strings.Index(versionStr, openSSHPrefix) + if i < 0 { + return false + } + i += len(openSSHPrefix) + j := i + for ; j < len(versionStr); j++ { + if versionStr[j] < '0' || versionStr[j] > '9' { + break + } + } + version, _ := strconv.Atoi(versionStr[i:j]) + return version < 6 +} + +// autoPortListenWorkaround simulates automatic port allocation by +// trying random ports repeatedly. 
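+//
+// Candidate ports are drawn uniformly from [1024, 61024); the retry
+// budget is the tries constant (10) in the body below.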
+func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { + var sshListener net.Listener + var err error + const tries = 10 + for i := 0; i < tries; i++ { + addr := *laddr + addr.Port = 1024 + portRandomizer.Intn(60000) + sshListener, err = c.ListenTCP(&addr) + if err == nil { + laddr.Port = addr.Port + return sshListener, err + } + } + return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) +} + +// RFC 4254 7.1 +type channelForwardMsg struct { + addr string + rport uint32 +} + +// handleForwards starts goroutines handling forwarded connections. +// It's called on first use by (*Client).ListenTCP to not launch +// goroutines until needed. +func (c *Client) handleForwards() { + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) +} + +// ListenTCP requests the remote peer open a listening socket +// on laddr. Incoming connections will be available by calling +// Accept on the returned net.Listener. +func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { + return c.autoPortListenWorkaround(laddr) + } + + m := channelForwardMsg{ + laddr.IP.String(), + uint32(laddr.Port), + } + // send message + ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: tcpip-forward request denied by peer") + } + + // If the original port was 0, then the remote side will + // supply a real port number in the response. + if laddr.Port == 0 { + var p struct { + Port uint32 + } + if err := Unmarshal(resp, &p); err != nil { + return nil, err + } + laddr.Port = int(p.Port) + } + + // Register this forward, using the port number we obtained. + ch := c.forwards.add(laddr) + + return &tcpListener{laddr, c, ch}, nil +} + +// forwardList stores a mapping between remote +// forward requests and the tcpListeners. +type forwardList struct { + sync.Mutex + entries []forwardEntry +} + +// forwardEntry represents an established mapping of a laddr on a +// remote ssh server to a channel connected to a tcpListener. +type forwardEntry struct { + laddr net.Addr + c chan forward +} + +// forward represents an incoming forwarded tcpip connection. The +// arguments to add/remove/lookup should be address as specified in +// the original forward-request. +type forward struct { + newCh NewChannel // the ssh client channel underlying this forward + raddr net.Addr // the raddr of the incoming connection +} + +func (l *forwardList) add(addr net.Addr) chan forward { + l.Lock() + defer l.Unlock() + f := forwardEntry{ + laddr: addr, + c: make(chan forward, 1), + } + l.entries = append(l.entries, f) + return f.c +} + +// See RFC 4254, section 7.2 +type forwardedTCPPayload struct { + Addr string + Port uint32 + OriginAddr string + OriginPort uint32 +} + +// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. 
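+// For example, parseTCPAddr("127.0.0.1", 8080) yields
+// &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8080}.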
+func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { + if port == 0 || port > 65535 { + return nil, fmt.Errorf("ssh: port number out of range: %d", port) + } + ip := net.ParseIP(string(addr)) + if ip == nil { + return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) + } + return &net.TCPAddr{IP: ip, Port: int(port)}, nil +} + +func (l *forwardList) handleChannels(in <-chan NewChannel) { + for ch := range in { + var ( + laddr net.Addr + raddr net.Addr + err error + ) + switch channelType := ch.ChannelType(); channelType { + case "forwarded-tcpip": + var payload forwardedTCPPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) + continue + } + + // RFC 4254 section 7.2 specifies that incoming + // addresses should list the address, in string + // format. It is implied that this should be an IP + // address, as it would be impossible to connect to it + // otherwise. + laddr, err = parseTCPAddr(payload.Addr, payload.Port) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + + case "forwarded-streamlocal@openssh.com": + var payload forwardedStreamLocalPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) + continue + } + laddr = &net.UnixAddr{ + Name: payload.SocketPath, + Net: "unix", + } + raddr = &net.UnixAddr{ + Name: "@", + Net: "unix", + } + default: + panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) + } + if ok := l.forward(laddr, raddr, ch); !ok { + // Section 7.2, implementations MUST reject spurious incoming + // connections. + ch.Reject(Prohibited, "no forward for address") + continue + } + + } +} + +// remove removes the forward entry, and the channel feeding its +// listener. +func (l *forwardList) remove(addr net.Addr) { + l.Lock() + defer l.Unlock() + for i, f := range l.entries { + if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { + l.entries = append(l.entries[:i], l.entries[i+1:]...) + close(f.c) + return + } + } +} + +// closeAll closes and clears all forwards. +func (l *forwardList) closeAll() { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + close(f.c) + } + l.entries = nil +} + +func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { + f.c <- forward{newCh: ch, raddr: raddr} + return true + } + } + return false +} + +type tcpListener struct { + laddr *net.TCPAddr + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *tcpListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: l.laddr, + raddr: s.raddr, + }, nil +} + +// Close closes the listener. +func (l *tcpListener) Close() error { + m := channelForwardMsg{ + l.laddr.IP.String(), + uint32(l.laddr.Port), + } + + // this also closes the listener. 
+ l.conn.forwards.remove(l.laddr) + ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-tcpip-forward failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *tcpListener) Addr() net.Addr { + return l.laddr +} + +// Dial initiates a connection to the addr from the remote host. +// The resulting connection has a zero LocalAddr() and RemoteAddr(). +func (c *Client) Dial(n, addr string) (net.Conn, error) { + var ch Channel + switch n { + case "tcp", "tcp4", "tcp6": + // Parse the address into host and numeric port. + host, portString, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return nil, err + } + ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) + if err != nil { + return nil, err + } + // Use a zero address for local and remote address. + zeroAddr := &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + return &chanConn{ + Channel: ch, + laddr: zeroAddr, + raddr: zeroAddr, + }, nil + case "unix": + var err error + ch, err = c.dialStreamLocal(addr) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: addr, + Net: "unix", + }, + }, nil + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// DialTCP connects to the remote address raddr on the network net, +// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used +// as the local address for the connection. +func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { + if laddr == nil { + laddr = &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + } + ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: laddr, + raddr: raddr, + }, nil +} + +// RFC 4254 7.2 +type channelOpenDirectMsg struct { + raddr string + rport uint32 + laddr string + lport uint32 +} + +func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { + msg := channelOpenDirectMsg{ + raddr: raddr, + rport: uint32(rport), + laddr: laddr, + lport: uint32(lport), + } + ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type tcpChan struct { + Channel // the backing channel +} + +// chanConn fulfills the net.Conn interface without +// the tcpChan having to hold laddr or raddr directly. +type chanConn struct { + Channel + laddr, raddr net.Addr +} + +// LocalAddr returns the local network address. +func (t *chanConn) LocalAddr() net.Addr { + return t.laddr +} + +// RemoteAddr returns the remote network address. +func (t *chanConn) RemoteAddr() net.Addr { + return t.raddr +} + +// SetDeadline sets the read and write deadlines associated +// with the connection. +func (t *chanConn) SetDeadline(deadline time.Time) error { + if err := t.SetReadDeadline(deadline); err != nil { + return err + } + return t.SetWriteDeadline(deadline) +} + +// SetReadDeadline sets the read deadline. +// A zero value for t means Read will not time out. +// After the deadline, the error from Read will implement net.Error +// with Timeout() == true. 
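+//
+// Note, however, that chanConn does not implement deadlines: as the body
+// below shows, this method always returns an error.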
+func (t *chanConn) SetReadDeadline(deadline time.Time) error { + // for compatibility with previous version, + // the error message contains "tcpChan" + return errors.New("ssh: tcpChan: deadline not supported") +} + +// SetWriteDeadline exists to satisfy the net.Conn interface +// but is not implemented by this type. It always returns an error. +func (t *chanConn) SetWriteDeadline(deadline time.Time) error { + return errors.New("ssh: tcpChan: deadline not supported") +} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go new file mode 100644 index 00000000..49ddc2e7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -0,0 +1,353 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bufio" + "bytes" + "errors" + "io" + "log" +) + +// debugTransport if set, will print packet types as they go over the +// wire. No message decoding is done, to minimize the impact on timing. +const debugTransport = false + +const ( + gcmCipherID = "aes128-gcm@openssh.com" + aes128cbcID = "aes128-cbc" + tripledescbcID = "3des-cbc" +) + +// packetConn represents a transport that implements packet based +// operations. +type packetConn interface { + // Encrypt and send a packet of data to the remote peer. + writePacket(packet []byte) error + + // Read a packet from the connection. The read is blocking, + // i.e. if error is nil, then the returned byte slice is + // always non-empty. + readPacket() ([]byte, error) + + // Close closes the write-side of the connection. + Close() error +} + +// transport is the keyingTransport that implements the SSH packet +// protocol. +type transport struct { + reader connectionState + writer connectionState + + bufReader *bufio.Reader + bufWriter *bufio.Writer + rand io.Reader + isClient bool + io.Closer +} + +// packetCipher represents a combination of SSH encryption/MAC +// protocol. A single instance should be used for one direction only. +type packetCipher interface { + // writeCipherPacket encrypts the packet and writes it to w. The + // contents of the packet are generally scrambled. + writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error + + // readCipherPacket reads and decrypts a packet of data. The + // returned packet may be overwritten by future calls of + // readPacket. + readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) +} + +// connectionState represents one side (read or write) of the +// connection. This is necessary because each direction has its own +// keys, and can even have its own algorithms +type connectionState struct { + packetCipher + seqNum uint32 + dir direction + pendingKeyChange chan packetCipher +} + +// prepareKeyChange sets up key material for a keychange. The key changes in +// both directions are triggered by reading and writing a msgNewKey packet +// respectively. 
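+//
+// The freshly derived ciphers are queued on each direction's
+// pendingKeyChange channel and are only installed once the matching
+// msgNewKeys packet is read or written (see readPacket and writePacket).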
+func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { + ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) + if err != nil { + return err + } + t.reader.pendingKeyChange <- ciph + + ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) + if err != nil { + return err + } + t.writer.pendingKeyChange <- ciph + + return nil +} + +func (t *transport) printPacket(p []byte, write bool) { + if len(p) == 0 { + return + } + who := "server" + if t.isClient { + who = "client" + } + what := "read" + if write { + what = "write" + } + + log.Println(what, who, p[0]) +} + +// Read and decrypt next packet. +func (t *transport) readPacket() (p []byte, err error) { + for { + p, err = t.reader.readPacket(t.bufReader) + if err != nil { + break + } + if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { + break + } + } + if debugTransport { + t.printPacket(p, false) + } + + return p, err +} + +func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { + packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) + s.seqNum++ + if err == nil && len(packet) == 0 { + err = errors.New("ssh: zero length packet") + } + + if len(packet) > 0 { + switch packet[0] { + case msgNewKeys: + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + return nil, errors.New("ssh: got bogus newkeys message") + } + + case msgDisconnect: + // Transform a disconnect message into an + // error. Since this is lowest level at which + // we interpret message types, doing it here + // ensures that we don't have to handle it + // elsewhere. + var msg disconnectMsg + if err := Unmarshal(packet, &msg); err != nil { + return nil, err + } + return nil, &msg + } + } + + // The packet may point to an internal buffer, so copy the + // packet out here. 
+	fresh := make([]byte, len(packet))
+	copy(fresh, packet)
+
+	return fresh, err
+}
+
+func (t *transport) writePacket(packet []byte) error {
+	if debugTransport {
+		t.printPacket(packet, true)
+	}
+	return t.writer.writePacket(t.bufWriter, t.rand, packet)
+}
+
+func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
+	changeKeys := len(packet) > 0 && packet[0] == msgNewKeys
+
+	err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet)
+	if err != nil {
+		return err
+	}
+	if err = w.Flush(); err != nil {
+		return err
+	}
+	s.seqNum++
+	if changeKeys {
+		select {
+		case cipher := <-s.pendingKeyChange:
+			s.packetCipher = cipher
+		default:
+			panic("ssh: no key material for msgNewKeys")
+		}
+	}
+	return err
+}
+
+func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
+	t := &transport{
+		bufReader: bufio.NewReader(rwc),
+		bufWriter: bufio.NewWriter(rwc),
+		rand:      rand,
+		reader: connectionState{
+			packetCipher:     &streamPacketCipher{cipher: noneCipher{}},
+			pendingKeyChange: make(chan packetCipher, 1),
+		},
+		writer: connectionState{
+			packetCipher:     &streamPacketCipher{cipher: noneCipher{}},
+			pendingKeyChange: make(chan packetCipher, 1),
+		},
+		Closer: rwc,
+	}
+	t.isClient = isClient
+
+	if isClient {
+		t.reader.dir = serverKeys
+		t.writer.dir = clientKeys
+	} else {
+		t.reader.dir = clientKeys
+		t.writer.dir = serverKeys
+	}
+
+	return t
+}
+
+type direction struct {
+	ivTag     []byte
+	keyTag    []byte
+	macKeyTag []byte
+}
+
+var (
+	serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
+	clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
+)
+
+// newPacketCipher creates a packetCipher with the cipher and MAC keys
+// derived from kex.K, kex.H and sessionId, as described in RFC 4253,
+// section 7.2. d should either be serverKeys (to set up server->client
+// keys) or clientKeys (for client->server keys).
+func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
+	cipherMode := cipherModes[algs.Cipher]
+	macMode := macModes[algs.MAC]
+
+	iv := make([]byte, cipherMode.ivSize)
+	key := make([]byte, cipherMode.keySize)
+	macKey := make([]byte, macMode.keySize)
+
+	generateKeyMaterial(iv, d.ivTag, kex)
+	generateKeyMaterial(key, d.keyTag, kex)
+	generateKeyMaterial(macKey, d.macKeyTag, kex)
+
+	return cipherModes[algs.Cipher].create(key, iv, macKey, algs)
+}
+
+// generateKeyMaterial fills out with key material generated from tag, K, H
+// and sessionId, as specified in RFC 4253, section 7.2.
+func generateKeyMaterial(out, tag []byte, r *kexResult) {
+	var digestsSoFar []byte
+
+	h := r.Hash.New()
+	for len(out) > 0 {
+		h.Reset()
+		h.Write(r.K)
+		h.Write(r.H)
+
+		if len(digestsSoFar) == 0 {
+			h.Write(tag)
+			h.Write(r.SessionID)
+		} else {
+			h.Write(digestsSoFar)
+		}
+
+		digest := h.Sum(nil)
+		n := copy(out, digest)
+		out = out[n:]
+		if len(out) > 0 {
+			digestsSoFar = append(digestsSoFar, digest...)
+		}
+	}
+}
+
+const packageVersion = "SSH-2.0-Go"
+
+// exchangeVersions sends and receives a version line. The versionLine
+// string should be US ASCII, start with "SSH-2.0-", and should not
+// include a newline. exchangeVersions returns the other side's version
+// line.
+func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) {
+	// Contrary to the RFC, we do not ignore lines that don't
+	// start with "SSH-2.0-" to make the library usable with
+	// nonconforming servers.
+	for _, c := range versionLine {
+		// The spec disallows non US-ASCII chars, and
+		// specifically forbids null chars.
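+		// (Strictly speaking, the check below only rejects control
+		// characters; bytes >= 0x80 are tolerated even though the spec
+		// calls for US-ASCII.)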
+ if c < 32 { + return nil, errors.New("ssh: junk character in version line") + } + } + if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { + return + } + + them, err = readVersion(rw) + return them, err +} + +// maxVersionStringBytes is the maximum number of bytes that we'll +// accept as a version string. RFC 4253 section 4.2 limits this at 255 +// chars +const maxVersionStringBytes = 255 + +// Read version string as specified by RFC 4253, section 4.2. +func readVersion(r io.Reader) ([]byte, error) { + versionString := make([]byte, 0, 64) + var ok bool + var buf [1]byte + + for length := 0; length < maxVersionStringBytes; length++ { + _, err := io.ReadFull(r, buf[:]) + if err != nil { + return nil, err + } + // The RFC says that the version should be terminated with \r\n + // but several SSH servers actually only send a \n. + if buf[0] == '\n' { + if !bytes.HasPrefix(versionString, []byte("SSH-")) { + // RFC 4253 says we need to ignore all version string lines + // except the one containing the SSH version (provided that + // all the lines do not exceed 255 bytes in total). + versionString = versionString[:0] + continue + } + ok = true + break + } + + // non ASCII chars are disallowed, but we are lenient, + // since Go doesn't use null-terminated strings. + + // The RFC allows a comment after a space, however, + // all of it (version and comments) goes into the + // session hash. + versionString = append(versionString, buf[0]) + } + + if !ok { + return nil, errors.New("ssh: overflow reading version string") + } + + // There might be a '\r' on the end which we should remove. + if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { + versionString = versionString[:len(versionString)-1] + } + return versionString, nil +} diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 00000000..06f84b85 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 00000000..ed8da8de --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,60 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. 
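+// For example, with b := []byte{0x01, 0x00, 0x00, 0x00},
+// littleEndian{}.Uint32(b) == 1 while bigEndian{}.Uint32(b) == 0x01000000.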
+type byteOrder interface {
+	Uint32([]byte) uint32
+	Uint64([]byte) uint64
+}
+
+type littleEndian struct{}
+type bigEndian struct{}
+
+func (littleEndian) Uint32(b []byte) uint32 {
+	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func (littleEndian) Uint64(b []byte) uint64 {
+	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func (bigEndian) Uint32(b []byte) uint32 {
+	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+}
+
+func (bigEndian) Uint64(b []byte) uint64 {
+	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+		uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+// hostByteOrder returns littleEndian on little-endian machines and
+// bigEndian on big-endian machines.
+func hostByteOrder() byteOrder {
+	switch runtime.GOARCH {
+	case "386", "amd64", "amd64p32",
+		"arm", "arm64",
+		"mipsle", "mips64le", "mips64p32le",
+		"ppc64le",
+		"riscv", "riscv64":
+		return littleEndian{}
+	case "armbe", "arm64be",
+		"mips", "mips64", "mips64p32",
+		"ppc", "ppc64",
+		"s390", "s390x",
+		"sparc", "sparc64":
+		return bigEndian{}
+	}
+	panic("unknown architecture")
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
new file mode 100644
index 00000000..b4e6ecb2
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -0,0 +1,162 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cpu implements processor feature detection for
+// various CPU architectures.
+package cpu
+
+// Initialized reports whether the CPU features were initialized.
+//
+// For some GOOS/GOARCH combinations initialization of the CPU features depends
+// on reading an operating-system-specific file, e.g. /proc/self/auxv on linux/arm.
+// Initialized will report false if reading the file fails.
+var Initialized bool
+
+// CacheLinePad is used to pad structs to avoid false sharing.
+type CacheLinePad struct{ _ [cacheLineSize]byte }
+
+// X86 contains the supported CPU features of the
+// current X86/AMD64 platform. If the current platform
+// is not X86/AMD64 then all feature flags are false.
+//
+// X86 is padded to avoid false sharing. Further, HasAVX
+// and HasAVX2 are only set if the OS supports XMM and YMM
+// registers in addition to the CPUID feature bit being set.
+var X86 struct {
+	_            CacheLinePad
+	HasAES       bool // AES hardware implementation (AES NI)
+	HasADX       bool // Multi-precision add-carry instruction extensions
+	HasAVX       bool // Advanced vector extension
+	HasAVX2      bool // Advanced vector extension 2
+	HasBMI1      bool // Bit manipulation instruction set 1
+	HasBMI2      bool // Bit manipulation instruction set 2
+	HasERMS      bool // Enhanced REP for MOVSB and STOSB
+	HasFMA       bool // Fused-multiply-add instructions
+	HasOSXSAVE   bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
+	HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM
+	HasPOPCNT    bool // Hamming weight instruction POPCNT.
+ HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. +var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. 
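+//
+// A hedged usage sketch (the caller is assumed to import
+// golang.org/x/sys/cpu):
+//
+//	if cpu.ARM.HasNEON {
+//		// dispatch to a NEON-optimized code path
+//	}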
+var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. 
+var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go new file mode 100644 index 00000000..be602722 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix,ppc64 + +package cpu + +const cacheLineSize = 128 + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func init() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 00000000..981af681 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. +const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 00000000..568bcd03 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. 
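+// Under the gc toolchain (the !gccgo build above) the s390x assembly in
+// cpu_s390x.s is compiled in, so this reports true; the gccgo stub in
+// cpu_gccgo_s390x.go reports false instead.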
+func haveAsmFunctions() bool { return true }
+
+// The following feature detection functions are defined in cpu_s390x.s.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList
+func kmQuery() queryResult
+func kmcQuery() queryResult
+func kmctrQuery() queryResult
+func kmaQuery() queryResult
+func kimdQuery() queryResult
+func klmdQuery() queryResult
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
new file mode 100644
index 00000000..f7cb4697
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build !gccgo
+
+package cpu
+
+// cpuid is implemented in cpu_x86.s for the gc compiler
+// and in cpu_gccgo.c for gccgo.
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
+
+// xgetbv with ecx = 0 is implemented in cpu_x86.s for the gc compiler
+// and in cpu_gccgo.c for gccgo.
+func xgetbv() (eax, edx uint32)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c
new file mode 100644
index 00000000..e363c7d1
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c
@@ -0,0 +1,43 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build gccgo
+
+#include <cpuid.h>
+#include <stdint.h>
+
+// Need to wrap __get_cpuid_count because it's declared as static.
+int
+gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
+                   uint32_t *eax, uint32_t *ebx,
+                   uint32_t *ecx, uint32_t *edx)
+{
+	return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
+}
+
+// xgetbv reads the contents of an XCR (Extended Control Register)
+// specified in the ECX register into registers EDX:EAX.
+// Currently, the only supported value for XCR is 0.
+//
+// TODO: Replace with a better alternative:
+//
+//     #include <immintrin.h>
+//
+//     #pragma GCC target("xsave")
+//
+//     void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
+//       unsigned long long x = _xgetbv(0);
+//       *eax = x & 0xffffffff;
+//       *edx = (x >> 32) & 0xffffffff;
+//     }
+//
+// Note that _xgetbv is defined starting with GCC 8.
+void
+gccgoXgetbv(uint32_t *eax, uint32_t *edx)
+{
+	__asm("  xorl %%ecx, %%ecx\n"
+	      "  xgetbv"
+	    : "=a"(*eax), "=d"(*edx));
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go
new file mode 100644
index 00000000..ba49b91b
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build gccgo
+
+package cpu
+
+//extern gccgoGetCpuidCount
+func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32)
+
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) {
+	var a, b, c, d uint32
+	gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d)
+	return a, b, c, d
+}
+
+//extern gccgoXgetbv
+func gccgoXgetbv(eax, edx *uint32)
+
+func xgetbv() (eax, edx uint32) {
+	var a, d uint32
+	gccgoXgetbv(&a, &d)
+	return a, d
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
new file mode 100644
index 00000000..aa986f77
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+package cpu
+
+// haveAsmFunctions reports whether the other functions in this file can
+// be safely called.
+func haveAsmFunctions() bool { return false }
+
+// TODO(mundaym): the following feature detection functions are currently
+// stubs. See https://golang.org/cl/162887 for how to fix this.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList     { panic("not implemented for gccgo") }
+func kmQuery() queryResult    { panic("not implemented for gccgo") }
+func kmcQuery() queryResult   { panic("not implemented for gccgo") }
+func kmctrQuery() queryResult { panic("not implemented for gccgo") }
+func kmaQuery() queryResult   { panic("not implemented for gccgo") }
+func kimdQuery() queryResult  { panic("not implemented for gccgo") }
+func klmdQuery() queryResult  { panic("not implemented for gccgo") }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go
new file mode 100644
index 00000000..10e712dc
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!amd64p32,!386
+
+package cpu
+
+import (
+	"io/ioutil"
+)
+
+const (
+	_AT_HWCAP  = 16
+	_AT_HWCAP2 = 26
+
+	procAuxv = "/proc/self/auxv"
+
+	uintSize = int(32 << (^uint(0) >> 63))
+)
+
+// For platforms that don't have a 'cpuid' equivalent, we rely on HWCAP/HWCAP2.
+// These are initialized in cpu_$GOARCH.go
+// and should not be changed after they are initialized.
+var hwCap uint
+var hwCap2 uint
+
+func init() {
+	buf, err := ioutil.ReadFile(procAuxv)
+	if err != nil {
+		// e.g. on Android, /proc/self/auxv is not accessible, so silently
+		// ignore the error and leave Initialized = false.
+		return
+	}
+
+	bo := hostByteOrder()
+	for len(buf) >= 2*(uintSize/8) {
+		var tag, val uint
+		switch uintSize {
+		case 32:
+			tag = uint(bo.Uint32(buf[0:]))
+			val = uint(bo.Uint32(buf[4:]))
+			buf = buf[8:]
+		case 64:
+			tag = uint(bo.Uint64(buf[0:]))
+			val = uint(bo.Uint64(buf[8:]))
+			buf = buf[16:]
+		}
+		switch tag {
+		case _AT_HWCAP:
+			hwCap = val
+		case _AT_HWCAP2:
+			hwCap2 = val
+		}
+	}
+	doinit()
+
+	Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
new file mode 100644
index 00000000..2057006d
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 00000000..fa7fb1bd --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 64 + +// HWCAP/HWCAP2 bits. These are exposed by Linux. 
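+// For example, hwcap_AES below is bit 3 of the AT_HWCAP auxv value read
+// by cpu_linux.go; doinit translates it into the exported ARM64.HasAES flag.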
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func doinit() { + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 00000000..6c8d975d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 00000000..d579eaef --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,161 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +const cacheLineSize = 256 + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // cryptography facilities + msa4 facility = 77 // message-security-assist extension 4 + msa8 facility = 146 // message-security-assist extension 8 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) 
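+ // (KMCTR and KMA sit behind the MSA-4 and MSA-8 facility bits, which is why they are only queried once STFLE reports those facilities.)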
+ } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 00000000..f55e0c82 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 00000000..cda87b1a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 00000000..dd1e76dc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,arm64 + +package cpu + +const cacheLineSize = 64 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 00000000..e5037d92 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !gccgo + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 00000000..bd9bbda0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,15 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 00000000..d70d317f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 + +package cpu + +const cacheLineSize = 64 + +func init() { + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + osSupportsAVX := false + // For XGETBV, OSXSAVE bit is required and sufficient. 
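+ // (XGETBV with ECX=0 returns the XCR0 register in EAX; bit 1 covers the XMM registers and bit 2 the YMM registers, so both must be OS-managed before AVX is safe to use.)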
+ if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, _, _ := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasRDSEED = isSet(18, ebx7) + X86.HasADX = isSet(19, ebx7) +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<<bitpos) != 0 +} diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go -// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64. -// Once this package can require Go 1.9, we can delete this -// and update the caller to use bits.OnesCount64. -func onesCount64(x uint64) int { - const m0 = 0x5555555555555555 // 01010101 ... - const m1 = 0x3333333333333333 // 00110011 ... - const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... - const m3 = 0x00ff00ff00ff00ff // etc. - const m4 = 0x0000ffff0000ffff - - // Implementation: Parallel summing of adjacent bits. - // See "Hacker's Delight", Chap. 5: Counting Bits. - // The following pattern shows the general approach: - // - // x = x>>1&(m0&m) + x&(m0&m) - // x = x>>2&(m1&m) + x&(m1&m) - // x = x>>4&(m2&m) + x&(m2&m) - // x = x>>8&(m3&m) + x&(m3&m) - // x = x>>16&(m4&m) + x&(m4&m) - // x = x>>32&(m5&m) + x&(m5&m) - // return int(x) - // - // Masking (& operations) can be left away when there's no - // danger that a field's sum will carry over into the next - // field: Since the result cannot be > 64, 8 bits is enough - // and we can ignore the masks for the shifts by 8 and up. - // Per "Hacker's Delight", the first line can be simplified - // more, but it saves at best one instruction, so we leave - // it alone for clarity. - const m = 1<<64 - 1 - x = x>>1&(m0&m) + x&(m0&m) - x = x>>2&(m1&m) + x&(m1&m) - x = (x>>4 + x) & (m2 & m) - x += x >> 8 - x += x >> 16 - x += x >> 32 - return int(x) & (1<<7 - 1) -} diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index f121a8d6..3559e5dc 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -6,7 +6,19 @@ package unix -import "runtime" +import ( + "runtime" + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // @@ -14,7 +26,7 @@ import "runtime" func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctlSetWinsize(fd, req, value) + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } @@ -24,7 +36,30 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. - err := ioctlSetTermios(fd, req, value) + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function.
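+// (Requests such as FIONREAD, which write their result through the argument pointer, fit this function.)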
+func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 5a22eca9..890ec464 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -212,9 +212,11 @@ esac echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ; elif [ "$GOOS" == "darwin" ]; then # pre-1.12, direct syscalls - echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go"; + echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos syscall_darwin_${GOARCH}.1_11.go $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go"; # 1.12 and later, syscalls via libSystem echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; + # 1.13 and later, syscalls via libSystem (including syscallPtr) + echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go"; else echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 14624b95..67b84828 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -60,6 +60,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -80,6 +81,7 @@ includes_Darwin=' includes_DragonFly=' #include #include +#include #include #include #include @@ -103,6 +105,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -179,24 +182,31 @@ struct ltchars { #include #include #include +#include #include #include #include #include +#include #include +#include #include +#include +#include +#include +#include +#include +#include +#include #include +#include #include #include #include #include #include #include -#include -#include -#include -#include -#include +#include #include #include #include @@ -206,26 +216,23 @@ struct ltchars { #include #include #include +#include #include +#include #include #include +#include #include -#include #include #include -#include -#include -#include #include -#include -#include +#include #include -#include +#include +#include +#include #include -#include -#include -#include -#include + #include #include @@ -264,6 +271,11 @@ struct ltchars { #define FS_KEY_DESC_PREFIX "fscrypt:" #define FS_KEY_DESC_PREFIX_SIZE 8 #define FS_MAX_KEY_SIZE 64 + +// The code generator produces -0x1 for (~0), but an unsigned value is necessary +// for the tipc_subscr timeout __u32 field. 
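+// (0xffffffff has the same bit pattern as (~0) in a 32-bit field; the redefinition only changes the sign of the generated Go constant.)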
+#undef TIPC_WAIT_FOREVER +#define TIPC_WAIT_FOREVER 0xffffffff ' includes_NetBSD=' @@ -273,6 +285,7 @@ includes_NetBSD=' #include #include #include +#include #include #include #include @@ -299,6 +312,7 @@ includes_OpenBSD=' #include #include #include +#include #include #include #include @@ -335,6 +349,7 @@ includes_OpenBSD=' includes_SunOS=' #include #include +#include #include #include #include @@ -427,6 +442,7 @@ ccflags="$@" $2 == "XCASE" || $2 == "ALTWERASE" || $2 == "NOKERNINFO" || + $2 == "NFDBITS" || $2 ~ /^PAR/ || $2 ~ /^SIG[^_]/ || $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || @@ -451,6 +467,7 @@ ccflags="$@" $2 ~ /^SYSCTL_VERS/ || $2 !~ "MNT_BITS" && $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT)_/ || $2 ~ /^KEXEC_/ || @@ -506,6 +523,7 @@ ccflags="$@" $2 ~ /^XDP_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || $2 ~ /^CRYPTO_/ || + $2 ~ /^TIPC_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 1aa065f9..9ad8a0d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -350,49 +350,12 @@ func (w WaitStatus) Signal() Signal { func (w WaitStatus) Continued() bool { return w&0x01000000 != 0 } -func (w WaitStatus) CoreDump() bool { return w&0x200 != 0 } +func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, // Therefore, the programmer must call dup2 instead of fcntl in this case. 
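The ioctl hunks above, together with the matching deletions from syscall_aix.go and the per-OS files below, consolidate the previously duplicated get/set helpers into the single ioctl.go implementation. A minimal usage sketch of the consolidated helpers; the descriptor and request values here are only illustrative:

	package main

	import (
		"fmt"
		"os"

		"golang.org/x/sys/unix"
	)

	func main() {
		// IoctlGetWinsize wraps ioctl(fd, TIOCGWINSZ, &ws) and reports the
		// terminal dimensions of the descriptor.
		ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
		if err != nil {
			fmt.Fprintln(os.Stderr, "ioctl:", err)
			return
		}
		fmt.Printf("%d rows x %d cols\n", ws.Row, ws.Col)
	}

IoctlSetInt keeps passing its integer argument by value, while the Winsize and Termios helpers pass pointers through unsafe.Pointer, which is why ioctl.go now imports unsafe.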
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index bf05603f..b3c8e330 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -29,6 +29,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 13d4321f..9a6e0241 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -29,6 +29,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 97a8eef6..3e667142 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -413,8 +413,6 @@ func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err e return kevent(kq, change, len(changes), event, len(events), timeout) } -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL - // sysctlmib translates name to mib number and appends any additional args. func sysctlmib(name string, args ...int) ([]_C_int, error) { // Translate name to mib number. diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go new file mode 100644 index 00000000..6a15cba6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,go1.12,!go1.13 + +package unix + +import ( + "unsafe" +) + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // To implement this using libSystem we'd need syscall_syscallPtr for + // fdopendir. However, syscallPtr was only added in Go 1.13, so we fall + // back to raw syscalls for this func on Go 1.12. + var p unsafe.Pointer + if len(buf) > 0 { + p = unsafe.Pointer(&buf[0]) + } else { + p = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + return n, errnoErr(e1) + } + return n, nil +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go new file mode 100644 index 00000000..24960c38 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go @@ -0,0 +1,103 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin,go1.13 + +package unix + +import "unsafe" + +//sys closedir(dir uintptr) (err error) +//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) + +func fdopendir(fd int) (dir uintptr, err error) { + r0, _, e1 := syscall_syscallPtr(funcPC(libc_fdopendir_trampoline), uintptr(fd), 0, 0) + dir = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fdopendir_trampoline() + +//go:linkname libc_fdopendir libc_fdopendir +//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // Simulate Getdirentries using fdopendir/readdir_r/closedir. + const ptrSize = unsafe.Sizeof(uintptr(0)) + + // We store the number of entries to skip in the seek + // offset of fd. See issue #31368. + // It's not the full required semantics, but should handle the case + // of calling Getdirentries or ReadDirent repeatedly. + // It won't handle assigning the results of lseek to *basep, or handle + // the directory being edited underfoot. + skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + return 0, err + } + + // We need to duplicate the incoming file descriptor + // because the caller expects to retain control of it, but + // fdopendir expects to take control of its argument. + // Just Dup'ing the file descriptor is not enough, as the + // result shares underlying state. Use Openat to make a really + // new file descriptor referring to the same directory. + fd2, err := Openat(fd, ".", O_RDONLY, 0) + if err != nil { + return 0, err + } + d, err := fdopendir(fd2) + if err != nil { + Close(fd2) + return 0, err + } + defer closedir(d) + + var cnt int64 + for { + var entry Dirent + var entryp *Dirent + e := readdir_r(d, &entry, &entryp) + if e != 0 { + return n, errnoErr(e) + } + if entryp == nil { + break + } + if skip > 0 { + skip-- + cnt++ + continue + } + reclen := int(entry.Reclen) + if reclen > len(buf) { + // Not enough room. Return for now. + // The counter will let us know where we should start up again. + // Note: this strategy for suspending in the middle and + // restarting is O(n^2) in the length of the directory. Oh well. + break + } + // Copy entry into return buffer. + s := struct { + ptr unsafe.Pointer + siz int + cap int + }{ptr: unsafe.Pointer(&entry), siz: reclen, cap: reclen} + copy(buf, *(*[]byte)(unsafe.Pointer(&s))) + buf = buf[reclen:] + n += reclen + cnt++ + } + // Set the seek offset of the input fd to record + // how many files we've already returned. + _, err = Seek(fd, cnt, 0 /* SEEK_SET */) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 3e1cdfb5..c5018a38 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -339,43 +339,6 @@ func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(sig //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. 
-func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) @@ -497,7 +460,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go new file mode 100644 index 00000000..6b223f91 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin,386,!go1.12 + +package unix + +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index cd8be182..dd756e70 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -10,6 +10,7 @@ import ( "syscall" ) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func setTimespec(sec, nsec int64) Timespec { @@ -45,6 +46,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -58,7 +63,6 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go new file mode 100644 index 00000000..68ebd6fa --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin,amd64,!go1.12 + +package unix + +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index d0d07243..7f148c42 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -10,6 +10,7 @@ import ( "syscall" ) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func setTimespec(sec, nsec int64) Timespec { @@ -45,6 +46,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -58,7 +63,6 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go new file mode 100644 index 00000000..c81510da --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin,arm,!go1.12 + +package unix + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return 0, ENOSYS +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index 01e8a38a..58be02e7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -12,6 +12,10 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ENOTSUP } +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } @@ -45,6 +49,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -62,7 +70,3 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Lstat(path string, stat *Stat_t) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return 0, ENOSYS -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go new file mode 100644 index 00000000..01d45040 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,arm64,!go1.12 + +package unix + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return 0, ENOSYS +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index e674f81d..1ee931f9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -14,6 +14,10 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ENOTSUP } +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } @@ -47,6 +51,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -64,7 +72,3 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Lstat(path string, stat *Stat_t) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return 0, ENOSYS -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 4b4ae460..f34c86c8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -15,6 +15,7 @@ func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) // 32-bit only func syscall_rawSyscall(fn, a1, a2,
a3 uintptr) (r1, r2 uintptr, err Errno) func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) //go:linkname syscall_syscall syscall.syscall //go:linkname syscall_syscall6 syscall.syscall6 @@ -22,6 +23,7 @@ func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, er //go:linkname syscall_syscall9 syscall.syscall9 //go:linkname syscall_rawSyscall syscall.rawSyscall //go:linkname syscall_rawSyscall6 syscall.rawSyscall6 +//go:linkname syscall_syscallPtr syscall.syscallPtr // Find the entry point for f. See comments in runtime/proc.go for the // function of the same name. diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 260a400f..8c8d5029 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -14,6 +14,8 @@ package unix import "unsafe" +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -150,43 +152,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error { err := sysctl(mib, old, oldlen, nil, 0) if err != nil { @@ -325,7 +290,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 9babb31e..a6b4830a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 329d240b..25ac9340 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -36,6 +36,8 @@ var ( // INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h const _ino64First = 1200031 +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + func supportsABI(ver uint32) bool { osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) return osreldate >= ver @@ -201,43 +203,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) @@ -688,7 +653,7 @@ func PtraceSingleStep(pid int) (err error) { //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 21e03958..dcc56457 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 9c945a65..321c3bac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 5cd6243f..69770083 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index a3180548..dbbbfd60 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 637b5017..ebf3195b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -71,6 +71,17 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { 
// ioctl itself should not be exposed directly, but additional get/set // functions for specific types are permissible. +// IoctlRetInt performs an ioctl operation specified by req on a device +// associated with opened file descriptor fd, and returns a non-negative +// integer that is returned by the ioctl syscall. +func IoctlRetInt(fd int, req uint) (int, error) { + ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0) + if err != 0 { + return 0, err + } + return int(ret), nil +} + // IoctlSetPointerInt performs an ioctl operation which sets an // integer value on fd, using the specified request number. The ioctl // argument is called with a pointer to the integer value, rather than @@ -80,52 +91,18 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) } -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - func IoctlSetRTCTime(fd int, value *RTCTime) error { err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - func IoctlGetUint32(fd int, req uint) (uint32, error) { var value uint32 err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetRTCTime(fd int) (*RTCTime, error) { var value RTCTime err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) @@ -798,6 +775,70 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } +// SockaddrTIPC implements the Sockaddr interface for AF_TIPC type sockets. +// For more information on TIPC, see: http://tipc.sourceforge.net/. +type SockaddrTIPC struct { + // Scope is the publication scopes when binding service/service range. + // Should be set to TIPC_CLUSTER_SCOPE or TIPC_NODE_SCOPE. + Scope int + + // Addr is the type of address used to manipulate a socket. Addr must be + // one of: + // - *TIPCSocketAddr: "id" variant in the C addr union + // - *TIPCServiceRange: "nameseq" variant in the C addr union + // - *TIPCServiceName: "name" variant in the C addr union + // + // If nil, EINVAL will be returned when the structure is used. + Addr TIPCAddr + + raw RawSockaddrTIPC +} + +// TIPCAddr is implemented by types that can be used as an address for +// SockaddrTIPC. It is only implemented by *TIPCSocketAddr, *TIPCServiceRange, +// and *TIPCServiceName. 
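+// (The kernel's sockaddr_tipc carries a 12-byte addr union; tipcAddr returns each variant's raw bytes as a [12]byte so they can be copied into RawSockaddrTIPC.Addr.)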
+type TIPCAddr interface { + tipcAddrtype() uint8 + tipcAddr() [12]byte +} + +func (sa *TIPCSocketAddr) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCSocketAddr{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCSocketAddr) tipcAddrtype() uint8 { return TIPC_SOCKET_ADDR } + +func (sa *TIPCServiceRange) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCServiceRange{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCServiceRange) tipcAddrtype() uint8 { return TIPC_SERVICE_RANGE } + +func (sa *TIPCServiceName) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCServiceName{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCServiceName) tipcAddrtype() uint8 { return TIPC_SERVICE_ADDR } + +func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Addr == nil { + return nil, 0, EINVAL + } + + sa.raw.Family = AF_TIPC + sa.raw.Scope = int8(sa.Scope) + sa.raw.Addrtype = sa.Addr.tipcAddrtype() + sa.raw.Addr = sa.Addr.tipcAddr() + + return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil +} + func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: @@ -923,6 +964,27 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } + return sa, nil + case AF_TIPC: + pp := (*RawSockaddrTIPC)(unsafe.Pointer(rsa)) + + sa := &SockaddrTIPC{ + Scope: int(pp.Scope), + } + + // Determine which union variant is present in pp.Addr by checking + // pp.Addrtype. + switch pp.Addrtype { + case TIPC_SERVICE_RANGE: + sa.Addr = (*TIPCServiceRange)(unsafe.Pointer(&pp.Addr)) + case TIPC_SERVICE_ADDR: + sa.Addr = (*TIPCServiceName)(unsafe.Pointer(&pp.Addr)) + case TIPC_SOCKET_ADDR: + sa.Addr = (*TIPCSocketAddr)(unsafe.Pointer(&pp.Addr)) + default: + return nil, EINVAL + } + return sa, nil } return nil, EAFNOSUPPORT @@ -1160,6 +1222,34 @@ func KeyctlDHCompute(params *KeyctlDHParams, buffer []byte) (size int, err error return keyctlDH(KEYCTL_DH_COMPUTE, params, buffer) } + +// KeyctlRestrictKeyring implements the KEYCTL_RESTRICT_KEYRING command. This +// command limits the set of keys that can be linked to the keyring, regardless +// of keyring permissions. The command requires the "setattr" permission. +// +// When called with an empty keyType the command locks the keyring, preventing +// any further keys from being linked to the keyring. +// +// The "asymmetric" keyType defines restrictions requiring key payloads to be +// DER encoded X.509 certificates signed by keys in another keyring. Restrictions +// for "asymmetric" include "builtin_trusted", "builtin_and_secondary_trusted", +// "key_or_keyring:<key>", and "key_or_keyring:<key>:chain". +// +// As of Linux 4.12, only the "asymmetric" keyType defines type-specific +// restrictions.
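+// For example, the restriction "builtin_trusted" admits only keys that can be vouched for by the kernel's builtin trusted keyring.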
+// +// See the full documentation at: +// http://man7.org/linux/man-pages/man3/keyctl_restrict_keyring.3.html +// http://man7.org/linux/man-pages/man2/keyctl.2.html +func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error { + if keyType == "" { + return keyctlRestrictKeyring(KEYCTL_RESTRICT_KEYRING, ringid) + } + return keyctlRestrictKeyringByType(KEYCTL_RESTRICT_KEYRING, ringid, keyType, restriction) +} + +//sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL +//sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL + func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { var msg Msghdr var rsa RawSockaddrAny @@ -1403,8 +1493,12 @@ func PtraceSyscall(pid int, signal int) (err error) { func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) } +func PtraceInterrupt(pid int) (err error) { return ptrace(PTRACE_INTERRUPT, pid, 0, 0) } + func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) } +func PtraceSeize(pid int) (err error) { return ptrace(PTRACE_SEIZE, pid, 0, 0) } + func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) } //sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) @@ -1761,6 +1855,17 @@ func OpenByHandleAt(mountFD int, handle FileHandle, flags int) (fd int, err erro return openByHandleAt(mountFD, handle.fileHandle, flags) } +// Klogset wraps the sys_syslog system call; it sets console_loglevel to +// the value specified by arg and passes a dummy pointer to bufp. +func Klogset(typ int, arg int) (err error) { + var p unsafe.Pointer + _, _, errno := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(p), uintptr(arg)) + if errno != 0 { + return errnoErr(errno) + } + return nil +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index e2f8cf6e..e7fa665e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -372,6 +372,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 87a30744..088ce0f9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -163,6 +163,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index f6267944..11930fc8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -252,6 +252,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go 
b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cb20b15d..251e2d97 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -180,6 +180,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index b3b21ec1..7562fe97 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -208,6 +208,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 5144d4e1..a939ff8f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -220,6 +220,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 0a100b66..28d6d0f2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -91,6 +91,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6230f640..6798c262 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -179,6 +179,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index f81dbdc9..eb5cb1a7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -120,6 +120,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index b6956561..37321c12 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -107,6 +107,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) 
SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 5ef30904..f95463ee 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -18,6 +18,8 @@ import ( "unsafe" ) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -187,43 +189,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) { var value Ptmget err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) @@ -365,7 +330,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 24f74e58..24da8b52 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 6878bf7f..25a0ac82 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = 
uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index dbbfcf71..21591ecd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index f3434465..80474963 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 1a074b2f..7fe65ef7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -18,6 +18,8 @@ import ( "unsafe" ) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -178,43 +180,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
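
The Msghdr.SetIovlen setter added file by file above (and in the remaining per-GOARCH files below) exists because the Iovlen field is uint32 on 32-bit Linux targets, uint64 on 64-bit ones, and int32 on the BSDs and Solaris, so portable callers must not assign the field directly. A minimal sketch of the intended use when hand-building a message header, assuming a non-empty buffer:

    package main

    import "golang.org/x/sys/unix"

    // prepareMsghdr wires a single receive buffer into a Msghdr using the
    // width-hiding setters instead of direct field assignment.
    func prepareMsghdr(iov *unix.Iovec, buf []byte) unix.Msghdr {
        iov.Base = &buf[0] // assumes len(buf) > 0
        iov.SetLen(len(buf))

        var msg unix.Msghdr
        msg.Iov = iov
        msg.SetIovlen(1) // the setter added in these hunks
        return msg
    }
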
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -340,7 +305,7 @@ func Uname(uname *Utsname) error { //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index d62da60d..42b5a0e5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 9a35334c..6ea4b488 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 5d812aae..1c3d26fa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 0fb39cf5..a8c458cb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 0153a316..62f968c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -553,40 +553,10 @@ 
func Minor(dev uint64) uint32 { //sys ioctl(fd int, req uint, arg uintptr) (err error) -func IoctlSetInt(fd int, req uint, value int) (err error) { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - func IoctlSetTermio(fd int, req uint, value *Termio) (err error) { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetTermio(fd int, req uint) (*Termio, error) { var value Termio err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) @@ -679,7 +649,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 91c32ddf..b22a34d7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -18,6 +18,10 @@ func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go index 3b39d740..6217cdba 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -3,7 +3,7 @@ // +build 386,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 8fe55477..e3ff2ee3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -3,7 +3,7 @@ // +build amd64,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
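
The IoctlSetInt/IoctlGetInt/IoctlGetWinsize/IoctlGetTermios helpers deleted from the NetBSD, OpenBSD, and (just above) Solaris files are not gone: the update consolidates the identical per-OS copies into a shared definition that lives outside this excerpt, so call sites are unchanged. The usual example, a terminal size query:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Same wrapper call as before the consolidation, now defined once.
        ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%d rows x %d cols\n", ws.Row, ws.Col)
    }
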
// cgo -godefs -- -m64 _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go index 7a977770..3e417571 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -3,7 +3,7 @@ // +build arm,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index 6d56d8a0..cbd8ed18 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -3,7 +3,7 @@ // +build arm64,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index bbe6089b..61304717 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -938,6 +938,7 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_MAXID = 0x4 + NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index d2bbaabc..b72544fc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -3,7 +3,7 @@ // +build 386,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go package unix @@ -1055,6 +1055,7 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 4f8db783..9f382678 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -3,7 +3,7 @@ // +build amd64,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -1056,6 +1056,7 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 53e5de60..16db56ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -3,7 +3,7 @@ // +build arm,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
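
Two related changes meet here: Select on NetBSD, OpenBSD, and Solaris now returns the ready-descriptor count n the way select(2) itself reports it (matching the Linux port), and NFDBITS, the number of bits per FdSet.Bits word, is now exported for each platform (0x20 on 32-bit targets, 0x40 on 64-bit ones, as the zerrors hunks show). Together they make hand-rolled select loops expressible without magic numbers. A sketch, assuming linux/amd64 where Bits is an int64 array; other platforms use different element types, so real code wants a build-tagged helper:

    package main

    import (
        "time"

        "golang.org/x/sys/unix"
    )

    // waitReadable blocks until fd is readable or the timeout expires;
    // a zero n from Select distinguishes the timeout case.
    func waitReadable(fd int, d time.Duration) (bool, error) {
        var set unix.FdSet
        set.Bits[fd/unix.NFDBITS] |= int64(1) << (uint(fd) % unix.NFDBITS)

        tv := unix.NsecToTimeval(d.Nanoseconds())
        n, err := unix.Select(fd+1, &set, nil, nil, &tv)
        return n > 0, err
    }
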
// cgo -godefs -- _const.go package unix @@ -1063,6 +1063,7 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index d4a192fe..1a1de345 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -3,7 +3,7 @@ // +build arm64,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -1056,6 +1056,7 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 3fb475bc..fcf5796a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1087,6 +1091,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1099,6 +1114,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1344,6 +1361,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x20 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1408,6 +1426,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1673,6 +1695,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1688,6 +1712,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + 
PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -1726,6 +1751,10 @@ const ( PTRACE_SINGLEBLOCK = 0x21 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 @@ -1786,7 +1815,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1859,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1883,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1890,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1902,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1910,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1996,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2134,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2434,6 +2469,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 
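
The PTRACE_GET_SYSCALL_INFO, PTRACE_SYSCALL_INFO_*, and PTRACE_EVENTMSG_SYSCALL_{ENTRY,EXIT} values track Linux 5.3, where PTRACE_GETEVENTMSG at a syscall-stop reports whether the tracee is entering or leaving the call. Combined with the PtraceSeize/PtraceInterrupt wrappers added earlier in this diff, a tracer can attach without stopping its target and still classify the stops it observes. A sketch of that flow, valid only on 5.3+ kernels:

    package main

    import "golang.org/x/sys/unix"

    // nextSyscallStop seizes pid, runs it to its next syscall-stop, and
    // reports whether that stop is a syscall entry (vs. exit).
    func nextSyscallStop(pid int) (entering bool, err error) {
        if err = unix.PtraceSeize(pid); err != nil { // attach; tracee keeps running
            return false, err
        }
        defer unix.PtraceDetach(pid)

        if err = unix.PtraceInterrupt(pid); err != nil { // force an initial stop
            return false, err
        }
        var ws unix.WaitStatus
        if _, err = unix.Wait4(pid, &ws, 0, nil); err != nil {
            return false, err
        }

        if err = unix.PtraceSyscall(pid, 0); err != nil { // resume to syscall-stop
            return false, err
        }
        if _, err = unix.Wait4(pid, &ws, 0, nil); err != nil {
            return false, err
        }

        msg, err := unix.PtraceGetEventMsg(pid) // PTRACE_EVENTMSG_SYSCALL_* on 5.3+
        return msg == unix.PTRACE_EVENTMSG_SYSCALL_ENTRY, err
    }
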
+ TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2447,7 +2547,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2646,6 +2746,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2662,6 +2764,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 9c4e19f9..5bcf3dbd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1087,6 +1091,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1099,6 +1114,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1344,6 +1361,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1408,6 +1426,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1674,6 +1696,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 
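
One genuine bug fix recurs in every linux zerrors file in this update: TP_STATUS_TS_RAW_HARDWARE had been generated as -0x80000000 (the C expression 1<<31 read back through a signed int), which cannot be masked against the uint32 tp_status word of a TPACKET ring frame; as the untyped constant 0x80000000 it can. A sketch with a stand-in for reading that word:

    package main

    import "golang.org/x/sys/unix"

    // frameStatus would read the tp_status word of a mmap'd TPACKET
    // frame; it is a hypothetical stand-in for this sketch.
    func frameStatus() uint32 { return 0 }

    func hasRawHWTimestamp() bool {
        // With the old negative constant this expression failed to
        // compile (constant overflows uint32); now it masks cleanly.
        return frameStatus()&unix.TP_STATUS_TS_RAW_HARDWARE != 0
    }
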
PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1689,6 +1713,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -1727,6 +1752,10 @@ const ( PTRACE_SINGLEBLOCK = 0x21 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 @@ -1787,7 +1816,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1860,6 +1889,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1884,6 +1914,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1891,7 +1922,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1903,6 +1934,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1911,8 +1943,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1997,6 +2029,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2135,6 +2169,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2435,6 +2470,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 
0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2448,7 +2548,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2646,6 +2746,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2662,6 +2764,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a1f038c0..3e02dcff 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1342,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x20 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1406,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1693,8 @@ const ( 
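
The repeated TIPC_* block encodes TIPC's zone.cluster.node addressing directly in the constants: 8 zone bits at offset 24, 12 cluster bits at offset 12, and 12 node bits at offset 0, with the matching masks alongside. Packing and unpacking an address is therefore plain shift-and-mask arithmetic:

    package main

    import "golang.org/x/sys/unix"

    // tipcAddr packs a zone.cluster.node triple into the 32-bit TIPC
    // address word, e.g. tipcAddr(1, 1, 10) for <1.1.10>.
    func tipcAddr(zone, cluster, node uint32) uint32 {
        return zone<<unix.TIPC_ZONE_OFFSET |
            cluster<<unix.TIPC_CLUSTER_OFFSET |
            node<<unix.TIPC_NODE_OFFSET
    }

    // tipcNode recovers the node component from a packed address.
    func tipcNode(addr uint32) uint32 {
        return (addr >> unix.TIPC_NODE_OFFSET) & unix.TIPC_NODE_MASK
    }
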
PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1692,6 +1716,7 @@ const ( PTRACE_GETSIGMASK = 0x420a PTRACE_GETVFPREGS = 0x1b PTRACE_GETWMMXREGS = 0x12 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x16 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -1732,6 +1757,10 @@ const ( PTRACE_SET_SYSCALL = 0x17 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 PT_DATA_ADDR = 0x10004 PT_TEXT_ADDR = 0x10000 @@ -1793,7 +1822,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1866,6 +1895,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1890,6 +1920,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1897,7 +1928,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1909,6 +1940,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1917,8 +1949,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2003,6 +2035,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2141,6 +2175,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2441,6 +2476,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + 
TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2454,7 +2554,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2652,6 +2752,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2668,6 +2770,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 504ce138..2293f8bb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -561,6 +564,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1089,6 +1093,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1101,6 +1116,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1345,6 +1362,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1409,6 +1427,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + 
NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1674,6 +1696,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1687,6 +1711,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1719,6 +1744,12 @@ const ( PTRACE_SETSIGMASK = 0x420b PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1777,7 +1808,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1850,6 +1881,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1874,6 +1906,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1881,7 +1914,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1893,6 +1926,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1901,8 +1935,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1987,6 +2021,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2125,6 +2161,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2426,6 +2463,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 
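
The NS_GET_* ioctls act on /proc/<pid>/ns/* descriptors: NS_GET_NSTYPE returns the namespace's CLONE_NEW* flag, NS_GET_PARENT and NS_GET_USERNS return a descriptor for the related namespace, and NS_GET_OWNER_UID reports the owning uid; the mips files further down carry 0x2000bxxx values only because mips encodes ioctl direction bits differently. A sketch, assuming the IoctlRetInt helper from this same x/sys revision is available:

    package main

    import "golang.org/x/sys/unix"

    // nsIsNet reports whether the namespace file at path (for example
    // "/proc/self/ns/net") is a network namespace.
    func nsIsNet(path string) (bool, error) {
        fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
        if err != nil {
            return false, err
        }
        defer unix.Close(fd)

        t, err := unix.IoctlRetInt(fd, unix.NS_GET_NSTYPE) // assumed available
        if err != nil {
            return false, err
        }
        return t == unix.CLONE_NEWNET, nil
    }
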
+ TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2439,7 +2541,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2637,6 +2739,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2653,6 +2757,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 58b64290..57742ea2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1342,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + 
NFDBITS = 0x20 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1406,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1685,6 +1709,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1728,6 +1753,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1786,7 +1815,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1859,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1883,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1890,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1902,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1910,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1996,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2134,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2436,6 +2471,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + 
TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2449,7 +2549,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2648,6 +2748,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2664,6 +2766,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 35e33de6..33bfa6cb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + 
KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1342,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1406,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1685,6 +1709,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1728,6 +1753,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1786,7 +1815,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1859,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1883,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1890,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1902,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1910,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1996,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2134,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2436,6 +2471,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + 
TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2449,7 +2549,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2648,6 +2748,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2664,6 +2766,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 574fcd8c..89fd414e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + 
KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1342,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1406,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1685,6 +1709,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1728,6 +1753,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1786,7 +1815,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1859,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1883,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1890,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1902,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1910,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1996,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2134,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2436,6 +2471,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + 
TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2449,7 +2549,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2648,6 +2748,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2664,6 +2766,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index cdf0cf5f..aabe5e42 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 
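Worth noting in each of these regenerated zerrors files: TP_STATUS_TS_RAW_HARDWARE changes from -0x80000000 to 0x80000000. The kernel's tp_status field is an unsigned 32-bit word, and the old negative untyped constant could not be masked against a uint32 in Go at all (a constant that overflows uint32 is a compile error). A minimal sketch of the check the new value enables; the hasRawHWTimestamp helper is ours, not part of the package:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// hasRawHWTimestamp reports whether a TPACKET frame's tp_status word
// carries the raw-hardware-timestamp flag. With the previous generated
// value (-0x80000000) this mask expression would not compile against a
// uint32 operand.
func hasRawHWTimestamp(status uint32) bool {
	return status&unix.TP_STATUS_TS_RAW_HARDWARE != 0
}

func main() {
	fmt.Println(hasRawHWTimestamp(0x80000000)) // true
}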
+ KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1342,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x20 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1406,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1685,6 +1709,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1728,6 +1753,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1786,7 +1815,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1859,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1883,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1890,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1902,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1910,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1996,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2134,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2436,6 +2471,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + 
TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2449,7 +2549,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2648,6 +2748,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2664,6 +2766,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eefdb328..27227912 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 
0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1341,6 +1358,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1407,6 +1425,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80000000 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1673,6 +1695,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1692,6 +1716,7 @@ const ( PTRACE_GETVRREGS = 0x12 PTRACE_GETVSRREGS = 0x1b PTRACE_GET_DEBUGREG = 0x19 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1731,6 +1756,10 @@ const ( PTRACE_SINGLEBLOCK = 0x100 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1d PTRACE_SYSEMU_SINGLESTEP = 0x1e PTRACE_TRACEME = 0x0 @@ -1844,7 +1873,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1917,6 +1946,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1941,6 +1971,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1948,7 +1979,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1960,6 +1991,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1968,8 +2000,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2054,6 +2086,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2192,6 +2226,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 
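The KEYCTL_CAPABILITIES block repeated in each architecture file maps to a keyctl(2) operation that reports which key-management features the running kernel supports, returned as a small array of capability bytes carrying the KEYCTL_CAPS0_* and KEYCTL_CAPS1_* bits. A hedged sketch via the raw syscall, since this vendored snapshot may not ship a dedicated wrapper; older kernels simply fail the call:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	// keyctl(KEYCTL_CAPABILITIES, buf, len(buf)) fills buf with
	// capability bytes and returns how many bytes the kernel has.
	buf := make([]byte, 2)
	n, _, errno := unix.Syscall(unix.SYS_KEYCTL,
		uintptr(unix.KEYCTL_CAPABILITIES),
		uintptr(unsafe.Pointer(&buf[0])),
		uintptr(len(buf)))
	if errno != 0 {
		fmt.Println("keyctl:", errno) // EOPNOTSUPP/EINVAL on older kernels
		return
	}
	// Byte 0 carries KEYCTL_CAPS0_* bits, byte 1 KEYCTL_CAPS1_* bits.
	if n >= 1 && buf[0]&unix.KEYCTL_CAPS0_MOVE != 0 {
		fmt.Println("kernel supports KEYCTL_MOVE / KEYCTL_MOVE_EXCL")
	}
}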
SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2496,6 +2531,71 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x400000 TPACKET_ALIGNMENT = 0x10 @@ -2509,7 +2609,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2707,6 +2807,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2723,6 +2825,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0xc00 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 78db2104..e33be416 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 
0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1341,6 +1358,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1407,6 +1425,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80000000 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1673,6 +1695,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1692,6 +1716,7 @@ const ( PTRACE_GETVRREGS = 0x12 PTRACE_GETVSRREGS = 0x1b PTRACE_GET_DEBUGREG = 0x19 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1731,6 +1756,10 @@ const ( PTRACE_SINGLEBLOCK = 0x100 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1d PTRACE_SYSEMU_SINGLESTEP = 0x1e PTRACE_TRACEME = 0x0 @@ -1844,7 +1873,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1917,6 +1946,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1941,6 +1971,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1948,7 +1979,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1960,6 +1991,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1968,8 +2000,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2054,6 +2086,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 
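The NS_GET_* additions are the namespace-introspection ioctls issued on /proc/<pid>/ns/* descriptors: NS_GET_NSTYPE returns the namespace's CLONE_NEW* flag, NS_GET_OWNER_UID the owning user ID, and NS_GET_PARENT / NS_GET_USERNS a new descriptor. Their numeric values differ per architecture because the ioctl direction bits are encoded differently, hence 0x2000b703 here versus 0xb703 on riscv64 and s390x. A sketch, assuming this snapshot exports unix.IoctlRetInt (a raw unix.Syscall on unix.SYS_IOCTL works the same way otherwise):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/proc/self/ns/uts", unix.O_RDONLY|unix.O_CLOEXEC, 0)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer unix.Close(fd)

	// NS_GET_NSTYPE answers with the namespace's CLONE_NEW* constant.
	nstype, err := unix.IoctlRetInt(fd, unix.NS_GET_NSTYPE)
	if err != nil {
		fmt.Println("ioctl:", err)
		return
	}
	fmt.Println("uts namespace:", nstype == unix.CLONE_NEWUTS)
}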
SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2192,6 +2226,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2496,6 +2531,71 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x400000 TPACKET_ALIGNMENT = 0x10 @@ -2509,7 +2609,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2707,6 +2807,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2723,6 +2825,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0xc00 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 0cd07f93..b9908d30 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 
0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1342,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1406,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1684,6 +1708,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1716,6 +1741,10 @@ const ( PTRACE_SETSIGMASK = 0x420b PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1774,7 +1803,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1847,6 +1876,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1871,6 +1901,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1878,7 +1909,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1890,6 +1921,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1898,8 +1930,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 
0x43 @@ -1984,6 +2016,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2122,6 +2156,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2422,6 +2457,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2435,7 +2535,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2633,6 +2733,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2649,6 +2751,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index ac4f1d9f..85647f4f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - 
BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1342,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1406,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1673,6 +1695,8 @@ const ( PTRACE_DETACH = 0x11 PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1687,6 +1711,7 @@ const ( PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a PTRACE_GET_LAST_BREAK = 0x5006 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1730,6 +1755,10 @@ const ( PTRACE_SINGLEBLOCK = 0xc PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TE_ABORT_RAND = 0x5011 PTRACE_TRACEME = 0x0 PT_ACR0 = 0x90 @@ -1847,7 +1876,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1920,6 +1949,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1944,6 +1974,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1951,7 +1982,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1963,6 +1994,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1971,8 +2003,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 
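PTRACE_GET_SYSCALL_INFO (0x420e) and the PTRACE_SYSCALL_INFO_* op values added here were introduced around Linux 5.3: at any ptrace stop, a tracer can ask whether the tracee sits at a syscall entry, exit, or seccomp stop and fetch the details in one architecture-independent struct. The snapshot only defines the constants, so this sketch goes through the raw syscall; the 88-byte buffer size and the readSyscallInfo name are our assumptions, not part of the package:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// readSyscallInfo issues PTRACE_GET_SYSCALL_INFO against a stopped
// tracee; addr carries the buffer size and data the buffer pointer.
// The first byte of the result is the op field: one of
// PTRACE_SYSCALL_INFO_ENTRY, _EXIT, _SECCOMP, or _NONE. The caller
// must be the tracing thread (runtime.LockOSThread).
func readSyscallInfo(pid int) (op byte, err error) {
	var info [88]byte // sized to hold struct ptrace_syscall_info
	_, _, errno := unix.Syscall6(unix.SYS_PTRACE,
		uintptr(unix.PTRACE_GET_SYSCALL_INFO), uintptr(pid),
		uintptr(len(info)), uintptr(unsafe.Pointer(&info[0])), 0, 0)
	if errno != 0 {
		return 0, errno
	}
	return info[0], nil
}

func main() {
	fmt.Println(readSyscallInfo(1)) // illustrative only; needs a real tracee
}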
RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2057,6 +2089,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2195,6 +2229,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2495,6 +2530,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2508,7 +2608,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2706,6 +2806,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2722,6 +2824,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 8a12f141..c0095a54 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -256,6 +256,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 
0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -307,9 +308,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -463,6 +465,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -564,6 +567,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1090,6 +1094,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1102,6 +1117,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1346,6 +1363,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1410,6 +1428,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1675,6 +1697,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1692,6 +1716,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1731,6 +1756,10 @@ const ( PTRACE_SINGLESTEP = 0x9 PTRACE_SPARC_DETACH = 0xb PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 PTRACE_WRITEDATA = 0x11 PTRACE_WRITETEXT = 0x13 @@ -1839,7 +1868,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1912,6 +1941,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1936,6 +1966,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1943,7 +1974,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1955,6 +1986,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 
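NFDBITS, newly exported by every zerrors file in this update (0x40 on 64-bit Linux targets such as this one, 0x20 on 32-bit targets and the BSDs), is the number of bits per FdSet word, which is exactly what portable fd-set indexing needs; later in this diff the Darwin Select wrapper also gains the (n int, err error) shape Linux already had. A small usage sketch:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd := 0 // stdin
	var set unix.FdSet
	// NFDBITS makes the word/bit arithmetic portable across the
	// differing FdSet element widths.
	set.Bits[fd/unix.NFDBITS] |= 1 << (uint(fd) % unix.NFDBITS)

	tv := unix.Timeval{Sec: 1}
	n, err := unix.Select(fd+1, &set, nil, nil, &tv) // wait up to 1s
	fmt.Println(n, err)
}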
RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1963,8 +1995,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2049,6 +2081,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2187,6 +2221,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x47 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2484,6 +2519,71 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x20005437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2497,7 +2597,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2695,6 +2795,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2711,6 +2813,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 __TIOCFLUSH = 0x80047410 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 78cc04ea..96b9b8ab 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -3,7 +3,7 @@ // +build 386,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go package unix @@ -1085,6 +1085,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 92185e69..ed522a84 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -3,7 +3,7 @@ // +build amd64,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -1075,6 +1075,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 373ad454..c8d36fe9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -3,7 +3,7 @@ // +build arm,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go package unix @@ -1065,6 +1065,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index fb6c6044..f1c146a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -3,7 +3,7 @@ // +build arm64,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -1075,6 +1075,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index d8be0451..5402bd55 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -3,7 +3,7 @@ // +build 386,openbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m32 _const.go package unix @@ -881,14 +881,15 @@ const ( MADV_SPACEAVAIL = 0x5 MADV_WILLNEED = 0x3 MAP_ANON = 0x1000 - MAP_COPY = 0x4 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x1ff7 - MAP_HASSEMAPHORE = 0x200 - MAP_INHERIT = 0x80 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 MAP_INHERIT_COPY = 0x1 - MAP_INHERIT_DONATE_COPY = 0x3 MAP_INHERIT_NONE = 0x2 MAP_INHERIT_SHARE = 0x0 MAP_NOEXTEND = 0x100 @@ -896,7 +897,8 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x20 MAP_SHARED = 0x1 - MAP_TRYFIXED = 0x400 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ASYNC = 0x40 @@ -946,6 +948,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 1f9e8a29..ffaf2d2f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -3,7 +3,7 @@ // +build amd64,openbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -920,10 +920,11 @@ const ( MADV_WILLNEED = 0x3 MAP_ANON = 0x1000 MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x7ff7 + MAP_FLAGMASK = 0xfff7 MAP_HASSEMAPHORE = 0x0 MAP_INHERIT = 0x0 MAP_INHERIT_COPY = 0x1 @@ -990,6 +991,7 @@ const ( NET_RT_MAXID = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 79d5695c..7aa796a6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1,11 +1,11 @@ // mkerrors.sh // Code generated by the command above; see README.md. DO NOT EDIT. -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs -- _const.go - // +build arm,openbsd +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- _const.go + package unix import "syscall" @@ -881,10 +881,11 @@ const ( MADV_WILLNEED = 0x3 MAP_ANON = 0x1000 MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x3ff7 + MAP_FLAGMASK = 0xfff7 MAP_HASSEMAPHORE = 0x0 MAP_INHERIT = 0x0 MAP_INHERIT_COPY = 0x1 @@ -896,6 +897,7 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x0 MAP_SHARED = 0x1 + MAP_STACK = 0x4000 MAP_TRYFIXED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 @@ -947,6 +949,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ec5f92de..1792d3f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -996,6 +996,7 @@ const ( NET_RT_MAXID = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 22569db3..46e054cc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -3,7 +3,7 @@ // +build amd64,solaris -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -666,6 +666,7 @@ const ( M_FLUSH = 0x86 NAME_MAX = 0xff NEWDEV = 0x1 + NFDBITS = 0x40 NL0 = 0x0 NL1 = 0x100 NLDLY = 0x100 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go index dd5ea36e..b42c1cba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -tags darwin,386,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go +// go run mksyscall.go -l32 -tags darwin,386,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.1_11.go syscall_darwin_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
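Every "Created by cgo -godefs" banner in these files is rewritten to the canonical form "Code generated by cmd/cgo -godefs; DO NOT EDIT.", which tooling recognizes via the convention regexp from golang.org/s/generatedcode. A small, self-contained checker for that convention — the file handling is illustrative, only the regexp is the documented contract:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    // Canonical marker for generated Go files (golang.org/s/generatedcode).
    var generated = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`)

    func main() {
        f, err := os.Open(os.Args[1])
        if err != nil {
            return
        }
        defer f.Close()
        for s := bufio.NewScanner(f); s.Scan(); {
            if generated.MatchString(s.Text()) {
                fmt.Println("generated:", os.Args[1])
                return
            }
        }
    }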
// +build darwin,386,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1342,8 +1326,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1681,6 +1666,39 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -1738,23 +1756,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err 
error) { r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go new file mode 100644 index 00000000..e263fbdb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -l32 -tags darwin,386,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build darwin,386,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s new file mode 100644 index 00000000..00da1ebf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go 386 +// Code generated by the command above; DO NOT EDIT. 
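The new zsyscall_darwin_386.1_13.{go,s} pair above (repeated for the other Darwin ports below) belongs to the libSystem-based syscall scheme: each wrapper calls through an assembly trampoline that //go:cgo_import_dynamic binds to the real libc symbol, and fdopendir/closedir/readdir_r are the pieces that let go1.13 builds emulate Getdirentries now that the __getdirentries64 trampoline is dropped (the raw-syscall version survives only in the !go1.12 files). A hedged sketch of the portable call; the directory path and buffer size are illustrative:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        fd, err := unix.Open("/tmp", unix.O_RDONLY, 0)
        if err != nil {
            return
        }
        defer unix.Close(fd)

        buf := make([]byte, 4096)
        var base uintptr // position cookie maintained by the wrapper
        n, err := unix.Getdirentries(fd, buf, &base)
        fmt.Println(n, err) // n bytes of raw dirent records on success
    }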
+ +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 78ca9233..603c9f6e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -928,6 +907,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { @@ -1857,8 +1851,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2326,6 +2321,27 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return 
+} + +func libc___sysctl_trampoline() + +//go:linkname libc___sysctl libc___sysctl +//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -2408,28 +2424,6 @@ func libc_fstatfs64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc___getdirentries64_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___getdirentries64_trampoline() - -//go:linkname libc___getdirentries64 libc___getdirentries64 -//go:cgo_import_dynamic libc___getdirentries64 __getdirentries64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat64_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index f40465ca..ece6f67c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -106,6 +104,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 @@ -262,6 +262,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc___sysctl(SB) TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 @@ -272,8 +274,6 @@ TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) -TEXT ·libc___getdirentries64_trampoline(SB),NOSPLIT,$0-0 - JMP libc___getdirentries64(SB) TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go index 2581e896..38b7cbab 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags darwin,amd64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// go 
run mksyscall.go -tags darwin,amd64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.1_11.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build darwin,amd64,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,16 +361,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1352,8 +1326,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1691,6 +1666,49 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) sec = int64(r0) @@ -1738,23 +1756,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go new file mode 100644 index 00000000..314042a9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build darwin,amd64,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s new file mode 100644 index 00000000..d671e831 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go amd64 +// Code generated by the command above; DO NOT EDIT. 
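A behavioral change threads through all the Darwin wrappers in this range (and the Dragonfly and FreeBSD ones below): Select now propagates r0, returning how many descriptors are ready, and the first parameter becomes nfd so the old name n can carry the result. A hedged calling sketch against the new signature — populating FdSet.Bits (the NFDBITS math sketched earlier) is elided:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        var readable unix.FdSet
        // ... set bits in readable for the descriptors of interest ...

        tv := unix.Timeval{Sec: 1} // block for at most one second
        n, err := unix.Select(1 /* highest fd + 1 */, &readable, nil, nil, &tv)
        fmt.Println(n, err) // n now reports the ready-descriptor count
    }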
+ +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 64df03c4..fda478e8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1872,8 +1851,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2341,6 +2321,27 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc___sysctl_trampoline() + +//go:linkname libc___sysctl libc___sysctl +//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -2423,28 +2424,6 @@ func libc_fstatfs64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = 
unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc___getdirentries64_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___getdirentries64_trampoline() - -//go:linkname libc___getdirentries64 libc___getdirentries64 -//go:cgo_import_dynamic libc___getdirentries64 __getdirentries64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat64_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index debcb8ed..7c4d5901 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -264,6 +262,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc___sysctl(SB) TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 @@ -274,8 +274,6 @@ TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) -TEXT ·libc___getdirentries64_trampoline(SB),NOSPLIT,$0-0 - JMP libc___getdirentries64(SB) TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go index f8caecef..abb69183 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -tags darwin,arm,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go +// go run mksyscall.go -l32 -tags darwin,arm,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.1_11.go syscall_darwin_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
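Each Darwin port in this range also gains a ClockGettime wrapper plus a libc_clock_gettime trampoline in the matching .s file, exposing clock_gettime(2) via libSystem. A hedged usage sketch; it assumes CLOCK_MONOTONIC is among the generated CLOCK_* constants for these ports:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        var ts unix.Timespec
        if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
            return
        }
        fmt.Printf("monotonic: %d.%09ds\n", ts.Sec, ts.Nsec)
    }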
// +build darwin,arm,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,16 +361,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1352,8 +1326,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go new file mode 100644 index 00000000..f519ce9a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -l32 -tags darwin,arm,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build darwin,arm,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s new file mode 100644 index 00000000..488e5570 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go arm +// Code generated by the command above; DO NOT EDIT. + +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index ed330623..163b3912 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -928,6 +907,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { @@ -1857,8 
+1851,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s index 66af9f48..5bebb1bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -106,6 +104,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go index 3fd0f3c8..b75c11d4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags darwin,arm64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// go run mksyscall.go -tags darwin,arm64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.1_11.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
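The recurring sysctl hunks in this range are relocations, not rewrites: the generated wrapper (and its __sysctl trampoline in the .s files) moves to match the regenerated source order, with an identical body and signature before and after. For orientation, a hedged sketch of the public helper layered on top of that private wrapper:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // unix.Sysctl drives the generated sysctl() shown in these hunks.
        ostype, err := unix.Sysctl("kern.ostype")
        fmt.Println(ostype, err) // e.g. "Darwin" on macOS
    }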
// +build darwin,arm64,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,16 +361,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1352,8 +1326,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go new file mode 100644 index 00000000..d64e6c80 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build darwin,arm64,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s new file mode 100644 index 00000000..b29dabb0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go arm64 +// Code generated by the command above; DO NOT EDIT. + +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 5258a732..7c5bd510 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -928,6 +907,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 
0 { @@ -1857,8 +1851,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index f57f48f8..96ab9877 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -106,6 +104,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index cdfe9318..df199b34 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1272,8 +1272,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index a783306b..e68185f1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1606,8 +1606,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), 
uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index f995520d..2f77f93c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,8 +361,14 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -387,8 +377,8 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -424,6 +414,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1606,8 +1606,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), 
uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d681acd4..e9a12c9d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,8 +361,14 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -387,8 +377,8 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -424,6 +414,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1606,8 +1606,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), 
uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 5049b2ed..27ab0fbd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -404,8 +404,8 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } @@ -414,8 +414,8 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1606,8 +1606,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), 
uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c5e46e4c..fe5d462e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index da8819e4..536abcea 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 6ad9be6d..37823cd6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = 
BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index f8833178..794f6126 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 8eebc6c7..1b34b550 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) 
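
Editorial aside (not part of the vendored diff): the hunks above add keyctlRestrictKeyringByType/keyctlRestrictKeyring stubs on every linux GOARCH; in x/sys/unix these lowercase stubs back the exported KeyctlRestrictKeyring helper. A minimal usage sketch under that assumption (hypothetical program, see keyctl(2)):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Resolve the calling process's session keyring.
	ring, err := unix.KeyctlGetKeyringID(unix.KEY_SPEC_SESSION_KEYRING, false)
	if err != nil {
		fmt.Println("keyring lookup:", err)
		return
	}
	// An empty key type applies the kernel's default "reject all new links"
	// restriction; a pair such as ("asymmetric", "builtin_trusted") instead
	// limits which keys may be linked into the ring.
	if err := unix.KeyctlRestrictKeyring(ring, "", ""); err != nil {
		fmt.Println("restrict:", err)
	}
}
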
if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ecf62a67..5714e259 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 1ba0f7b6..88a6b336 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 20012b2f..c09dbe34 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil 
{ + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 2b520dea..42f6c210 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index d9f044c9..de2cd8db 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9feed65e..d51bf07f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 0a651508..1e3a3cb7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index e27f6693..3c97008c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := 
Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 7e058266..5ade42cc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -1498,8 +1498,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index d94d076a..3e0bbc5f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange 
int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -1498,8 +1498,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index cf5bf3d0..cb0af13a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { 
+ _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -1498,8 +1498,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 243a9317..6fd48d3d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -1498,8 +1498,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), 
uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index a9532d07..2938e412 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1304,8 +1304,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0cb9f017..22b79ab0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err 
= errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1304,8 +1304,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 6fc99b54..cb921f37 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1304,8 +1304,9 @@ func Seek(fd int, 
offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 27878a72..5a743803 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1304,8 +1304,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 5f614760..a96165d4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -1478,8 +1478,9 @@ func Seek(fd 
int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = e1 } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index e869c060..7aae554f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -429,4 +429,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 4917b8ab..7968439a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -351,4 +351,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index f85fcb4f..3c663c69 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -393,4 +393,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 678a119b..753def98 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -296,4 +296,5 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 222c9f9a..ac86bd54 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -414,4 +414,5 @@ const ( SYS_FSCONFIG = 4431 SYS_FSMOUNT = 4432 SYS_FSPICK = 4433 + SYS_PIDFD_OPEN = 4434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 28e6d0e9..1f5705b5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -344,4 +344,5 @@ const ( SYS_FSCONFIG = 5431 SYS_FSMOUNT = 5432 SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index e643c6f6..d9ed9532 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -344,4 +344,5 @@ const ( SYS_FSCONFIG = 5431 SYS_FSMOUNT = 5432 SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 
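
Editorial aside: the zsysnum tables in this stretch of the diff register SYS_PIDFD_OPEN (434, plus the per-ABI offset on mips) and, on most arches, SYS_CLONE3 (435). This revision appears to ship only the numbers, not high-level wrappers, so a caller would go through unix.Syscall directly; a hypothetical sketch:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// pidfd_open(2): obtain a file descriptor that refers to a process.
	// Kernels older than 5.3 return ENOSYS.
	fd, _, errno := unix.Syscall(unix.SYS_PIDFD_OPEN, uintptr(os.Getpid()), 0, 0)
	if errno != 0 {
		fmt.Println("pidfd_open:", errno)
		return
	}
	defer unix.Close(int(fd))
	fmt.Println("pidfd:", fd)
}
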
01d93c42..94266b65 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -414,4 +414,5 @@ const ( SYS_FSCONFIG = 4431 SYS_FSMOUNT = 4432 SYS_FSPICK = 4433 + SYS_PIDFD_OPEN = 4434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5744149e..52e3da64 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -393,4 +393,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 21c83204..6141f90a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -393,4 +393,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index c1bb6d8f..4f7261a8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -295,4 +295,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index bc3cc6b5..f47014ac 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -358,4 +358,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 0a2841ba..dd78abb0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -373,4 +373,5 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 50bc4128..74d42bb5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -285,6 +285,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -425,6 +432,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -614,6 +622,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -664,6 +673,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2521,3 +2537,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct 
{ + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 055eaa76..8debef94 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -285,6 +285,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -426,6 +433,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -615,6 +623,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -665,6 +674,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2535,3 +2551,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 66019c9c..feb7d837 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -289,6 +289,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + 
Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -429,6 +436,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -618,6 +626,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -668,6 +677,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2512,3 +2528,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 3104798c..6da21783 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -616,6 +624,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +675,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2514,3 +2530,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags 
uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 46c86021..14b1dea6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -288,6 +288,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -617,6 +625,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +676,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2518,3 +2534,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index c2fe1a62..0fb94a76 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -616,6 +624,7 @@ const ( SizeofRtAttr = 0x4 
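
Editorial aside: each ztypes file in this diff gains IfaCacheinfo, mirroring the kernel's struct ifa_cacheinfo carried in rtnetlink address messages, together with a matching SizeofIfaCacheinfo = 0x10. A quick linux-only sanity sketch that the generated constant agrees with the Go layout (four uint32 fields):

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	var ci unix.IfaCacheinfo
	// Both sides should be 16 bytes on every linux GOARCH.
	fmt.Println(unsafe.Sizeof(ci) == uintptr(unix.SizeofIfaCacheinfo)) // true
}
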
SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +675,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2516,3 +2532,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index f1eb0d39..7ffc7bbc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -616,6 +624,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +675,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2516,3 +2532,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + 
SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 8759bc36..12ef8eb4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -288,6 +288,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -617,6 +625,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +676,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2518,3 +2534,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index a8120054..cb89d8a1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -287,6 +287,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -617,6 +625,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +676,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct 
{ Family uint8 Dst_len uint8 @@ -2524,3 +2540,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 74b7a919..d9c93aff 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -287,6 +287,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -617,6 +625,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +676,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2524,3 +2540,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go 
b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index ccea3e63..a198cc52 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -616,6 +624,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +675,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2542,3 +2558,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index d8fc0bc1..f1e26c56 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -285,6 +285,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -426,6 +433,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -615,6 +623,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -665,6 +674,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2538,3 +2554,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + 
Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 5e0ab932..d2824804 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -289,6 +289,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -430,6 +437,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -619,6 +627,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -669,6 +678,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2519,3 +2535,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go new file mode 100644 index 00000000..af3af60d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build windows
+// +build go1.9
+
+package windows
+
+import "syscall"
+
+type Errno = syscall.Errno
+type SysProcAttr = syscall.SysProcAttr
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
new file mode 100644
index 00000000..d7771134
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -0,0 +1,386 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"unsafe"
+)
+
+// We need to use LoadLibrary and GetProcAddress from the Go runtime, because
+// these symbols are loaded by the system linker and are required to
+// dynamically load additional symbols. Note that in the Go runtime, these
+// return syscall.Handle and syscall.Errno, but these are the same, in fact,
+// as windows.Handle and windows.Errno, and we intend to keep these the same.
+
+//go:linkname syscall_loadlibrary syscall.loadlibrary
+func syscall_loadlibrary(filename *uint16) (handle Handle, err Errno)
+
+//go:linkname syscall_getprocaddress syscall.getprocaddress
+func syscall_getprocaddress(handle Handle, procname *uint8) (proc uintptr, err Errno)
+
+// DLLError describes reasons for DLL load failures.
+type DLLError struct {
+	Err     error
+	ObjName string
+	Msg     string
+}
+
+func (e *DLLError) Error() string { return e.Msg }
+
+// A DLL implements access to a single DLL.
+type DLL struct {
+	Name   string
+	Handle Handle
+}
+
+// LoadDLL loads DLL file into memory.
+//
+// Warning: using LoadDLL without an absolute path name is subject to
+// DLL preloading attacks. To safely load a system DLL, use LazyDLL
+// with System set to true, or use LoadLibraryEx directly.
+func LoadDLL(name string) (dll *DLL, err error) {
+	namep, err := UTF16PtrFromString(name)
+	if err != nil {
+		return nil, err
+	}
+	h, e := syscall_loadlibrary(namep)
+	if e != 0 {
+		return nil, &DLLError{
+			Err:     e,
+			ObjName: name,
+			Msg:     "Failed to load " + name + ": " + e.Error(),
+		}
+	}
+	d := &DLL{
+		Name:   name,
+		Handle: h,
+	}
+	return d, nil
+}
+
+// MustLoadDLL is like LoadDLL but panics if the load operation fails.
+func MustLoadDLL(name string) *DLL {
+	d, e := LoadDLL(name)
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// FindProc searches DLL d for procedure named name and returns *Proc
+// if found. It returns an error if the search fails.
+func (d *DLL) FindProc(name string) (proc *Proc, err error) {
+	namep, err := BytePtrFromString(name)
+	if err != nil {
+		return nil, err
+	}
+	a, e := syscall_getprocaddress(d.Handle, namep)
+	if e != 0 {
+		return nil, &DLLError{
+			Err:     e,
+			ObjName: name,
+			Msg:     "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(),
+		}
+	}
+	p := &Proc{
+		Dll:  d,
+		Name: name,
+		addr: a,
+	}
+	return p, nil
+}
+
+// MustFindProc is like FindProc but panics if the search fails.
+func (d *DLL) MustFindProc(name string) *Proc {
+	p, e := d.FindProc(name)
+	if e != nil {
+		panic(e)
+	}
+	return p
+}
+
+// Release unloads DLL d from memory.
+func (d *DLL) Release() (err error) {
+	return FreeLibrary(d.Handle)
+}
+
+// A Proc implements access to a procedure inside a DLL.
+type Proc struct {
+	Dll  *DLL
+	Name string
+	addr uintptr
+}
+
+// Addr returns the address of the procedure represented by p.
+// The return value can be passed to Syscall to run the procedure.
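Before the Addr accessor below, a usage sketch of the pieces defined so far (an editorial illustration, not part of the vendored patch): NewLazySystemDLL, NewProc, and Call are defined later in this same file, and user32!MessageBeep is an arbitrary example export chosen here.

	package main

	import (
		"log"

		"golang.org/x/sys/windows"
	)

	func main() {
		// System=true (via NewLazySystemDLL) avoids the DLL preloading
		// attacks that the LoadDLL warning above describes.
		beep := windows.NewLazySystemDLL("user32.dll").NewProc("MessageBeep")

		// lastErr is always non-nil; MessageBeep signals failure via r1 == 0,
		// so inspect r1 first and only then consult lastErr.
		r1, _, lastErr := beep.Call(uintptr(0xFFFFFFFF)) // simple beep
		if r1 == 0 {
			log.Fatalf("MessageBeep failed: %v", lastErr)
		}
	}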
+func (p *Proc) Addr() uintptr {
+	return p.addr
+}
+
+//go:uintptrescapes
+
+// Call executes procedure p with arguments a. It will panic if more than
+// 15 arguments are supplied.
+//
+// The returned error is always non-nil, constructed from the result of GetLastError.
+// Callers must inspect the primary return value to decide whether an error occurred
+// (according to the semantics of the specific function being called) before consulting
+// the error. The error will be guaranteed to contain windows.Errno.
+func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
+	switch len(a) {
+	case 0:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0)
+	case 1:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0)
+	case 2:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0)
+	case 3:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2])
+	case 4:
+		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0)
+	case 5:
+		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0)
+	case 6:
+		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5])
+	case 7:
+		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0)
+	case 8:
+		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0)
+	case 9:
+		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8])
+	case 10:
+		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0)
+	case 11:
+		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0)
+	case 12:
+		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11])
+	case 13:
+		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0)
+	case 14:
+		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0)
+	case 15:
+		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14])
+	default:
+		panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".")
+	}
+}
+
+// A LazyDLL implements access to a single DLL.
+// It will delay the load of the DLL until the first
+// call to its Handle method or to one of its
+// LazyProc's Addr method.
+type LazyDLL struct {
+	Name string
+
+	// System determines whether the DLL must be loaded from the
+	// Windows System directory, bypassing the normal DLL search
+	// path.
+	System bool
+
+	mu  sync.Mutex
+	dll *DLL // non-nil once the DLL is loaded
+}
+
+// Load loads DLL file d.Name into memory. It returns an error if it fails.
+// Load will not try to load the DLL if it is already loaded into memory.
+func (d *LazyDLL) Load() error {
+	// Non-racy version of:
+	// if d.dll != nil {
+	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil {
+		return nil
+	}
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	if d.dll != nil {
+		return nil
+	}
+
+	// kernel32.dll is special, since it's where LoadLibraryEx comes from.
+	// The kernel already special-cases its name, so it's always
+	// loaded from system32.
+	var dll *DLL
+	var err error
+	if d.Name == "kernel32.dll" {
+		dll, err = LoadDLL(d.Name)
+	} else {
+		dll, err = loadLibraryEx(d.Name, d.System)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Non-racy version of:
+	// d.dll = dll
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll))
+	return nil
+}
+
+// mustLoad is like Load but panics if the load fails.
+func (d *LazyDLL) mustLoad() {
+	e := d.Load()
+	if e != nil {
+		panic(e)
+	}
+}
+
+// Handle returns d's module handle.
+func (d *LazyDLL) Handle() uintptr {
+	d.mustLoad()
+	return uintptr(d.dll.Handle)
+}
+
+// NewProc returns a LazyProc for accessing the named procedure in the DLL d.
+func (d *LazyDLL) NewProc(name string) *LazyProc {
+	return &LazyProc{l: d, Name: name}
+}
+
+// NewLazyDLL creates a new LazyDLL associated with the DLL file.
+func NewLazyDLL(name string) *LazyDLL {
+	return &LazyDLL{Name: name}
+}
+
+// NewLazySystemDLL is like NewLazyDLL, but will only
+// search the Windows System directory for the DLL if name is
+// a base name (like "advapi32.dll").
+func NewLazySystemDLL(name string) *LazyDLL {
+	return &LazyDLL{Name: name, System: true}
+}
+
+// A LazyProc implements access to a procedure inside a LazyDLL.
+// It delays the lookup until the Addr method is called.
+type LazyProc struct {
+	Name string
+
+	mu   sync.Mutex
+	l    *LazyDLL
+	proc *Proc
+}
+
+// Find searches the DLL for a procedure named p.Name. It returns an error
+// if the search fails. Find will not repeat the search if the procedure
+// has already been found and loaded into memory.
+func (p *LazyProc) Find() error {
+	// Non-racy version of:
+	// if p.proc == nil {
+	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc))) == nil {
+		p.mu.Lock()
+		defer p.mu.Unlock()
+		if p.proc == nil {
+			e := p.l.Load()
+			if e != nil {
+				return e
+			}
+			proc, e := p.l.dll.FindProc(p.Name)
+			if e != nil {
+				return e
+			}
+			// Non-racy version of:
+			// p.proc = proc
+			atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc)), unsafe.Pointer(proc))
+		}
+	}
+	return nil
+}
+
+// mustFind is like Find but panics if the search fails.
+func (p *LazyProc) mustFind() {
+	e := p.Find()
+	if e != nil {
+		panic(e)
+	}
+}
+
+// Addr returns the address of the procedure represented by p.
+// The return value can be passed to Syscall to run the procedure.
+// It will panic if the procedure cannot be found.
+func (p *LazyProc) Addr() uintptr {
+	p.mustFind()
+	return p.proc.Addr()
+}
+
+//go:uintptrescapes
+
+// Call executes procedure p with arguments a. It will panic if more than
+// 15 arguments are supplied. It will also panic if the procedure cannot be found.
+//
+// The returned error is always non-nil, constructed from the result of GetLastError.
+// Callers must inspect the primary return value to decide whether an error occurred
+// (according to the semantics of the specific function being called) before consulting
+// the error. The error will be guaranteed to contain windows.Errno.
+func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
+	p.mustFind()
+	return p.proc.Call(a...)
+}
+
+var canDoSearchSystem32Once struct {
+	sync.Once
+	v bool
+}
+
+func initCanDoSearchSystem32() {
+	// https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says:
+	// "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows
+	// Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on
+	// systems that have KB2533623 installed.
To determine whether the + // flags are available, use GetProcAddress to get the address of the + // AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories + // function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_* + // flags can be used with LoadLibraryEx." + canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil) +} + +func canDoSearchSystem32() bool { + canDoSearchSystem32Once.Do(initCanDoSearchSystem32) + return canDoSearchSystem32Once.v +} + +func isBaseName(name string) bool { + for _, c := range name { + if c == ':' || c == '/' || c == '\\' { + return false + } + } + return true +} + +// loadLibraryEx wraps the Windows LoadLibraryEx function. +// +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx +// +// If name is not an absolute path, LoadLibraryEx searches for the DLL +// in a variety of automatic locations unless constrained by flags. +// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx +func loadLibraryEx(name string, system bool) (*DLL, error) { + loadDLL := name + var flags uintptr + if system { + if canDoSearchSystem32() { + const LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800 + flags = LOAD_LIBRARY_SEARCH_SYSTEM32 + } else if isBaseName(name) { + // WindowsXP or unpatched Windows machine + // trying to load "foo.dll" out of the system + // folder, but LoadLibraryEx doesn't support + // that yet on their system, so emulate it. + systemdir, err := GetSystemDirectory() + if err != nil { + return nil, err + } + loadDLL = systemdir + "\\" + name + } + } + h, err := LoadLibraryEx(loadDLL, 0, flags) + if err != nil { + return nil, err + } + return &DLL{Name: name, Handle: h}, nil +} + +type errString string + +func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go new file mode 100644 index 00000000..f482a9fa --- /dev/null +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -0,0 +1,61 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows environment variables. + +package windows + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +func Getenv(key string) (value string, found bool) { + return syscall.Getenv(key) +} + +func Setenv(key, value string) error { + return syscall.Setenv(key, value) +} + +func Clearenv() { + syscall.Clearenv() +} + +func Environ() []string { + return syscall.Environ() +} + +// Returns a default environment associated with the token, rather than the current +// process. If inheritExisting is true, then this environment also inherits the +// environment of the current process. 
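A short sketch of how the Environ method defined next might be called (an editorial illustration; it assumes OpenProcessToken, CurrentProcess, Token.Close, and the TOKEN_* constants from elsewhere in this vendored package, and notes that CreateEnvironmentBlock wants TOKEN_QUERY and TOKEN_DUPLICATE access):

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/sys/windows"
	)

	func main() {
		// Open a real token with the access CreateEnvironmentBlock needs.
		var tok windows.Token
		err := windows.OpenProcessToken(windows.CurrentProcess(),
			windows.TOKEN_QUERY|windows.TOKEN_DUPLICATE, &tok)
		if err != nil {
			log.Fatal(err)
		}
		defer tok.Close()

		// false: report only the token's default environment, without
		// merging in the current process environment.
		env, err := tok.Environ(false)
		if err != nil {
			log.Fatal(err)
		}
		for _, kv := range env {
			fmt.Println(kv) // each entry is a "KEY=value" string
		}
	}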
+func (token Token) Environ(inheritExisting bool) (env []string, err error) { + var block *uint16 + err = CreateEnvironmentBlock(&block, token, inheritExisting) + if err != nil { + return nil, err + } + defer DestroyEnvironmentBlock(block) + blockp := uintptr(unsafe.Pointer(block)) + for { + entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:] + for i, v := range entry { + if v == 0 { + entry = entry[:i] + break + } + } + if len(entry) == 0 { + break + } + env = append(env, string(utf16.Decode(entry))) + blockp += 2 * (uintptr(len(entry)) + 1) + } + return env, nil +} + +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go new file mode 100644 index 00000000..40af946e --- /dev/null +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +const ( + EVENTLOG_SUCCESS = 0 + EVENTLOG_ERROR_TYPE = 1 + EVENTLOG_WARNING_TYPE = 2 + EVENTLOG_INFORMATION_TYPE = 4 + EVENTLOG_AUDIT_SUCCESS = 8 + EVENTLOG_AUDIT_FAILURE = 16 +) + +//sys RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) [failretval==0] = advapi32.RegisterEventSourceW +//sys DeregisterEventSource(handle Handle) (err error) = advapi32.DeregisterEventSource +//sys ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) = advapi32.ReportEventW diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go new file mode 100644 index 00000000..3606c3a8 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -0,0 +1,97 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fork, exec, wait, etc. + +package windows + +// EscapeArg rewrites command line argument s as prescribed +// in http://msdn.microsoft.com/en-us/library/ms880421. +// This function returns "" (2 double quotes) if s is empty. +// Alternatively, these transformations are done: +// - every back slash (\) is doubled, but only if immediately +// followed by double quote ("); +// - every double quote (") is escaped by back slash (\); +// - finally, s is wrapped with double quotes (arg -> "arg"), +// but only if there is space or tab inside s. 
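The escaping rules above are easiest to see on concrete inputs; a minimal sketch (editorial, not part of the patch) of the EscapeArg function defined just below, with placeholder argument strings:

	package main

	import (
		"fmt"
		"strings"

		"golang.org/x/sys/windows"
	)

	func main() {
		args := []string{`C:\Program Files\tool.exe`, `plain`, `say "hi"`}
		quoted := make([]string, len(args))
		for i, a := range args {
			quoted[i] = windows.EscapeArg(a)
		}
		// Prints: "C:\Program Files\tool.exe" plain "say \"hi\""
		// Backslashes are doubled only when immediately before a double
		// quote, quotes are backslash-escaped, and surrounding quotes are
		// added only when the argument contains a space or tab.
		fmt.Println(strings.Join(quoted, " "))
	}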
+func EscapeArg(s string) string { + if len(s) == 0 { + return "\"\"" + } + n := len(s) + hasSpace := false + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '\\': + n++ + case ' ', '\t': + hasSpace = true + } + } + if hasSpace { + n += 2 + } + if n == len(s) { + return s + } + + qs := make([]byte, n) + j := 0 + if hasSpace { + qs[j] = '"' + j++ + } + slashes := 0 + for i := 0; i < len(s); i++ { + switch s[i] { + default: + slashes = 0 + qs[j] = s[i] + case '\\': + slashes++ + qs[j] = s[i] + case '"': + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '\\' + j++ + qs[j] = s[i] + } + j++ + } + if hasSpace { + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '"' + j++ + } + return string(qs[:j]) +} + +func CloseOnExec(fd Handle) { + SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) +} + +// FullPath retrieves the full path of the specified file. +func FullPath(name string) (path string, err error) { + p, err := UTF16PtrFromString(name) + if err != nil { + return "", err + } + n := uint32(100) + for { + buf := make([]uint16, n) + n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil) + if err != nil { + return "", err + } + if n <= uint32(len(buf)) { + return UTF16ToString(buf[:n]), nil + } + } +} diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go new file mode 100644 index 00000000..f80a4204 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +const ( + MEM_COMMIT = 0x00001000 + MEM_RESERVE = 0x00002000 + MEM_DECOMMIT = 0x00004000 + MEM_RELEASE = 0x00008000 + MEM_RESET = 0x00080000 + MEM_TOP_DOWN = 0x00100000 + MEM_WRITE_WATCH = 0x00200000 + MEM_PHYSICAL = 0x00400000 + MEM_RESET_UNDO = 0x01000000 + MEM_LARGE_PAGES = 0x20000000 + + PAGE_NOACCESS = 0x01 + PAGE_READONLY = 0x02 + PAGE_READWRITE = 0x04 + PAGE_WRITECOPY = 0x08 + PAGE_EXECUTE_READ = 0x20 + PAGE_EXECUTE_READWRITE = 0x40 + PAGE_EXECUTE_WRITECOPY = 0x80 +) diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash new file mode 100644 index 00000000..2163843a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -0,0 +1,63 @@ +#!/bin/bash + +# Copyright 2019 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +set -e +shopt -s nullglob + +winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" +[[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } + +declare -A errors + +{ + echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT." + echo + echo "package windows" + echo "import \"syscall\"" + echo "const (" + + while read -r line; do + unset vtype + if [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?([A-Z][A-Z0-9_]+k?)\)? ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?((0x)?[0-9A-Fa-f]+)L?\)? 
]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + vtype="${BASH_REMATCH[2]}" + elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +\(\(([A-Z]+)\)((0x)?[0-9A-Fa-f]+)L?\) ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + vtype="${BASH_REMATCH[2]}" + else + continue + fi + [[ -n $key && -n $value ]] || continue + [[ -z ${errors["$key"]} ]] || continue + errors["$key"]="$value" + if [[ -v vtype ]]; then + if [[ $key == FACILITY_* || $key == NO_ERROR ]]; then + vtype="" + elif [[ $vtype == *HANDLE* || $vtype == *HRESULT* ]]; then + vtype="Handle" + else + vtype="syscall.Errno" + fi + last_vtype="$vtype" + else + vtype="" + if [[ $last_vtype == Handle && $value == NO_ERROR ]]; then + value="S_OK" + elif [[ $last_vtype == syscall.Errno && $value == NO_ERROR ]]; then + value="ERROR_SUCCESS" + fi + fi + + echo "$key $vtype = $value" + done < "$winerror" + + echo ")" +} | gofmt > "zerrors_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash new file mode 100644 index 00000000..ab8924e9 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2019 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +set -e +shopt -s nullglob + +knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)" +[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; } + +{ + echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT." + echo + echo "package windows" + echo "type KNOWNFOLDERID GUID" + echo "var (" + while read -r line; do + [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue + printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \ + "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \ + $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \ + $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" )) + done < "$knownfolders" + echo ")" +} | gofmt > "zknownfolderids_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go new file mode 100644 index 00000000..328e3b2a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build generate + +package windows + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go new file mode 100644 index 00000000..a74e3e24 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race.go @@ -0,0 +1,30 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,race + +package windows + +import ( + "runtime" + "unsafe" +) + +const raceenabled = true + +func raceAcquire(addr unsafe.Pointer) { + runtime.RaceAcquire(addr) +} + +func raceReleaseMerge(addr unsafe.Pointer) { + runtime.RaceReleaseMerge(addr) +} + +func raceReadRange(addr unsafe.Pointer, len int) { + runtime.RaceReadRange(addr, len) +} + +func raceWriteRange(addr unsafe.Pointer, len int) { + runtime.RaceWriteRange(addr, len) +} diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go new file mode 100644 index 00000000..e44a3cbf --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -0,0 +1,25 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!race + +package windows + +import ( + "unsafe" +) + +const raceenabled = false + +func raceAcquire(addr unsafe.Pointer) { +} + +func raceReleaseMerge(addr unsafe.Pointer) { +} + +func raceReadRange(addr unsafe.Pointer, len int) { +} + +func raceWriteRange(addr unsafe.Pointer, len int) { +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go new file mode 100644 index 00000000..c605ee6a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -0,0 +1,1396 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "syscall" + "unsafe" +) + +const ( + NameUnknown = 0 + NameFullyQualifiedDN = 1 + NameSamCompatible = 2 + NameDisplay = 3 + NameUniqueId = 6 + NameCanonical = 7 + NameUserPrincipal = 8 + NameCanonicalEx = 9 + NameServicePrincipal = 10 + NameDnsDomain = 12 +) + +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx +//sys TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW +//sys GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW + +// TranslateAccountName converts a directory service +// object name from one format to another. 
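A usage sketch of the TranslateAccountName helper defined just below (editorial illustration; `EXAMPLE\alice` is a placeholder account, and the call only succeeds for an account the directory service can resolve):

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/sys/windows"
	)

	func main() {
		// Translate a SAM-compatible name (DOMAIN\user) into a user
		// principal name (user@domain). The final argument seeds the
		// internal retry buffer.
		upn, err := windows.TranslateAccountName(`EXAMPLE\alice`,
			windows.NameSamCompatible, windows.NameUserPrincipal, 50)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(upn)
	}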
+func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) { + u, e := UTF16PtrFromString(username) + if e != nil { + return "", e + } + n := uint32(50) + for { + b := make([]uint16, n) + e = TranslateName(u, from, to, &b[0], &n) + if e == nil { + return UTF16ToString(b[:n]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +const ( + // do not reorder + NetSetupUnknownStatus = iota + NetSetupUnjoined + NetSetupWorkgroupName + NetSetupDomainName +) + +type UserInfo10 struct { + Name *uint16 + Comment *uint16 + UsrComment *uint16 + FullName *uint16 +} + +//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo +//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation +//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree + +const ( + // do not reorder + SidTypeUser = 1 + iota + SidTypeGroup + SidTypeDomain + SidTypeAlias + SidTypeWellKnownGroup + SidTypeDeletedAccount + SidTypeInvalid + SidTypeUnknown + SidTypeComputer + SidTypeLabel +) + +type SidIdentifierAuthority struct { + Value [6]byte +} + +var ( + SECURITY_NULL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}} + SECURITY_WORLD_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}} + SECURITY_LOCAL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}} + SECURITY_CREATOR_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}} + SECURITY_NON_UNIQUE_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}} + SECURITY_NT_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}} + SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}} +) + +const ( + SECURITY_NULL_RID = 0 + SECURITY_WORLD_RID = 0 + SECURITY_LOCAL_RID = 0 + SECURITY_CREATOR_OWNER_RID = 0 + SECURITY_CREATOR_GROUP_RID = 1 + SECURITY_DIALUP_RID = 1 + SECURITY_NETWORK_RID = 2 + SECURITY_BATCH_RID = 3 + SECURITY_INTERACTIVE_RID = 4 + SECURITY_LOGON_IDS_RID = 5 + SECURITY_SERVICE_RID = 6 + SECURITY_LOCAL_SYSTEM_RID = 18 + SECURITY_BUILTIN_DOMAIN_RID = 32 + SECURITY_PRINCIPAL_SELF_RID = 10 + SECURITY_CREATOR_OWNER_SERVER_RID = 0x2 + SECURITY_CREATOR_GROUP_SERVER_RID = 0x3 + SECURITY_LOGON_IDS_RID_COUNT = 0x3 + SECURITY_ANONYMOUS_LOGON_RID = 0x7 + SECURITY_PROXY_RID = 0x8 + SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9 + SECURITY_SERVER_LOGON_RID = SECURITY_ENTERPRISE_CONTROLLERS_RID + SECURITY_AUTHENTICATED_USER_RID = 0xb + SECURITY_RESTRICTED_CODE_RID = 0xc + SECURITY_NT_NON_UNIQUE_RID = 0x15 +) + +// Predefined domain-relative RIDs for local groups. 
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx +const ( + DOMAIN_ALIAS_RID_ADMINS = 0x220 + DOMAIN_ALIAS_RID_USERS = 0x221 + DOMAIN_ALIAS_RID_GUESTS = 0x222 + DOMAIN_ALIAS_RID_POWER_USERS = 0x223 + DOMAIN_ALIAS_RID_ACCOUNT_OPS = 0x224 + DOMAIN_ALIAS_RID_SYSTEM_OPS = 0x225 + DOMAIN_ALIAS_RID_PRINT_OPS = 0x226 + DOMAIN_ALIAS_RID_BACKUP_OPS = 0x227 + DOMAIN_ALIAS_RID_REPLICATOR = 0x228 + DOMAIN_ALIAS_RID_RAS_SERVERS = 0x229 + DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 0x22a + DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b + DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c + DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d + DOMAIN_ALIAS_RID_MONITORING_USERS = 0x22e + DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f + DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 + DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 + DOMAIN_ALIAS_RID_DCOM_USERS = 0x232 + DOMAIN_ALIAS_RID_IUSERS = 0x238 + DOMAIN_ALIAS_RID_CRYPTO_OPERATORS = 0x239 + DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP = 0x23b + DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c + DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP = 0x23d + DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP = 0x23e +) + +//sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW +//sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW +//sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW +//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid +//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid +//sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid +//sys createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid +//sys isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid +//sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid +//sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid +//sys getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority +//sys getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount +//sys getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority +//sys isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid + +// The security identifier (SID) structure is a variable-length +// structure used to uniquely identify users or groups. +type SID struct{} + +// StringToSid converts a string-format security identifier +// SID into a valid, functional SID. 
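A small sketch of the StringToSid function defined just below, together with the SID accessors that follow it (editorial illustration; S-1-5-32-544 is the well-known BUILTIN\Administrators SID):

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/sys/windows"
	)

	func main() {
		// S-1-5-32-544: NT authority (5), BUILTIN domain (32),
		// DOMAIN_ALIAS_RID_ADMINS (0x220 == 544).
		sid, err := windows.StringToSid("S-1-5-32-544")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(sid.String())            // round-trips to S-1-5-32-544
		fmt.Println(sid.SubAuthorityCount()) // 2
		fmt.Println(sid.SubAuthority(1))     // 544
	}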
+func StringToSid(s string) (*SID, error) {
+	var sid *SID
+	p, e := UTF16PtrFromString(s)
+	if e != nil {
+		return nil, e
+	}
+	e = ConvertStringSidToSid(p, &sid)
+	if e != nil {
+		return nil, e
+	}
+	defer LocalFree((Handle)(unsafe.Pointer(sid)))
+	return sid.Copy()
+}
+
+// LookupSID retrieves a security identifier SID for the account
+// and the name of the domain on which the account was found.
+// System specifies the target computer to search.
+func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) {
+	if len(account) == 0 {
+		return nil, "", 0, syscall.EINVAL
+	}
+	acc, e := UTF16PtrFromString(account)
+	if e != nil {
+		return nil, "", 0, e
+	}
+	var sys *uint16
+	if len(system) > 0 {
+		sys, e = UTF16PtrFromString(system)
+		if e != nil {
+			return nil, "", 0, e
+		}
+	}
+	n := uint32(50)
+	dn := uint32(50)
+	for {
+		b := make([]byte, n)
+		db := make([]uint16, dn)
+		sid = (*SID)(unsafe.Pointer(&b[0]))
+		e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType)
+		if e == nil {
+			return sid, UTF16ToString(db), accType, nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return nil, "", 0, e
+		}
+		if n <= uint32(len(b)) {
+			return nil, "", 0, e
+		}
+	}
+}
+
+// String converts SID to a string format suitable for display, storage, or transmission.
+func (sid *SID) String() string {
+	var s *uint16
+	e := ConvertSidToStringSid(sid, &s)
+	if e != nil {
+		return ""
+	}
+	defer LocalFree((Handle)(unsafe.Pointer(s)))
+	return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(s))[:])
+}
+
+// Len returns the length, in bytes, of a valid security identifier SID.
+func (sid *SID) Len() int {
+	return int(GetLengthSid(sid))
+}
+
+// Copy creates a duplicate of security identifier SID.
+func (sid *SID) Copy() (*SID, error) {
+	b := make([]byte, sid.Len())
+	sid2 := (*SID)(unsafe.Pointer(&b[0]))
+	e := CopySid(uint32(len(b)), sid2, sid)
+	if e != nil {
+		return nil, e
+	}
+	return sid2, nil
+}
+
+// IdentifierAuthority returns the identifier authority of the SID.
+func (sid *SID) IdentifierAuthority() SidIdentifierAuthority {
+	return *getSidIdentifierAuthority(sid)
+}
+
+// SubAuthorityCount returns the number of sub-authorities in the SID.
+func (sid *SID) SubAuthorityCount() uint8 {
+	return *getSidSubAuthorityCount(sid)
+}
+
+// SubAuthority returns the sub-authority of the SID as specified by
+// the index, which must be less than sid.SubAuthorityCount().
+func (sid *SID) SubAuthority(idx uint32) uint32 {
+	if idx >= uint32(sid.SubAuthorityCount()) {
+		panic("sub-authority index out of range")
+	}
+	return *getSidSubAuthority(sid, idx)
+}
+
+// IsValid returns whether the SID has a valid revision and length.
+func (sid *SID) IsValid() bool {
+	return isValidSid(sid)
+}
+
+// Equals compares two SIDs for equality.
+func (sid *SID) Equals(sid2 *SID) bool {
+	return EqualSid(sid, sid2)
+}
+
+// IsWellKnown determines whether the SID matches the well-known sidType.
+func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool {
+	return isWellKnownSid(sid, sidType)
+}
+
+// LookupAccount retrieves the name of the account for this SID
+// and the name of the first domain on which this SID is found.
+// System specifies the target computer to search.
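Both lookup directions are easiest to see side by side; a sketch using the LookupSID function above and the LookupAccount method defined just below (editorial illustration; "Administrator" is a placeholder account name, and an empty system string means the local computer):

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/sys/windows"
	)

	func main() {
		// Name to SID. accType is one of the SidType* values above.
		sid, domain, accType, err := windows.LookupSID("", "Administrator")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s\\Administrator = %s (type %d)\n", domain, sid.String(), accType)

		// And back again: SID to account name.
		account, domain2, _, err := sid.LookupAccount("")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(domain2 + `\` + account)
	}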
+func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { + var sys *uint16 + if len(system) > 0 { + sys, err = UTF16PtrFromString(system) + if err != nil { + return "", "", 0, err + } + } + n := uint32(50) + dn := uint32(50) + for { + b := make([]uint16, n) + db := make([]uint16, dn) + e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType) + if e == nil { + return UTF16ToString(b), UTF16ToString(db), accType, nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", "", 0, e + } + if n <= uint32(len(b)) { + return "", "", 0, e + } + } +} + +// Various types of pre-specified SIDs that can be synthesized and compared at runtime. +type WELL_KNOWN_SID_TYPE uint32 + +const ( + WinNullSid = 0 + WinWorldSid = 1 + WinLocalSid = 2 + WinCreatorOwnerSid = 3 + WinCreatorGroupSid = 4 + WinCreatorOwnerServerSid = 5 + WinCreatorGroupServerSid = 6 + WinNtAuthoritySid = 7 + WinDialupSid = 8 + WinNetworkSid = 9 + WinBatchSid = 10 + WinInteractiveSid = 11 + WinServiceSid = 12 + WinAnonymousSid = 13 + WinProxySid = 14 + WinEnterpriseControllersSid = 15 + WinSelfSid = 16 + WinAuthenticatedUserSid = 17 + WinRestrictedCodeSid = 18 + WinTerminalServerSid = 19 + WinRemoteLogonIdSid = 20 + WinLogonIdsSid = 21 + WinLocalSystemSid = 22 + WinLocalServiceSid = 23 + WinNetworkServiceSid = 24 + WinBuiltinDomainSid = 25 + WinBuiltinAdministratorsSid = 26 + WinBuiltinUsersSid = 27 + WinBuiltinGuestsSid = 28 + WinBuiltinPowerUsersSid = 29 + WinBuiltinAccountOperatorsSid = 30 + WinBuiltinSystemOperatorsSid = 31 + WinBuiltinPrintOperatorsSid = 32 + WinBuiltinBackupOperatorsSid = 33 + WinBuiltinReplicatorSid = 34 + WinBuiltinPreWindows2000CompatibleAccessSid = 35 + WinBuiltinRemoteDesktopUsersSid = 36 + WinBuiltinNetworkConfigurationOperatorsSid = 37 + WinAccountAdministratorSid = 38 + WinAccountGuestSid = 39 + WinAccountKrbtgtSid = 40 + WinAccountDomainAdminsSid = 41 + WinAccountDomainUsersSid = 42 + WinAccountDomainGuestsSid = 43 + WinAccountComputersSid = 44 + WinAccountControllersSid = 45 + WinAccountCertAdminsSid = 46 + WinAccountSchemaAdminsSid = 47 + WinAccountEnterpriseAdminsSid = 48 + WinAccountPolicyAdminsSid = 49 + WinAccountRasAndIasServersSid = 50 + WinNTLMAuthenticationSid = 51 + WinDigestAuthenticationSid = 52 + WinSChannelAuthenticationSid = 53 + WinThisOrganizationSid = 54 + WinOtherOrganizationSid = 55 + WinBuiltinIncomingForestTrustBuildersSid = 56 + WinBuiltinPerfMonitoringUsersSid = 57 + WinBuiltinPerfLoggingUsersSid = 58 + WinBuiltinAuthorizationAccessSid = 59 + WinBuiltinTerminalServerLicenseServersSid = 60 + WinBuiltinDCOMUsersSid = 61 + WinBuiltinIUsersSid = 62 + WinIUserSid = 63 + WinBuiltinCryptoOperatorsSid = 64 + WinUntrustedLabelSid = 65 + WinLowLabelSid = 66 + WinMediumLabelSid = 67 + WinHighLabelSid = 68 + WinSystemLabelSid = 69 + WinWriteRestrictedCodeSid = 70 + WinCreatorOwnerRightsSid = 71 + WinCacheablePrincipalsGroupSid = 72 + WinNonCacheablePrincipalsGroupSid = 73 + WinEnterpriseReadonlyControllersSid = 74 + WinAccountReadonlyControllersSid = 75 + WinBuiltinEventLogReadersGroup = 76 + WinNewEnterpriseReadonlyControllersSid = 77 + WinBuiltinCertSvcDComAccessGroup = 78 + WinMediumPlusLabelSid = 79 + WinLocalLogonSid = 80 + WinConsoleLogonSid = 81 + WinThisOrganizationCertificateSid = 82 + WinApplicationPackageAuthoritySid = 83 + WinBuiltinAnyPackageSid = 84 + WinCapabilityInternetClientSid = 85 + WinCapabilityInternetClientServerSid = 86 + WinCapabilityPrivateNetworkClientServerSid = 87 + WinCapabilityPicturesLibrarySid = 88 + 
WinCapabilityVideosLibrarySid = 89 + WinCapabilityMusicLibrarySid = 90 + WinCapabilityDocumentsLibrarySid = 91 + WinCapabilitySharedUserCertificatesSid = 92 + WinCapabilityEnterpriseAuthenticationSid = 93 + WinCapabilityRemovableStorageSid = 94 + WinBuiltinRDSRemoteAccessServersSid = 95 + WinBuiltinRDSEndpointServersSid = 96 + WinBuiltinRDSManagementServersSid = 97 + WinUserModeDriversSid = 98 + WinBuiltinHyperVAdminsSid = 99 + WinAccountCloneableControllersSid = 100 + WinBuiltinAccessControlAssistanceOperatorsSid = 101 + WinBuiltinRemoteManagementUsersSid = 102 + WinAuthenticationAuthorityAssertedSid = 103 + WinAuthenticationServiceAssertedSid = 104 + WinLocalAccountSid = 105 + WinLocalAccountAndAdministratorSid = 106 + WinAccountProtectedUsersSid = 107 + WinCapabilityAppointmentsSid = 108 + WinCapabilityContactsSid = 109 + WinAccountDefaultSystemManagedSid = 110 + WinBuiltinDefaultSystemManagedGroupSid = 111 + WinBuiltinStorageReplicaAdminsSid = 112 + WinAccountKeyAdminsSid = 113 + WinAccountEnterpriseKeyAdminsSid = 114 + WinAuthenticationKeyTrustSid = 115 + WinAuthenticationKeyPropertyMFASid = 116 + WinAuthenticationKeyPropertyAttestationSid = 117 + WinAuthenticationFreshKeyAuthSid = 118 + WinBuiltinDeviceOwnersSid = 119 +) + +// Creates a SID for a well-known predefined alias, generally using the constants of the form +// Win*Sid, for the local machine. +func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) { + return CreateWellKnownDomainSid(sidType, nil) +} + +// Creates a SID for a well-known predefined alias, generally using the constants of the form +// Win*Sid, for the domain specified by the domainSid parameter. +func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) { + n := uint32(50) + for { + b := make([]byte, n) + sid := (*SID)(unsafe.Pointer(&b[0])) + err := createWellKnownSid(sidType, domainSid, sid, &n) + if err == nil { + return sid, nil + } + if err != ERROR_INSUFFICIENT_BUFFER { + return nil, err + } + if n <= uint32(len(b)) { + return nil, err + } + } +} + +const ( + // do not reorder + TOKEN_ASSIGN_PRIMARY = 1 << iota + TOKEN_DUPLICATE + TOKEN_IMPERSONATE + TOKEN_QUERY + TOKEN_QUERY_SOURCE + TOKEN_ADJUST_PRIVILEGES + TOKEN_ADJUST_GROUPS + TOKEN_ADJUST_DEFAULT + TOKEN_ADJUST_SESSIONID + + TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | + TOKEN_ASSIGN_PRIMARY | + TOKEN_DUPLICATE | + TOKEN_IMPERSONATE | + TOKEN_QUERY | + TOKEN_QUERY_SOURCE | + TOKEN_ADJUST_PRIVILEGES | + TOKEN_ADJUST_GROUPS | + TOKEN_ADJUST_DEFAULT | + TOKEN_ADJUST_SESSIONID + TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY + TOKEN_WRITE = STANDARD_RIGHTS_WRITE | + TOKEN_ADJUST_PRIVILEGES | + TOKEN_ADJUST_GROUPS | + TOKEN_ADJUST_DEFAULT + TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE +) + +const ( + // do not reorder + TokenUser = 1 + iota + TokenGroups + TokenPrivileges + TokenOwner + TokenPrimaryGroup + TokenDefaultDacl + TokenSource + TokenType + TokenImpersonationLevel + TokenStatistics + TokenRestrictedSids + TokenSessionId + TokenGroupsAndPrivileges + TokenSessionReference + TokenSandBoxInert + TokenAuditPolicy + TokenOrigin + TokenElevationType + TokenLinkedToken + TokenElevation + TokenHasRestrictions + TokenAccessInformation + TokenVirtualizationAllowed + TokenVirtualizationEnabled + TokenIntegrityLevel + TokenUIAccess + TokenMandatoryPolicy + TokenLogonSid + MaxTokenInfoClass +) + +// Group attributes inside of Tokengroups.Groups[i].Attributes +const ( + SE_GROUP_MANDATORY = 0x00000001 + SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002 + SE_GROUP_ENABLED = 
0x00000004 + SE_GROUP_OWNER = 0x00000008 + SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010 + SE_GROUP_INTEGRITY = 0x00000020 + SE_GROUP_INTEGRITY_ENABLED = 0x00000040 + SE_GROUP_LOGON_ID = 0xC0000000 + SE_GROUP_RESOURCE = 0x20000000 + SE_GROUP_VALID_ATTRIBUTES = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED +) + +// Privilege attributes +const ( + SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001 + SE_PRIVILEGE_ENABLED = 0x00000002 + SE_PRIVILEGE_REMOVED = 0x00000004 + SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000 + SE_PRIVILEGE_VALID_ATTRIBUTES = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS +) + +// Token types +const ( + TokenPrimary = 1 + TokenImpersonation = 2 +) + +// Impersonation levels +const ( + SecurityAnonymous = 0 + SecurityIdentification = 1 + SecurityImpersonation = 2 + SecurityDelegation = 3 +) + +type LUID struct { + LowPart uint32 + HighPart int32 +} + +type LUIDAndAttributes struct { + Luid LUID + Attributes uint32 +} + +type SIDAndAttributes struct { + Sid *SID + Attributes uint32 +} + +type Tokenuser struct { + User SIDAndAttributes +} + +type Tokenprimarygroup struct { + PrimaryGroup *SID +} + +type Tokengroups struct { + GroupCount uint32 + Groups [1]SIDAndAttributes // Use AllGroups() for iterating. +} + +// AllGroups returns a slice that can be used to iterate over the groups in g. +func (g *Tokengroups) AllGroups() []SIDAndAttributes { + return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount] +} + +type Tokenprivileges struct { + PrivilegeCount uint32 + Privileges [1]LUIDAndAttributes // Use AllPrivileges() for iterating. +} + +// AllPrivileges returns a slice that can be used to iterate over the privileges in p. 
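The AllGroups and AllPrivileges accessors are the Go-side escape hatch for these variable-length, C-style trailing arrays; a sketch of iterating a token's groups (editorial illustration, assuming OpenProcessToken, CurrentProcess, and GetTokenGroups from elsewhere in this file):

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/sys/windows"
	)

	func main() {
		var tok windows.Token
		if err := windows.OpenProcessToken(windows.CurrentProcess(), windows.TOKEN_QUERY, &tok); err != nil {
			log.Fatal(err)
		}
		defer tok.Close()

		groups, err := tok.GetTokenGroups() // defined further below
		if err != nil {
			log.Fatal(err)
		}
		for _, g := range groups.AllGroups() {
			enabled := g.Attributes&windows.SE_GROUP_ENABLED != 0
			fmt.Println(g.Sid.String(), "enabled:", enabled)
		}
	}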
+func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes { + return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount] +} + +type Tokenmandatorylabel struct { + Label SIDAndAttributes +} + +func (tml *Tokenmandatorylabel) Size() uint32 { + return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid) +} + +// Authorization Functions +//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership +//sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken +//sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken +//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf +//sys RevertToSelf() (err error) = advapi32.RevertToSelf +//sys SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken +//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW +//sys AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges +//sys AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups +//sys GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation +//sys SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation +//sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx +//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW +//sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW +//sys getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetWindowsDirectoryW +//sys getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemWindowsDirectoryW + +// An access token contains the security information for a logon session. +// The system creates an access token when a user logs on, and every +// process executed on behalf of the user has a copy of the token. +// The token identifies the user, the user's groups, and the user's +// privileges. The system uses the token to control access to securable +// objects and to control the ability of the user to perform various +// system-related operations on the local computer. +type Token Handle + +// OpenCurrentProcessToken opens an access token associated with current +// process with TOKEN_QUERY access. It is a real token that needs to be closed. +// +// Deprecated: Explicitly call OpenProcessToken(CurrentProcess(), ...) +// with the desired access instead, or use GetCurrentProcessToken for a +// TOKEN_QUERY token. +func OpenCurrentProcessToken() (Token, error) { + var token Token + err := OpenProcessToken(CurrentProcess(), TOKEN_QUERY, &token) + return token, err +} + +// GetCurrentProcessToken returns the access token associated with +// the current process. 
It is a pseudo token that does not need +// to be closed. +func GetCurrentProcessToken() Token { + return Token(^uintptr(4 - 1)) +} + +// GetCurrentThreadToken returns the access token associated with +// the current thread. It is a pseudo token that does not need +// to be closed. +func GetCurrentThreadToken() Token { + return Token(^uintptr(5 - 1)) +} + +// GetCurrentThreadEffectiveToken returns the effective access token +// associated with the current thread. It is a pseudo token that does +// not need to be closed. +func GetCurrentThreadEffectiveToken() Token { + return Token(^uintptr(6 - 1)) +} + +// Close releases access to the access token. +func (t Token) Close() error { + return CloseHandle(Handle(t)) +} + +// getInfo retrieves a specified type of information about an access token. +func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) { + n := uint32(initSize) + for { + b := make([]byte, n) + e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) + if e == nil { + return unsafe.Pointer(&b[0]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return nil, e + } + if n <= uint32(len(b)) { + return nil, e + } + } +} + +// GetTokenUser retrieves access token t user account information. +func (t Token) GetTokenUser() (*Tokenuser, error) { + i, e := t.getInfo(TokenUser, 50) + if e != nil { + return nil, e + } + return (*Tokenuser)(i), nil +} + +// GetTokenGroups retrieves group accounts associated with access token t. +func (t Token) GetTokenGroups() (*Tokengroups, error) { + i, e := t.getInfo(TokenGroups, 50) + if e != nil { + return nil, e + } + return (*Tokengroups)(i), nil +} + +// GetTokenPrimaryGroup retrieves access token t primary group information. +// The returned structure holds a pointer to a SID representing a group that will become +// the primary group of any objects created by a process using this access token. +func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) { + i, e := t.getInfo(TokenPrimaryGroup, 50) + if e != nil { + return nil, e + } + return (*Tokenprimarygroup)(i), nil +} + +// GetUserProfileDirectory retrieves path to the +// root directory of the access token t user's profile. +func (t Token) GetUserProfileDirectory() (string, error) { + n := uint32(100) + for { + b := make([]uint16, n) + e := GetUserProfileDirectory(t, &b[0], &n) + if e == nil { + return UTF16ToString(b), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +// IsElevated returns whether the current token is elevated from a UAC perspective. +func (token Token) IsElevated() bool { + var isElevated uint32 + var outLen uint32 + err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen) + if err != nil { + return false + } + return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0 +} + +// GetLinkedToken returns the linked token, which may be an elevated UAC token. +func (token Token) GetLinkedToken() (Token, error) { + var linkedToken Token + var outLen uint32 + err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen) + if err != nil { + return Token(0), err + } + return linkedToken, nil +} + +// GetSystemDirectory retrieves the path to current location of the system +// directory, which is typically, though not always, `C:\Windows\System32`.
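All of the Get* accessors above share the getInfo pattern: call GetTokenInformation with a growing buffer until it stops returning ERROR_INSUFFICIENT_BUFFER, then reinterpret the bytes as the typed structure. A minimal end-to-end sketch, illustrative only and not part of the vendored code (GetSystemDirectory's body resumes below):

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	var tok windows.Token
	// Unlike the pseudo tokens above, a token from OpenProcessToken is a
	// real handle and must be closed.
	if err := windows.OpenProcessToken(windows.CurrentProcess(), windows.TOKEN_QUERY, &tok); err != nil {
		panic(err)
	}
	defer tok.Close()

	user, err := tok.GetTokenUser()
	if err != nil {
		panic(err)
	}
	fmt.Printf("user SID attributes: %#x, UAC-elevated: %v\n", user.User.Attributes, tok.IsElevated())
}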
+func GetSystemDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getSystemDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// GetWindowsDirectory retrieves the path to current location of the Windows +// directory, which is typically, though not always, `C:\Windows`. This may +// be a private user directory in the case that the application is running +// under a terminal server. +func GetWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// GetSystemWindowsDirectory retrieves the path to current location of the +// Windows directory, which is typically, though not always, `C:\Windows`. +func GetSystemWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getSystemWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// IsMember reports whether the access token t is a member of the provided SID. +func (t Token) IsMember(sid *SID) (bool, error) { + var b int32 + if e := checkTokenMembership(t, sid, &b); e != nil { + return false, e + } + return b != 0, nil +} + +const ( + WTS_CONSOLE_CONNECT = 0x1 + WTS_CONSOLE_DISCONNECT = 0x2 + WTS_REMOTE_CONNECT = 0x3 + WTS_REMOTE_DISCONNECT = 0x4 + WTS_SESSION_LOGON = 0x5 + WTS_SESSION_LOGOFF = 0x6 + WTS_SESSION_LOCK = 0x7 + WTS_SESSION_UNLOCK = 0x8 + WTS_SESSION_REMOTE_CONTROL = 0x9 + WTS_SESSION_CREATE = 0xa + WTS_SESSION_TERMINATE = 0xb +) + +const ( + WTSActive = 0 + WTSConnected = 1 + WTSConnectQuery = 2 + WTSShadow = 3 + WTSDisconnected = 4 + WTSIdle = 5 + WTSListen = 6 + WTSReset = 7 + WTSDown = 8 + WTSInit = 9 +) + +type WTSSESSION_NOTIFICATION struct { + Size uint32 + SessionID uint32 +} + +type WTS_SESSION_INFO struct { + SessionID uint32 + WindowStationName *uint16 + State uint32 +} + +//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken +//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW +//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory + +type ACL struct { + aclRevision byte + sbz1 byte + aclSize uint16 + aceCount uint16 + sbz2 uint16 +} + +type SECURITY_DESCRIPTOR struct { + revision byte + sbz1 byte + control SECURITY_DESCRIPTOR_CONTROL + owner *SID + group *SID + sacl *ACL + dacl *ACL +} + +type SecurityAttributes struct { + Length uint32 + SecurityDescriptor *SECURITY_DESCRIPTOR + InheritHandle uint32 +} + +type SE_OBJECT_TYPE uint32 + +// Constants for type SE_OBJECT_TYPE +const ( + SE_UNKNOWN_OBJECT_TYPE = 0 + SE_FILE_OBJECT = 1 + SE_SERVICE = 2 + SE_PRINTER = 3 + SE_REGISTRY_KEY = 4 + SE_LMSHARE = 5 + SE_KERNEL_OBJECT = 6 + SE_WINDOW_OBJECT = 7 + SE_DS_OBJECT = 8 + SE_DS_OBJECT_ALL = 9 + SE_PROVIDER_DEFINED_OBJECT = 10 + SE_WMIGUID_OBJECT = 11 + SE_REGISTRY_WOW64_32KEY = 12 + SE_REGISTRY_WOW64_64KEY = 13 +) + +type SECURITY_INFORMATION uint32 + +// Constants for type SECURITY_INFORMATION +const ( + OWNER_SECURITY_INFORMATION = 0x00000001 + GROUP_SECURITY_INFORMATION = 0x00000002 + DACL_SECURITY_INFORMATION = 0x00000004 + SACL_SECURITY_INFORMATION = 0x00000008 + LABEL_SECURITY_INFORMATION = 0x00000010 + ATTRIBUTE_SECURITY_INFORMATION = 
0x00000020 + SCOPE_SECURITY_INFORMATION = 0x00000040 + BACKUP_SECURITY_INFORMATION = 0x00010000 + PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 + PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 + UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 + UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 +) + +type SECURITY_DESCRIPTOR_CONTROL uint16 + +// Constants for type SECURITY_DESCRIPTOR_CONTROL +const ( + SE_OWNER_DEFAULTED = 0x0001 + SE_GROUP_DEFAULTED = 0x0002 + SE_DACL_PRESENT = 0x0004 + SE_DACL_DEFAULTED = 0x0008 + SE_SACL_PRESENT = 0x0010 + SE_SACL_DEFAULTED = 0x0020 + SE_DACL_AUTO_INHERIT_REQ = 0x0100 + SE_SACL_AUTO_INHERIT_REQ = 0x0200 + SE_DACL_AUTO_INHERITED = 0x0400 + SE_SACL_AUTO_INHERITED = 0x0800 + SE_DACL_PROTECTED = 0x1000 + SE_SACL_PROTECTED = 0x2000 + SE_RM_CONTROL_VALID = 0x4000 + SE_SELF_RELATIVE = 0x8000 +) + +type ACCESS_MASK uint32 + +// Constants for type ACCESS_MASK +const ( + DELETE = 0x00010000 + READ_CONTROL = 0x00020000 + WRITE_DAC = 0x00040000 + WRITE_OWNER = 0x00080000 + SYNCHRONIZE = 0x00100000 + STANDARD_RIGHTS_REQUIRED = 0x000F0000 + STANDARD_RIGHTS_READ = READ_CONTROL + STANDARD_RIGHTS_WRITE = READ_CONTROL + STANDARD_RIGHTS_EXECUTE = READ_CONTROL + STANDARD_RIGHTS_ALL = 0x001F0000 + SPECIFIC_RIGHTS_ALL = 0x0000FFFF + ACCESS_SYSTEM_SECURITY = 0x01000000 + MAXIMUM_ALLOWED = 0x02000000 + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + GENERIC_EXECUTE = 0x20000000 + GENERIC_ALL = 0x10000000 +) + +type ACCESS_MODE uint32 + +// Constants for type ACCESS_MODE +const ( + NOT_USED_ACCESS = 0 + GRANT_ACCESS = 1 + SET_ACCESS = 2 + DENY_ACCESS = 3 + REVOKE_ACCESS = 4 + SET_AUDIT_SUCCESS = 5 + SET_AUDIT_FAILURE = 6 +) + +// Constants for AceFlags and Inheritance fields +const ( + NO_INHERITANCE = 0x0 + SUB_OBJECTS_ONLY_INHERIT = 0x1 + SUB_CONTAINERS_ONLY_INHERIT = 0x2 + SUB_CONTAINERS_AND_OBJECTS_INHERIT = 0x3 + INHERIT_NO_PROPAGATE = 0x4 + INHERIT_ONLY = 0x8 + INHERITED_ACCESS_ENTRY = 0x10 + INHERITED_PARENT = 0x10000000 + INHERITED_GRANDPARENT = 0x20000000 + OBJECT_INHERIT_ACE = 0x1 + CONTAINER_INHERIT_ACE = 0x2 + NO_PROPAGATE_INHERIT_ACE = 0x4 + INHERIT_ONLY_ACE = 0x8 + INHERITED_ACE = 0x10 + VALID_INHERIT_FLAGS = 0x1F +) + +type MULTIPLE_TRUSTEE_OPERATION uint32 + +// Constants for MULTIPLE_TRUSTEE_OPERATION +const ( + NO_MULTIPLE_TRUSTEE = 0 + TRUSTEE_IS_IMPERSONATE = 1 +) + +type TRUSTEE_FORM uint32 + +// Constants for TRUSTEE_FORM +const ( + TRUSTEE_IS_SID = 0 + TRUSTEE_IS_NAME = 1 + TRUSTEE_BAD_FORM = 2 + TRUSTEE_IS_OBJECTS_AND_SID = 3 + TRUSTEE_IS_OBJECTS_AND_NAME = 4 +) + +type TRUSTEE_TYPE uint32 + +// Constants for TRUSTEE_TYPE +const ( + TRUSTEE_IS_UNKNOWN = 0 + TRUSTEE_IS_USER = 1 + TRUSTEE_IS_GROUP = 2 + TRUSTEE_IS_DOMAIN = 3 + TRUSTEE_IS_ALIAS = 4 + TRUSTEE_IS_WELL_KNOWN_GROUP = 5 + TRUSTEE_IS_DELETED = 6 + TRUSTEE_IS_INVALID = 7 + TRUSTEE_IS_COMPUTER = 8 +) + +// Constants for ObjectsPresent field +const ( + ACE_OBJECT_TYPE_PRESENT = 0x1 + ACE_INHERITED_OBJECT_TYPE_PRESENT = 0x2 +) + +type EXPLICIT_ACCESS struct { + AccessPermissions ACCESS_MASK + AccessMode ACCESS_MODE + Inheritance uint32 + Trustee TRUSTEE +} + +// This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. 
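The TRUSTEE union from the Win32 headers is modeled here as a pointer-sized TrusteeValue with typed constructors (defined immediately below), so a trustee can be specified either by SID or by account name. A small illustrative sketch, where "Guest" is an arbitrary example account:

package main

import "golang.org/x/sys/windows"

func main() {
	// With TRUSTEE_IS_NAME the API resolves the account name itself.
	t := windows.TRUSTEE{
		TrusteeForm:  windows.TRUSTEE_IS_NAME,
		TrusteeType:  windows.TRUSTEE_IS_USER,
		TrusteeValue: windows.TrusteeValueFromString("Guest"),
	}
	_ = t // typically embedded in an EXPLICIT_ACCESS and passed to ACLFromEntries
}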
+type TrusteeValue uintptr + +func TrusteeValueFromString(str string) TrusteeValue { + return TrusteeValue(unsafe.Pointer(StringToUTF16Ptr(str))) +} +func TrusteeValueFromSID(sid *SID) TrusteeValue { + return TrusteeValue(unsafe.Pointer(sid)) +} +func TrusteeValueFromObjectsAndSid(objectsAndSid *OBJECTS_AND_SID) TrusteeValue { + return TrusteeValue(unsafe.Pointer(objectsAndSid)) +} +func TrusteeValueFromObjectsAndName(objectsAndName *OBJECTS_AND_NAME) TrusteeValue { + return TrusteeValue(unsafe.Pointer(objectsAndName)) +} + +type TRUSTEE struct { + MultipleTrustee *TRUSTEE + MultipleTrusteeOperation MULTIPLE_TRUSTEE_OPERATION + TrusteeForm TRUSTEE_FORM + TrusteeType TRUSTEE_TYPE + TrusteeValue TrusteeValue +} + +type OBJECTS_AND_SID struct { + ObjectsPresent uint32 + ObjectTypeGuid GUID + InheritedObjectTypeGuid GUID + Sid *SID +} + +type OBJECTS_AND_NAME struct { + ObjectsPresent uint32 + ObjectType SE_OBJECT_TYPE + ObjectTypeName *uint16 + InheritedObjectTypeName *uint16 + Name *uint16 +} + +//sys getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo +//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) = advapi32.SetSecurityInfo +//sys getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW +//sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW + +//sys buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW +//sys initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = advapi32.InitializeSecurityDescriptor + +//sys getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) = advapi32.GetSecurityDescriptorControl +//sys getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorDacl +//sys getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorSacl +//sys getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorOwner +//sys getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorGroup +//sys getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) = advapi32.GetSecurityDescriptorLength +//sys getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) [failretval!=0] = advapi32.GetSecurityDescriptorRMControl +//sys isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) = advapi32.IsValidSecurityDescriptor + +//sys setSecurityDescriptorControl(sd 
*SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) = advapi32.SetSecurityDescriptorControl +//sys setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorDacl +//sys setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorSacl +//sys setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) = advapi32.SetSecurityDescriptorOwner +//sys setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) = advapi32.SetSecurityDescriptorGroup +//sys setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) = advapi32.SetSecurityDescriptorRMControl + +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW + +//sys makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) = advapi32.MakeAbsoluteSD +//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD + +//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW + +// Control returns the security descriptor control bits. +func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { + err = getSecurityDescriptorControl(sd, &control, &revision) + return +} + +// SetControl sets the security descriptor control bits. +func (sd *SECURITY_DESCRIPTOR) SetControl(controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) error { + return setSecurityDescriptorControl(sd, controlBitsOfInterest, controlBitsToSet) +} + +// RMControl returns the security descriptor resource manager control bits. +func (sd *SECURITY_DESCRIPTOR) RMControl() (control uint8, err error) { + err = getSecurityDescriptorRMControl(sd, &control) + return +} + +// SetRMControl sets the security descriptor resource manager control bits. +func (sd *SECURITY_DESCRIPTOR) SetRMControl(rmControl uint8) { + setSecurityDescriptorRMControl(sd, &rmControl) +} + +// DACL returns the security descriptor DACL and whether it was defaulted. The dacl return value may be nil +// if a DACL exists but is an "empty DACL", meaning fully permissive. If the DACL does not exist, err returns +// ERROR_OBJECT_NOT_FOUND. +func (sd *SECURITY_DESCRIPTOR) DACL() (dacl *ACL, defaulted bool, err error) { + var present bool + err = getSecurityDescriptorDacl(sd, &present, &dacl, &defaulted) + if !present { + err = ERROR_OBJECT_NOT_FOUND + } + return +} + +// SetDACL sets the absolute security descriptor DACL. 
+func (absoluteSD *SECURITY_DESCRIPTOR) SetDACL(dacl *ACL, present, defaulted bool) error { + return setSecurityDescriptorDacl(absoluteSD, present, dacl, defaulted) +} + +// SACL returns the security descriptor SACL and whether it was defaulted. The sacl return value may be nil +// if a SACL exists but is an "empty SACL", meaning no auditing entries. If the SACL does not exist, err returns +// ERROR_OBJECT_NOT_FOUND. +func (sd *SECURITY_DESCRIPTOR) SACL() (sacl *ACL, defaulted bool, err error) { + var present bool + err = getSecurityDescriptorSacl(sd, &present, &sacl, &defaulted) + if !present { + err = ERROR_OBJECT_NOT_FOUND + } + return +} + +// SetSACL sets the absolute security descriptor SACL. +func (absoluteSD *SECURITY_DESCRIPTOR) SetSACL(sacl *ACL, present, defaulted bool) error { + return setSecurityDescriptorSacl(absoluteSD, present, sacl, defaulted) +} + +// Owner returns the security descriptor owner and whether it was defaulted. +func (sd *SECURITY_DESCRIPTOR) Owner() (owner *SID, defaulted bool, err error) { + err = getSecurityDescriptorOwner(sd, &owner, &defaulted) + return +} + +// SetOwner sets the absolute security descriptor owner. +func (absoluteSD *SECURITY_DESCRIPTOR) SetOwner(owner *SID, defaulted bool) error { + return setSecurityDescriptorOwner(absoluteSD, owner, defaulted) +} + +// Group returns the security descriptor group and whether it was defaulted. +func (sd *SECURITY_DESCRIPTOR) Group() (group *SID, defaulted bool, err error) { + err = getSecurityDescriptorGroup(sd, &group, &defaulted) + return +} + +// SetGroup sets the absolute security descriptor group. +func (absoluteSD *SECURITY_DESCRIPTOR) SetGroup(group *SID, defaulted bool) error { + return setSecurityDescriptorGroup(absoluteSD, group, defaulted) +} + +// Length returns the length of the security descriptor. +func (sd *SECURITY_DESCRIPTOR) Length() uint32 { + return getSecurityDescriptorLength(sd) +} + +// IsValid returns whether the security descriptor is valid. +func (sd *SECURITY_DESCRIPTOR) IsValid() bool { + return isValidSecurityDescriptor(sd) +} + +// String returns the SDDL form of the security descriptor, with a function signature that can be +// used with %v formatting directives. +func (sd *SECURITY_DESCRIPTOR) String() string { + var sddl *uint16 + err := convertSecurityDescriptorToStringSecurityDescriptor(sd, 1, 0xff, &sddl, nil) + if err != nil { + return "" + } + defer LocalFree(Handle(unsafe.Pointer(sddl))) + return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(sddl))[:]) +} + +// ToAbsolute converts a self-relative security descriptor into an absolute one. +func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DESCRIPTOR, err error) { + control, _, err := selfRelativeSD.Control() + if err != nil { + return + } + if control&SE_SELF_RELATIVE == 0 { + err = ERROR_INVALID_PARAMETER + return + } + var absoluteSDSize, daclSize, saclSize, ownerSize, groupSize uint32 + err = makeAbsoluteSD(selfRelativeSD, nil, &absoluteSDSize, + nil, &daclSize, nil, &saclSize, nil, &ownerSize, nil, &groupSize) + switch err { + case ERROR_INSUFFICIENT_BUFFER: + case nil: + // makeAbsoluteSD is expected to fail, but it succeeds.
+ return nil, ERROR_INTERNAL_ERROR + default: + return nil, err + } + if absoluteSDSize > 0 { + absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) + } + var ( + dacl *ACL + sacl *ACL + owner *SID + group *SID + ) + if daclSize > 0 { + dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) + } + if saclSize > 0 { + sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) + } + if ownerSize > 0 { + owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) + } + if groupSize > 0 { + group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) + } + err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, + dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) + return +} + +// ToSelfRelative converts an absolute security descriptor into a self-relative one. +func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURITY_DESCRIPTOR, err error) { + control, _, err := absoluteSD.Control() + if err != nil { + return + } + if control&SE_SELF_RELATIVE != 0 { + err = ERROR_INVALID_PARAMETER + return + } + var selfRelativeSDSize uint32 + err = makeSelfRelativeSD(absoluteSD, nil, &selfRelativeSDSize) + switch err { + case ERROR_INSUFFICIENT_BUFFER: + case nil: + // makeSelfRelativeSD is expected to fail, but it succeeds. + return nil, ERROR_INTERNAL_ERROR + default: + return nil, err + } + if selfRelativeSDSize > 0 { + selfRelativeSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, selfRelativeSDSize)[0])) + } + err = makeSelfRelativeSD(absoluteSD, selfRelativeSD, &selfRelativeSDSize) + return +} + +func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { + sdBytes := make([]byte, selfRelativeSD.Length()) + copy(sdBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(selfRelativeSD))[:len(sdBytes)]) + return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&sdBytes[0])) +} + +// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a +// self-relative security descriptor object allocated on the Go heap. +func SecurityDescriptorFromString(sddl string) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &winHeapSD, nil) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// GetSecurityInfo queries the security information for a given handle and returns the self-relative security +// descriptor result on the Go heap. +func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = getSecurityInfo(handle, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security +// descriptor result on the Go heap. 
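The probe-then-allocate shape of ToAbsolute and ToSelfRelative above is the standard Win32 convention: the first call with nil buffers is expected to fail with ERROR_INSUFFICIENT_BUFFER while reporting the required sizes, and the second call fills caller-allocated memory. A round-trip sketch, illustrative only (the SDDL string is an arbitrary example, and GetNamedSecurityInfo's body resumes below):

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// D:(A;;GA;;;BA) grants GENERIC_ALL to Builtin\Administrators.
	selfRel, err := windows.SecurityDescriptorFromString("D:(A;;GA;;;BA)")
	if err != nil {
		panic(err)
	}
	abs, err := selfRel.ToAbsolute()
	if err != nil {
		panic(err)
	}
	back, err := abs.ToSelfRelative()
	if err != nil {
		panic(err)
	}
	fmt.Println("self-relative lengths:", selfRel.Length(), back.Length())
}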
+func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// BuildSecurityDescriptor makes a new security descriptor using the input trustees, explicit access lists, and +// prior security descriptor to be merged, any of which can be nil, returning the self-relative security descriptor +// result on the Go heap. +func BuildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, accessEntries []EXPLICIT_ACCESS, auditEntries []EXPLICIT_ACCESS, mergedSecurityDescriptor *SECURITY_DESCRIPTOR) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + var winHeapSDSize uint32 + var firstAccessEntry *EXPLICIT_ACCESS + if len(accessEntries) > 0 { + firstAccessEntry = &accessEntries[0] + } + var firstAuditEntry *EXPLICIT_ACCESS + if len(auditEntries) > 0 { + firstAuditEntry = &auditEntries[0] + } + err = buildSecurityDescriptor(owner, group, uint32(len(accessEntries)), firstAccessEntry, uint32(len(auditEntries)), firstAuditEntry, mergedSecurityDescriptor, &winHeapSDSize, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// NewSecurityDescriptor creates and initializes a new absolute security descriptor. +func NewSecurityDescriptor() (absoluteSD *SECURITY_DESCRIPTOR, err error) { + absoluteSD = &SECURITY_DESCRIPTOR{} + err = initializeSecurityDescriptor(absoluteSD, 1) + return +} + +// ACLFromEntries returns a new ACL on the Go heap containing a list of explicit entries as well as those of another ACL. +// Both explicitEntries and mergedACL are optional and can be nil. +func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL, err error) { + var firstExplicitEntry *EXPLICIT_ACCESS + if len(explicitEntries) > 0 { + firstExplicitEntry = &explicitEntries[0] + } + var winHeapACL *ACL + err = setEntriesInAcl(uint32(len(explicitEntries)), firstExplicitEntry, mergedACL, &winHeapACL) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapACL))) + aclBytes := make([]byte, winHeapACL.aclSize) + copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes)]) + return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil +} diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go new file mode 100644 index 00000000..847e00bc --- /dev/null +++ b/vendor/golang.org/x/sys/windows/service.go @@ -0,0 +1,229 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
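That completes the new security-descriptor surface. Before the vendored service.go begins below, a sketch of how the pieces compose end to end: build a DACL with ACLFromEntries, attach it to a fresh absolute descriptor, and flatten the result. Illustrative only; the Everyone SID "S-1-1-0" is an example choice:

package main

import "golang.org/x/sys/windows"

func main() {
	everyone, err := windows.StringToSid("S-1-1-0") // well-known Everyone SID
	if err != nil {
		panic(err)
	}
	acl, err := windows.ACLFromEntries([]windows.EXPLICIT_ACCESS{{
		AccessPermissions: windows.GENERIC_READ,
		AccessMode:        windows.GRANT_ACCESS,
		Inheritance:       windows.NO_INHERITANCE,
		Trustee: windows.TRUSTEE{
			TrusteeForm:  windows.TRUSTEE_IS_SID,
			TrusteeType:  windows.TRUSTEE_IS_WELL_KNOWN_GROUP,
			TrusteeValue: windows.TrusteeValueFromSID(everyone),
		},
	}}, nil)
	if err != nil {
		panic(err)
	}
	sd, err := windows.NewSecurityDescriptor() // absolute descriptor
	if err != nil {
		panic(err)
	}
	if err := sd.SetDACL(acl, true, false); err != nil {
		panic(err)
	}
	selfRel, err := sd.ToSelfRelative()
	if err != nil {
		panic(err)
	}
	_ = selfRel // ready to persist or pass to SetSecurityInfo
}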
+ +// +build windows + +package windows + +const ( + SC_MANAGER_CONNECT = 1 + SC_MANAGER_CREATE_SERVICE = 2 + SC_MANAGER_ENUMERATE_SERVICE = 4 + SC_MANAGER_LOCK = 8 + SC_MANAGER_QUERY_LOCK_STATUS = 16 + SC_MANAGER_MODIFY_BOOT_CONFIG = 32 + SC_MANAGER_ALL_ACCESS = 0xf003f +) + +//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW + +const ( + SERVICE_KERNEL_DRIVER = 1 + SERVICE_FILE_SYSTEM_DRIVER = 2 + SERVICE_ADAPTER = 4 + SERVICE_RECOGNIZER_DRIVER = 8 + SERVICE_WIN32_OWN_PROCESS = 16 + SERVICE_WIN32_SHARE_PROCESS = 32 + SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS + SERVICE_INTERACTIVE_PROCESS = 256 + SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER + SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS + + SERVICE_BOOT_START = 0 + SERVICE_SYSTEM_START = 1 + SERVICE_AUTO_START = 2 + SERVICE_DEMAND_START = 3 + SERVICE_DISABLED = 4 + + SERVICE_ERROR_IGNORE = 0 + SERVICE_ERROR_NORMAL = 1 + SERVICE_ERROR_SEVERE = 2 + SERVICE_ERROR_CRITICAL = 3 + + SC_STATUS_PROCESS_INFO = 0 + + SC_ACTION_NONE = 0 + SC_ACTION_RESTART = 1 + SC_ACTION_REBOOT = 2 + SC_ACTION_RUN_COMMAND = 3 + + SERVICE_STOPPED = 1 + SERVICE_START_PENDING = 2 + SERVICE_STOP_PENDING = 3 + SERVICE_RUNNING = 4 + SERVICE_CONTINUE_PENDING = 5 + SERVICE_PAUSE_PENDING = 6 + SERVICE_PAUSED = 7 + SERVICE_NO_CHANGE = 0xffffffff + + SERVICE_ACCEPT_STOP = 1 + SERVICE_ACCEPT_PAUSE_CONTINUE = 2 + SERVICE_ACCEPT_SHUTDOWN = 4 + SERVICE_ACCEPT_PARAMCHANGE = 8 + SERVICE_ACCEPT_NETBINDCHANGE = 16 + SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 + SERVICE_ACCEPT_POWEREVENT = 64 + SERVICE_ACCEPT_SESSIONCHANGE = 128 + + SERVICE_CONTROL_STOP = 1 + SERVICE_CONTROL_PAUSE = 2 + SERVICE_CONTROL_CONTINUE = 3 + SERVICE_CONTROL_INTERROGATE = 4 + SERVICE_CONTROL_SHUTDOWN = 5 + SERVICE_CONTROL_PARAMCHANGE = 6 + SERVICE_CONTROL_NETBINDADD = 7 + SERVICE_CONTROL_NETBINDREMOVE = 8 + SERVICE_CONTROL_NETBINDENABLE = 9 + SERVICE_CONTROL_NETBINDDISABLE = 10 + SERVICE_CONTROL_DEVICEEVENT = 11 + SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 + SERVICE_CONTROL_POWEREVENT = 13 + SERVICE_CONTROL_SESSIONCHANGE = 14 + + SERVICE_ACTIVE = 1 + SERVICE_INACTIVE = 2 + SERVICE_STATE_ALL = 3 + + SERVICE_QUERY_CONFIG = 1 + SERVICE_CHANGE_CONFIG = 2 + SERVICE_QUERY_STATUS = 4 + SERVICE_ENUMERATE_DEPENDENTS = 8 + SERVICE_START = 16 + SERVICE_STOP = 32 + SERVICE_PAUSE_CONTINUE = 64 + SERVICE_INTERROGATE = 128 + SERVICE_USER_DEFINED_CONTROL = 256 + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL + + SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 + + SERVICE_CONFIG_DESCRIPTION = 1 + SERVICE_CONFIG_FAILURE_ACTIONS = 2 + SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 3 + SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 4 + SERVICE_CONFIG_SERVICE_SID_INFO = 5 + SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6 + SERVICE_CONFIG_PRESHUTDOWN_INFO = 7 + SERVICE_CONFIG_TRIGGER_INFO = 8 + SERVICE_CONFIG_PREFERRED_NODE = 9 + SERVICE_CONFIG_LAUNCH_PROTECTED = 12 + + SERVICE_SID_TYPE_NONE = 0 + SERVICE_SID_TYPE_UNRESTRICTED = 1 + SERVICE_SID_TYPE_RESTRICTED = 2 | SERVICE_SID_TYPE_UNRESTRICTED + + SC_ENUM_PROCESS_INFO = 0 + + SERVICE_NOTIFY_STATUS_CHANGE = 2 + SERVICE_NOTIFY_STOPPED = 0x00000001 + SERVICE_NOTIFY_START_PENDING = 
0x00000002 + SERVICE_NOTIFY_STOP_PENDING = 0x00000004 + SERVICE_NOTIFY_RUNNING = 0x00000008 + SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010 + SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020 + SERVICE_NOTIFY_PAUSED = 0x00000040 + SERVICE_NOTIFY_CREATED = 0x00000080 + SERVICE_NOTIFY_DELETED = 0x00000100 + SERVICE_NOTIFY_DELETE_PENDING = 0x00000200 +) + +type SERVICE_STATUS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 +} + +type SERVICE_TABLE_ENTRY struct { + ServiceName *uint16 + ServiceProc uintptr +} + +type QUERY_SERVICE_CONFIG struct { + ServiceType uint32 + StartType uint32 + ErrorControl uint32 + BinaryPathName *uint16 + LoadOrderGroup *uint16 + TagId uint32 + Dependencies *uint16 + ServiceStartName *uint16 + DisplayName *uint16 +} + +type SERVICE_DESCRIPTION struct { + Description *uint16 +} + +type SERVICE_DELAYED_AUTO_START_INFO struct { + IsDelayedAutoStartUp uint32 +} + +type SERVICE_STATUS_PROCESS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 + ProcessId uint32 + ServiceFlags uint32 +} + +type ENUM_SERVICE_STATUS_PROCESS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatusProcess SERVICE_STATUS_PROCESS +} + +type SERVICE_NOTIFY struct { + Version uint32 + NotifyCallback uintptr + Context uintptr + NotificationStatus uint32 + ServiceStatus SERVICE_STATUS_PROCESS + NotificationTriggered uint32 + ServiceNames *uint16 +} + +type SERVICE_FAILURE_ACTIONS struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions *SC_ACTION +} + +type SC_ACTION struct { + Type uint32 + Delay uint32 +} + +type QUERY_SERVICE_LOCK_STATUS struct { + IsLocked uint32 + LockOwner *uint16 + LockDuration uint32 +} + +//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle +//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW +//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW +//sys DeleteService(service Handle) (err error) = advapi32.DeleteService +//sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW +//sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus +//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW +//sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService +//sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW +//sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus +//sys ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) = 
advapi32.ChangeServiceConfigW +//sys QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW +//sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W +//sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W +//sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW +//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx +//sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go new file mode 100644 index 00000000..917cc2aa --- /dev/null +++ b/vendor/golang.org/x/sys/windows/str.go @@ -0,0 +1,22 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +func itoa(val int) string { // do it here rather than with fmt to avoid dependency + if val < 0 { + return "-" + itoa(-val) + } + var buf [32]byte // big enough for int64 + i := len(buf) - 1 + for val >= 10 { + buf[i] = byte(val%10 + '0') + i-- + val /= 10 + } + buf[i] = byte(val + '0') + return string(buf[i:]) +} diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go new file mode 100644 index 00000000..af828a91 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -0,0 +1,74 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package windows contains an interface to the low-level operating system +// primitives. OS details vary depending on the underlying system, and +// by default, godoc will display the OS-specific documentation for the current +// system. If you want godoc to display syscall documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if +// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS +// to freebsd and $GOARCH to arm. +// +// The primary use of this package is inside other packages that provide a more +// portable interface to the system, such as "os", "time" and "net". Use +// those packages rather than this one if you can. +// +// For details of the functions and data types in this package consult +// the manuals for the appropriate operating system. +// +// These calls return err == nil to indicate success; otherwise +// err represents an operating system error describing the failure and +// holds a value of type syscall.Errno. +package windows // import "golang.org/x/sys/windows" + +import ( + "syscall" +) + +// ByteSliceFromString returns a NUL-terminated slice of bytes +// containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). 
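Looking back at the service-manager bindings vendored above (syscall.go's ByteSliceFromString body resumes below): a typical read-only consumer connects to the local SCM, opens a service by name, and polls its state. A sketch, with "Spooler" as an arbitrary example service:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// nil machine and database names select the local, active SCM database.
	mgr, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_CONNECT)
	if err != nil {
		panic(err)
	}
	defer windows.CloseServiceHandle(mgr)

	name, err := windows.UTF16PtrFromString("Spooler")
	if err != nil {
		panic(err)
	}
	svc, err := windows.OpenService(mgr, name, windows.SERVICE_QUERY_STATUS)
	if err != nil {
		panic(err)
	}
	defer windows.CloseServiceHandle(svc)

	var status windows.SERVICE_STATUS
	if err := windows.QueryServiceStatus(svc, &status); err != nil {
		panic(err)
	}
	fmt.Println("running:", status.CurrentState == windows.SERVICE_RUNNING)
}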
+func ByteSliceFromString(s string) ([]byte, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, syscall.EINVAL + } + } + a := make([]byte, len(s)+1) + copy(a, s) + return a, nil +} + +// BytePtrFromString returns a pointer to a NUL-terminated array of +// bytes containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func BytePtrFromString(s string) (*byte, error) { + a, err := ByteSliceFromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +// Single-word zero for use when we need a valid pointer to 0 bytes. +// See mksyscall.pl. +var _zero uintptr + +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go new file mode 100644 index 00000000..33513e34 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -0,0 +1,1372 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows system calls. + +package windows + +import ( + errorspkg "errors" + "sync" + "syscall" + "time" + "unicode/utf16" + "unsafe" +) + +type Handle uintptr + +const ( + InvalidHandle = ^Handle(0) + + // Flags for DefineDosDevice. + DDD_EXACT_MATCH_ON_REMOVE = 0x00000004 + DDD_NO_BROADCAST_SYSTEM = 0x00000008 + DDD_RAW_TARGET_PATH = 0x00000001 + DDD_REMOVE_DEFINITION = 0x00000002 + + // Return values for GetDriveType. + DRIVE_UNKNOWN = 0 + DRIVE_NO_ROOT_DIR = 1 + DRIVE_REMOVABLE = 2 + DRIVE_FIXED = 3 + DRIVE_REMOTE = 4 + DRIVE_CDROM = 5 + DRIVE_RAMDISK = 6 + + // File system flags from GetVolumeInformation and GetVolumeInformationByHandle. + FILE_CASE_SENSITIVE_SEARCH = 0x00000001 + FILE_CASE_PRESERVED_NAMES = 0x00000002 + FILE_FILE_COMPRESSION = 0x00000010 + FILE_DAX_VOLUME = 0x20000000 + FILE_NAMED_STREAMS = 0x00040000 + FILE_PERSISTENT_ACLS = 0x00000008 + FILE_READ_ONLY_VOLUME = 0x00080000 + FILE_SEQUENTIAL_WRITE_ONCE = 0x00100000 + FILE_SUPPORTS_ENCRYPTION = 0x00020000 + FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000 + FILE_SUPPORTS_HARD_LINKS = 0x00400000 + FILE_SUPPORTS_OBJECT_IDS = 0x00010000 + FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000 + FILE_SUPPORTS_REPARSE_POINTS = 0x00000080 + FILE_SUPPORTS_SPARSE_FILES = 0x00000040 + FILE_SUPPORTS_TRANSACTIONS = 0x00200000 + FILE_SUPPORTS_USN_JOURNAL = 0x02000000 + FILE_UNICODE_ON_DISK = 0x00000004 + FILE_VOLUME_IS_COMPRESSED = 0x00008000 + FILE_VOLUME_QUOTAS = 0x00000020 + + // Flags for LockFileEx. + LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 + LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 + + // Return values of SleepEx and other APC functions + STATUS_USER_APC = 0x000000C0 + WAIT_IO_COMPLETION = STATUS_USER_APC +) + +// StringToUTF16 is deprecated. Use UTF16FromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16(s string) []uint16 { + a, err := UTF16FromString(s) + if err != nil { + panic("windows: string with NUL passed to StringToUTF16") + } + return a +} + +// UTF16FromString returns the UTF-16 encoding of the UTF-8 string +// s, with a terminating NUL added. 
If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func UTF16FromString(s string) ([]uint16, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, syscall.EINVAL + } + } + return utf16.Encode([]rune(s + "\x00")), nil +} + +// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, +// with a terminating NUL removed. +func UTF16ToString(s []uint16) string { + for i, v := range s { + if v == 0 { + s = s[0:i] + break + } + } + return string(utf16.Decode(s)) +} + +// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] } + +// UTF16PtrFromString returns a pointer to the UTF-16 encoding of +// the UTF-8 string s, with a terminating NUL added. If s +// contains a NUL byte at any location, it returns (nil, syscall.EINVAL). +func UTF16PtrFromString(s string) (*uint16, error) { + a, err := UTF16FromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +func Getpagesize() int { return 4096 } + +// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. +// This is useful when interoperating with Windows code requiring callbacks. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +func NewCallback(fn interface{}) uintptr { + return syscall.NewCallback(fn) +} + +// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. +// This is useful when interoperating with Windows code requiring callbacks. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
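These conversion helpers are the glue for every W-suffixed call in this file: Go string in, NUL-terminated *uint16 out, and UTF16ToString for the reverse trip (NewCallbackCDecl's one-line body follows below). An illustrative sketch using GetFileAttributes, which is declared further down in this file:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	p, err := windows.UTF16PtrFromString(`C:\Windows`) // fails only on an embedded NUL
	if err != nil {
		panic(err)
	}
	attrs, err := windows.GetFileAttributes(p)
	if err != nil {
		panic(err)
	}
	fmt.Printf("attributes of C:\\Windows: %#x\n", attrs)

	buf, _ := windows.UTF16FromString("hello")
	fmt.Println(windows.UTF16ToString(buf)) // prints "hello"
}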
+func NewCallbackCDecl(fn interface{}) uintptr { + return syscall.NewCallbackCDecl(fn) +} + +// windows api calls + +//sys GetLastError() (lasterr error) +//sys LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW +//sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW +//sys FreeLibrary(handle Handle) (err error) +//sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) +//sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW +//sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW +//sys GetVersion() (ver uint32, err error) +//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW +//sys ExitProcess(exitcode uint32) +//sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process +//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) +//sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] +//sys CloseHandle(handle Handle) (err error) +//sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle] +//sys SetStdHandle(stdhandle uint32, handle Handle) (err error) +//sys findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW +//sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW +//sys FindClose(handle Handle) (err error) +//sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) +//sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) +//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW +//sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW +//sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW +//sys RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW +//sys DeleteFile(path *uint16) (err error) = DeleteFileW +//sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW +//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW +//sys LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) +//sys UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) +//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW +//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW +//sys SetEndOfFile(handle Handle) (err error) +//sys GetSystemTimeAsFileTime(time *Filetime) +//sys GetSystemTimePreciseAsFileTime(time *Filetime) +//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) 
[failretval==0xffffffff] +//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) +//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) +//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) +//sys CancelIo(s Handle) (err error) +//sys CancelIoEx(s Handle, o *Overlapped) (err error) +//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW +//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) +//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW +//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath +//sys TerminateProcess(handle Handle, exitcode uint32) (err error) +//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) +//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) +//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) +//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] +//sys waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] = WaitForMultipleObjects +//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW +//sys CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) +//sys GetFileType(filehandle Handle) (n uint32, err error) +//sys CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = advapi32.CryptAcquireContextW +//sys CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext +//sys CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom +//sys GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW +//sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW +//sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW +//sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW +//sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock +//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock +//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) +//sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = 
kernel32.GetFileAttributesW +//sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW +//sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW +//sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW +//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] +//sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) +//sys FlushFileBuffers(handle Handle) (err error) +//sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW +//sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW +//sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW +//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW +//sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) +//sys UnmapViewOfFile(addr uintptr) (err error) +//sys FlushViewOfFile(addr uintptr, length uintptr) (err error) +//sys VirtualLock(addr uintptr, length uintptr) (err error) +//sys VirtualUnlock(addr uintptr, length uintptr) (err error) +//sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc +//sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree +//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect +//sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile +//sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW +//sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW +//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) [failretval==InvalidHandle] = crypt32.CertOpenStore +//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore +//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore +//sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore +//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain +//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain +//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err 
error) [failretval==nil] = crypt32.CertCreateCertificateContext +//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext +//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy +//sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW +//sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey +//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW +//sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW +//sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW +//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode +//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode +//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo +//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW +//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot +//sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW +//sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW +//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) +//sys Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) +//sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. 
+//sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW +//sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW +//sys GetCurrentThreadId() (id uint32) +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW +//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW +//sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW +//sys SetEvent(event Handle) (err error) = kernel32.SetEvent +//sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent +//sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent +//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) = kernel32.CreateMutexW +//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateMutexExW +//sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW +//sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex +//sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx +//sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW +//sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject +//sys TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject +//sys SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode +//sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread +//sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass +//sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass +//sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) +//sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) +//sys GetProcessId(process Handle) (id uint32, err error) +//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) +//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost + +// Volume Management Functions +//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW +//sys DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW +//sys FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeW +//sys FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW +//sys FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW +//sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW 
+//sys FindVolumeClose(findVolume Handle) (err error) +//sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) +//sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW +//sys GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0] +//sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW +//sys GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW +//sys GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW +//sys GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW +//sys GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW +//sys GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW +//sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW +//sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW +//sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW +//sys MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW +//sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx +//sys InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) = advapi32.InitiateSystemShutdownExW +//sys SetProcessShutdownParameters(level uint32, flags uint32) (err error) = kernel32.SetProcessShutdownParameters +//sys GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) = kernel32.GetProcessShutdownParameters +//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString +//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 +//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid +//sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree +//sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion +//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers + +// syscall interface implementation for other packages + +// GetCurrentProcess returns the handle for the current process. +// It is a pseudo handle that does not need to be closed. +// The returned error is always nil. +// +// Deprecated: use CurrentProcess for the same Handle without the nil +// error. +func GetCurrentProcess() (Handle, error) { + return CurrentProcess(), nil +} + +// CurrentProcess returns the handle for the current process. +// It is a pseudo handle that does not need to be closed. 
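+//
+// An illustrative use (editorial sketch, not upstream documentation): the
+// pseudo handle is the constant -1, so it can be passed anywhere a process
+// Handle is expected, with no CloseHandle bookkeeping:
+//
+//	err := SetPriorityClass(CurrentProcess(), NORMAL_PRIORITY_CLASS)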
+func CurrentProcess() Handle { return Handle(^uintptr(1 - 1)) } + +// GetCurrentThread returns the handle for the current thread. +// It is a pseudo handle that does not need to be closed. +// The returned error is always nil. +// +// Deprecated: use CurrentThread for the same Handle without the nil +// error. +func GetCurrentThread() (Handle, error) { + return CurrentThread(), nil +} + +// CurrentThread returns the handle for the current thread. +// It is a pseudo handle that does not need to be closed. +func CurrentThread() Handle { return Handle(^uintptr(2 - 1)) } + +// GetProcAddressByOrdinal retrieves the address of the exported +// function from module by ordinal. +func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) + proc = uintptr(r0) + if proc == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Exit(code int) { ExitProcess(uint32(code)) } + +func makeInheritSa() *SecurityAttributes { + var sa SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func Open(path string, mode int, perm uint32) (fd Handle, err error) { + if len(path) == 0 { + return InvalidHandle, ERROR_FILE_NOT_FOUND + } + pathp, err := UTF16PtrFromString(path) + if err != nil { + return InvalidHandle, err + } + var access uint32 + switch mode & (O_RDONLY | O_WRONLY | O_RDWR) { + case O_RDONLY: + access = GENERIC_READ + case O_WRONLY: + access = GENERIC_WRITE + case O_RDWR: + access = GENERIC_READ | GENERIC_WRITE + } + if mode&O_CREAT != 0 { + access |= GENERIC_WRITE + } + if mode&O_APPEND != 0 { + access &^= GENERIC_WRITE + access |= FILE_APPEND_DATA + } + sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE) + var sa *SecurityAttributes + if mode&O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL): + createmode = CREATE_NEW + case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC): + createmode = CREATE_ALWAYS + case mode&O_CREAT == O_CREAT: + createmode = OPEN_ALWAYS + case mode&O_TRUNC == O_TRUNC: + createmode = TRUNCATE_EXISTING + default: + createmode = OPEN_EXISTING + } + h, e := CreateFile(pathp, access, sharemode, sa, createmode, FILE_ATTRIBUTE_NORMAL, 0) + return h, e +} + +func Read(fd Handle, p []byte) (n int, err error) { + var done uint32 + e := ReadFile(fd, p, &done, nil) + if e != nil { + if e == ERROR_BROKEN_PIPE { + // NOTE(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF from stdin + return 0, nil + } + return 0, e + } + if raceenabled { + if done > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), int(done)) + } + raceAcquire(unsafe.Pointer(&ioSync)) + } + return int(done), nil +} + +func Write(fd Handle, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + var done uint32 + e := WriteFile(fd, p, &done, nil) + if e != nil { + return 0, e + } + if raceenabled && done > 0 { + raceReadRange(unsafe.Pointer(&p[0]), int(done)) + } + return int(done), nil +} + +var ioSync int64 + +func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { + var w uint32 + switch whence { + case 0: + w = FILE_BEGIN + case 1: + w = FILE_CURRENT + case 2: + w = FILE_END + } + hi := int32(offset >> 32) + lo := int32(offset) + // use GetFileType to check pipe, pipe can't do seek + ft, _ := GetFileType(fd) + if ft == FILE_TYPE_PIPE { + return 0, 
syscall.EPIPE + } + rlo, e := SetFilePointer(fd, lo, &hi, w) + if e != nil { + return 0, e + } + return int64(hi)<<32 + int64(rlo), nil +} + +func Close(fd Handle) (err error) { + return CloseHandle(fd) +} + +var ( + Stdin = getStdHandle(STD_INPUT_HANDLE) + Stdout = getStdHandle(STD_OUTPUT_HANDLE) + Stderr = getStdHandle(STD_ERROR_HANDLE) +) + +func getStdHandle(stdhandle uint32) (fd Handle) { + r, _ := GetStdHandle(stdhandle) + CloseOnExec(r) + return r +} + +const ImplementsGetwd = true + +func Getwd() (wd string, err error) { + b := make([]uint16, 300) + n, e := GetCurrentDirectory(uint32(len(b)), &b[0]) + if e != nil { + return "", e + } + return string(utf16.Decode(b[0:n])), nil +} + +func Chdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return SetCurrentDirectory(pathp) +} + +func Mkdir(path string, mode uint32) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return CreateDirectory(pathp, nil) +} + +func Rmdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return RemoveDirectory(pathp) +} + +func Unlink(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return DeleteFile(pathp) +} + +func Rename(oldpath, newpath string) (err error) { + from, err := UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := UTF16PtrFromString(newpath) + if err != nil { + return err + } + return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) +} + +func ComputerName() (name string, err error) { + var n uint32 = MAX_COMPUTERNAME_LENGTH + 1 + b := make([]uint16, n) + e := GetComputerName(&b[0], &n) + if e != nil { + return "", e + } + return string(utf16.Decode(b[0:n])), nil +} + +func DurationSinceBoot() time.Duration { + return time.Duration(getTickCount64()) * time.Millisecond +} + +func Ftruncate(fd Handle, length int64) (err error) { + curoffset, e := Seek(fd, 0, 1) + if e != nil { + return e + } + defer Seek(fd, curoffset, 0) + _, e = Seek(fd, length, 0) + if e != nil { + return e + } + e = SetEndOfFile(fd) + if e != nil { + return e + } + return nil +} + +func Gettimeofday(tv *Timeval) (err error) { + var ft Filetime + GetSystemTimeAsFileTime(&ft) + *tv = NsecToTimeval(ft.Nanoseconds()) + return nil +} + +func Pipe(p []Handle) (err error) { + if len(p) != 2 { + return syscall.EINVAL + } + var r, w Handle + e := CreatePipe(&r, &w, makeInheritSa(), 0) + if e != nil { + return e + } + p[0] = r + p[1] = w + return nil +} + +func Utimes(path string, tv []Timeval) (err error) { + if len(tv) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer Close(h) + a := NsecToFiletime(tv[0].Nanoseconds()) + w := NsecToFiletime(tv[1].Nanoseconds()) + return SetFileTime(h, nil, &a, &w) +} + +func UtimesNano(path string, ts []Timespec) (err error) { + if len(ts) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer Close(h) + a := NsecToFiletime(TimespecToNsec(ts[0])) + w := NsecToFiletime(TimespecToNsec(ts[1])) + return SetFileTime(h, nil, &a, &w) +} + +func 
Fsync(fd Handle) (err error) { + return FlushFileBuffers(fd) +} + +func Chmod(path string, mode uint32) (err error) { + p, e := UTF16PtrFromString(path) + if e != nil { + return e + } + attrs, e := GetFileAttributes(p) + if e != nil { + return e + } + if mode&S_IWRITE != 0 { + attrs &^= FILE_ATTRIBUTE_READONLY + } else { + attrs |= FILE_ATTRIBUTE_READONLY + } + return SetFileAttributes(p, attrs) +} + +func LoadGetSystemTimePreciseAsFileTime() error { + return procGetSystemTimePreciseAsFileTime.Find() +} + +func LoadCancelIoEx() error { + return procCancelIoEx.Find() +} + +func LoadSetFileCompletionNotificationModes() error { + return procSetFileCompletionNotificationModes.Find() +} + +func WaitForMultipleObjects(handles []Handle, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { + // Every other win32 array API takes arguments as "pointer, count", except for this function. So we + // can't declare it as a usual [] type, because mksyscall will use the opposite order. We therefore + // trivially stub this ourselves. + + var handlePtr *Handle + if len(handles) > 0 { + handlePtr = &handles[0] + } + return waitForMultipleObjects(uint32(len(handles)), uintptr(unsafe.Pointer(handlePtr)), waitAll, waitMilliseconds) +} + +// net api calls + +const socket_error = uintptr(^uint32(0)) + +//sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup +//sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup +//sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket +//sys Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt +//sys Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt +//sys bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind +//sys connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect +//sys getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname +//sys getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername +//sys listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen +//sys shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown +//sys Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket +//sys AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx +//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs +//sys WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv +//sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) 
[failretval==socket_error] = ws2_32.WSASend +//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom +//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo +//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname +//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname +//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs +//sys GetProtoByName(name string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname +//sys DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W +//sys DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree +//sys DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W +//sys GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = ws2_32.GetAddrInfoW +//sys FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW +//sys GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry +//sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo +//sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes +//sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW +//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses +//sys GetACP() (acp uint32) = kernel32.GetACP +//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. 
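+//
+// A minimal sketch of the hook (editorial addition), given the check in
+// Socket below:
+//
+//	SocketDisableIPv6 = true
+//	_, err := Socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP)
+//	// err == syscall.EAFNOSUPPORT; no socket is created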
+var SocketDisableIPv6 bool + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [100]int8 +} + +type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs +} + +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type RawSockaddrUnix struct { + Family uint16 + Path [UNIX_PATH_MAX]int8 +} + +type SockaddrUnix struct { + Name string + raw RawSockaddrUnix +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { + name := sa.Name + n := len(name) + if n > len(sa.raw.Path) { + return nil, 0, syscall.EINVAL + } + if n == len(sa.raw.Path) && name[0] != '@' { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + // length is family (uint16), name, NUL. + sl := int32(2) + if n > 0 { + sl += int32(n) + 1 + } + if sa.raw.Path[0] == '@' { + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- + } + + return unsafe.Pointer(&sa.raw), sl, nil +} + +func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + if pp.Path[0] == 0 { + // "Abstract" Unix domain socket. + // Rewrite leading NUL as @ for textual display. + // (This is the standard convention.) + // Not friendly to overwrite in place, + // but the callers below don't care. + pp.Path[0] = '@' + } + + // Assume path ends at NUL. + // This is not technically the Linux semantics for + // abstract Unix domain sockets--they are supposed + // to be uninterpreted fixed-size binary blobs--but + // everyone uses this convention. 
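+ // (Editorial illustration: an abstract address such as "\x00name"
+ // therefore surfaces from this package as "@name"; SockaddrUnix.sockaddr
+ // above performs the inverse rewrite, turning a leading '@' back into
+ // NUL and leaving the trailing NUL out of the reported length.)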
+ n := 0 + for n < len(pp.Path) && pp.Path[n] != 0 { + n++ + } + bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, syscall.EAFNOSUPPORT +} + +func Socket(domain, typ, proto int) (fd Handle, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return InvalidHandle, syscall.EAFNOSUPPORT + } + return socket(int32(domain), int32(typ), int32(proto)) +} + +func SetsockoptInt(fd Handle, level, opt int, value int) (err error) { + v := int32(value) + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))) +} + +func Bind(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func Getsockname(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getsockname(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Getpeername(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getpeername(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Listen(s Handle, n int) (err error) { + return listen(s, int32(n)) +} + +func Shutdown(fd Handle, how int) (err error) { + return shutdown(fd, int32(how)) +} + +func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { + rsa, l, err := to.sockaddr() + if err != nil { + return err + } + return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) +} + +func LoadGetAddrInfo() error { + return procGetAddrInfoW.Find() +} + +var connectExFunc struct { + once sync.Once + addr uintptr + err error +} + +func LoadConnectEx() error { + connectExFunc.once.Do(func() { + var s Handle + s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) + if connectExFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + connectExFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_CONNECTEX)), + uint32(unsafe.Sizeof(WSAID_CONNECTEX)), + (*byte)(unsafe.Pointer(&connectExFunc.addr)), + uint32(unsafe.Sizeof(connectExFunc.addr)), + &n, nil, 0) + }) + return connectExFunc.err +} + +func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + 
return +} + +func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error { + err := LoadConnectEx() + if err != nil { + return errorspkg.New("failed to find ConnectEx: " + err.Error()) + } + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +var sendRecvMsgFunc struct { + once sync.Once + sendAddr uintptr + recvAddr uintptr + err error +} + +func loadWSASendRecvMsg() error { + sendRecvMsgFunc.once.Do(func() { + var s Handle + s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP) + if sendRecvMsgFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), + uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), + &n, nil, 0) + if sendRecvMsgFunc.err != nil { + return + } + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), + uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), + &n, nil, 0) + }) + return sendRecvMsgFunc.err +} + +func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +// Invented structures to support what package os expects. +type Rusage struct { + CreationTime Filetime + ExitTime Filetime + KernelTime Filetime + UserTime Filetime +} + +type WaitStatus struct { + ExitCode uint32 +} + +func (w WaitStatus) Exited() bool { return true } + +func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) } + +func (w WaitStatus) Signal() Signal { return -1 } + +func (w WaitStatus) CoreDump() bool { return false } + +func (w WaitStatus) Stopped() bool { return false } + +func (w WaitStatus) Continued() bool { return false } + +func (w WaitStatus) StopSignal() Signal { return -1 } + +func (w WaitStatus) Signaled() bool { return false } + +func (w WaitStatus) TrapCause() int { return -1 } + +// Timespec is an invented structure on Windows, but here for +// consistency with the corresponding package for other operating systems. 
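+//
+// For example (editorial sketch), the two conversions below are exact
+// inverses for non-negative values:
+//
+//	ts := NsecToTimespec(1500000001) // Timespec{Sec: 1, Nsec: 500000001}
+//	ns := TimespecToNsec(ts)         // 1500000001 again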
+type Timespec struct { + Sec int64 + Nsec int64 +} + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +// TODO(brainman): fix all needed for net + +func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS } +func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) { + return 0, nil, syscall.EWINDOWS +} +func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) { return syscall.EWINDOWS } +func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS } + +// The Linger struct is wrong but we only noticed after Go 1. +// sysLinger is the real system call structure. + +// BUG(brainman): The definition of Linger is not appropriate for direct use +// with Setsockopt and Getsockopt. +// Use SetsockoptLinger instead. + +type Linger struct { + Onoff int32 + Linger int32 +} + +type sysLinger struct { + Onoff uint16 + Linger uint16 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +func GetsockoptInt(fd Handle, level, opt int) (int, error) { return -1, syscall.EWINDOWS } + +func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { + sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)} + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys))) +} + +func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) +} +func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) +} +func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { + return syscall.EWINDOWS +} + +func Getpid() (pid int) { return int(GetCurrentProcessId()) } + +func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { + // NOTE(rsc): The Win32finddata struct is wrong for the system call: + // the two paths are each one uint16 short. Use the correct struct, + // a win32finddata1, and then copy the results out. + // There is no loss of expressivity here, because the final + // uint16, if it is used, is supposed to be a NUL, and Go doesn't need that. + // For Go 1.1, we might avoid the allocation of win32finddata1 here + // by adding a final Bug [2]uint16 field to the struct and then + // adjusting the fields in the result directly. 
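+ // (Editorial note: the copy is done by copyFindData in types_windows.go,
+ // which relies on that extra trailing element being NUL; since copy stops
+ // at the shorter destination, copy(dst.FileName[:], src.FileName[:])
+ // silently drops it.)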
+ var data1 win32finddata1 + handle, err = findFirstFile1(name, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func FindNextFile(handle Handle, data *Win32finddata) (err error) { + var data1 win32finddata1 + err = findNextFile1(handle, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func getProcessEntry(pid int) (*ProcessEntry32, error) { + snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer CloseHandle(snapshot) + var procEntry ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +func Getppid() (ppid int) { + pe, err := getProcessEntry(Getpid()) + if err != nil { + return -1 + } + return int(pe.ParentProcessID) +} + +// TODO(brainman): fix all needed for os +func Fchdir(fd Handle) (err error) { return syscall.EWINDOWS } +func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS } +func Symlink(path, link string) (err error) { return syscall.EWINDOWS } + +func Fchmod(fd Handle, mode uint32) (err error) { return syscall.EWINDOWS } +func Chown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Fchown(fd Handle, uid int, gid int) (err error) { return syscall.EWINDOWS } + +func Getuid() (uid int) { return -1 } +func Geteuid() (euid int) { return -1 } +func Getgid() (gid int) { return -1 } +func Getegid() (egid int) { return -1 } +func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } + +type Signal int + +func (s Signal) Signal() {} + +func (s Signal) String() string { + if 0 <= s && int(s) < len(signals) { + str := signals[s] + if str != "" { + return str + } + } + return "signal " + itoa(int(s)) +} + +func LoadCreateSymbolicLink() error { + return procCreateSymbolicLinkW.Find() +} + +// Readlink returns the destination of the named symbolic link. 
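+//
+// A minimal usage sketch (editorial addition; the link path is hypothetical):
+//
+//	buf := make([]byte, MAX_LONG_PATH)
+//	n, err := Readlink(`C:\some\link`, buf)
+//	if err == nil {
+//		target := string(buf[:n]) // e.g. `C:\some\target`
+//		_ = target
+//	}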
+func Readlink(path string, buf []byte) (n int, err error) { + fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING, + FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return -1, err + } + defer CloseHandle(fd) + + rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return -1, err + } + + rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) + var s string + switch rdb.ReparseTag { + case IO_REPARSE_TAG_SYMLINK: + data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) + case IO_REPARSE_TAG_MOUNT_POINT: + data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) + default: + // the path is not a symlink or junction but another type of reparse + // point + return -1, syscall.ENOENT + } + n = copy(buf, []byte(s)) + + return n, nil +} + +// GUIDFromString parses a string in the form of +// "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID. +func GUIDFromString(str string) (GUID, error) { + guid := GUID{} + str16, err := syscall.UTF16PtrFromString(str) + if err != nil { + return guid, err + } + err = clsidFromString(str16, &guid) + if err != nil { + return guid, err + } + return guid, nil +} + +// GenerateGUID creates a new random GUID. +func GenerateGUID() (GUID, error) { + guid := GUID{} + err := coCreateGuid(&guid) + if err != nil { + return guid, err + } + return guid, nil +} + +// String returns the canonical string form of the GUID, +// in the form of "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". +func (guid GUID) String() string { + var str [100]uint16 + chars := stringFromGUID2(&guid, &str[0], int32(len(str))) + if chars <= 1 { + return "" + } + return string(utf16.Decode(str[:chars-1])) +} + +// KnownFolderPath returns a well-known folder path for the current user, specified by one of +// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. +func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { + return Token(0).KnownFolderPath(folderID, flags) +} + +// KnownFolderPath returns a well-known folder path for the user token, specified by one of +// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. +func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { + var p *uint16 + err := shGetKnownFolderPath(folderID, flags, t, &p) + if err != nil { + return "", err + } + defer CoTaskMemFree(unsafe.Pointer(p)) + return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil +} + +// RtlGetVersion returns the version of the underlying operating system, ignoring +// manifest semantics, but it is affected by the application compatibility layer. +func RtlGetVersion() *OsVersionInfoEx { + info := &OsVersionInfoEx{} + info.osVersionInfoSize = uint32(unsafe.Sizeof(*info)) + // According to documentation, this function always succeeds. + // The function doesn't even check the validity of the + // osVersionInfoSize member. Disassembling ntdll.dll indicates + // that the documentation is indeed correct about that.
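+ // (Editorial sketch of typical use, with hypothetical output:
+ //
+ //	info := RtlGetVersion()
+ //	// info.MajorVersion, info.MinorVersion, info.BuildNumber might be
+ //	// 10, 0 and 17763 on a Windows Server 2019 system.
+ //
+ // Because the call cannot fail, its return value is discarded below.)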
+ _ = rtlGetVersion(info) + return info +} + +// RtlGetNtVersionNumbers returns the version of the underlying operating system, +// ignoring manifest semantics and the application compatibility layer. +func RtlGetNtVersionNumbers() (majorVersion, minorVersion, buildNumber uint32) { + rtlGetNtVersionNumbers(&majorVersion, &minorVersion, &buildNumber) + buildNumber &= 0xffff + return +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go new file mode 100644 index 00000000..7f178bb9 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -0,0 +1,1744 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "net" + "syscall" + "unsafe" +) + +const ( + // Invented values to support what package os expects. + O_RDONLY = 0x00000 + O_WRONLY = 0x00001 + O_RDWR = 0x00002 + O_CREAT = 0x00040 + O_EXCL = 0x00080 + O_NOCTTY = 0x00100 + O_TRUNC = 0x00200 + O_NONBLOCK = 0x00800 + O_APPEND = 0x00400 + O_SYNC = 0x01000 + O_ASYNC = 0x02000 + O_CLOEXEC = 0x80000 +) + +const ( + // More invented values for signals + SIGHUP = Signal(0x1) + SIGINT = Signal(0x2) + SIGQUIT = Signal(0x3) + SIGILL = Signal(0x4) + SIGTRAP = Signal(0x5) + SIGABRT = Signal(0x6) + SIGBUS = Signal(0x7) + SIGFPE = Signal(0x8) + SIGKILL = Signal(0x9) + SIGSEGV = Signal(0xb) + SIGPIPE = Signal(0xd) + SIGALRM = Signal(0xe) + SIGTERM = Signal(0xf) +) + +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", +} + +const ( + FILE_LIST_DIRECTORY = 0x00000001 + FILE_APPEND_DATA = 0x00000004 + FILE_WRITE_ATTRIBUTES = 0x00000100 + + FILE_SHARE_READ = 0x00000001 + FILE_SHARE_WRITE = 0x00000002 + FILE_SHARE_DELETE = 0x00000004 + + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + FILE_ATTRIBUTE_DEVICE = 0x00000040 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_TEMPORARY = 0x00000100 + FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + FILE_ATTRIBUTE_COMPRESSED = 0x00000800 + FILE_ATTRIBUTE_OFFLINE = 0x00001000 + FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 + FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 + FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 + FILE_ATTRIBUTE_VIRTUAL = 0x00010000 + FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 + FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000 + FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000 + + INVALID_FILE_ATTRIBUTES = 0xffffffff + + CREATE_NEW = 1 + CREATE_ALWAYS = 2 + OPEN_EXISTING = 3 + OPEN_ALWAYS = 4 + TRUNCATE_EXISTING = 5 + + FILE_FLAG_OPEN_REQUIRING_OPLOCK = 0x00040000 + FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000 + FILE_FLAG_OPEN_NO_RECALL = 0x00100000 + FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 + FILE_FLAG_SESSION_AWARE = 0x00800000 + FILE_FLAG_POSIX_SEMANTICS = 0x01000000 + FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 + FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 + FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000 + FILE_FLAG_RANDOM_ACCESS = 0x10000000 + FILE_FLAG_NO_BUFFERING = 0x20000000 + FILE_FLAG_OVERLAPPED = 0x40000000 + 
FILE_FLAG_WRITE_THROUGH = 0x80000000 + + HANDLE_FLAG_INHERIT = 0x00000001 + STARTF_USESTDHANDLES = 0x00000100 + STARTF_USESHOWWINDOW = 0x00000001 + DUPLICATE_CLOSE_SOURCE = 0x00000001 + DUPLICATE_SAME_ACCESS = 0x00000002 + + STD_INPUT_HANDLE = -10 & (1<<32 - 1) + STD_OUTPUT_HANDLE = -11 & (1<<32 - 1) + STD_ERROR_HANDLE = -12 & (1<<32 - 1) + + FILE_BEGIN = 0 + FILE_CURRENT = 1 + FILE_END = 2 + + LANG_ENGLISH = 0x09 + SUBLANG_ENGLISH_US = 0x01 + + FORMAT_MESSAGE_ALLOCATE_BUFFER = 256 + FORMAT_MESSAGE_IGNORE_INSERTS = 512 + FORMAT_MESSAGE_FROM_STRING = 1024 + FORMAT_MESSAGE_FROM_HMODULE = 2048 + FORMAT_MESSAGE_FROM_SYSTEM = 4096 + FORMAT_MESSAGE_ARGUMENT_ARRAY = 8192 + FORMAT_MESSAGE_MAX_WIDTH_MASK = 255 + + MAX_PATH = 260 + MAX_LONG_PATH = 32768 + + MAX_COMPUTERNAME_LENGTH = 15 + + TIME_ZONE_ID_UNKNOWN = 0 + TIME_ZONE_ID_STANDARD = 1 + + TIME_ZONE_ID_DAYLIGHT = 2 + IGNORE = 0 + INFINITE = 0xffffffff + + WAIT_ABANDONED = 0x00000080 + WAIT_OBJECT_0 = 0x00000000 + WAIT_FAILED = 0xFFFFFFFF + + // Access rights for process. + PROCESS_CREATE_PROCESS = 0x0080 + PROCESS_CREATE_THREAD = 0x0002 + PROCESS_DUP_HANDLE = 0x0040 + PROCESS_QUERY_INFORMATION = 0x0400 + PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 + PROCESS_SET_INFORMATION = 0x0200 + PROCESS_SET_QUOTA = 0x0100 + PROCESS_SUSPEND_RESUME = 0x0800 + PROCESS_TERMINATE = 0x0001 + PROCESS_VM_OPERATION = 0x0008 + PROCESS_VM_READ = 0x0010 + PROCESS_VM_WRITE = 0x0020 + + // Access rights for thread. + THREAD_DIRECT_IMPERSONATION = 0x0200 + THREAD_GET_CONTEXT = 0x0008 + THREAD_IMPERSONATE = 0x0100 + THREAD_QUERY_INFORMATION = 0x0040 + THREAD_QUERY_LIMITED_INFORMATION = 0x0800 + THREAD_SET_CONTEXT = 0x0010 + THREAD_SET_INFORMATION = 0x0020 + THREAD_SET_LIMITED_INFORMATION = 0x0400 + THREAD_SET_THREAD_TOKEN = 0x0080 + THREAD_SUSPEND_RESUME = 0x0002 + THREAD_TERMINATE = 0x0001 + + FILE_MAP_COPY = 0x01 + FILE_MAP_WRITE = 0x02 + FILE_MAP_READ = 0x04 + FILE_MAP_EXECUTE = 0x20 + + CTRL_C_EVENT = 0 + CTRL_BREAK_EVENT = 1 + CTRL_CLOSE_EVENT = 2 + CTRL_LOGOFF_EVENT = 5 + CTRL_SHUTDOWN_EVENT = 6 + + // Windows reserves errors >= 1<<29 for application use. + APPLICATION_ERROR = 1 << 29 +) + +const ( + // Process creation flags. 
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000 + CREATE_DEFAULT_ERROR_MODE = 0x04000000 + CREATE_NEW_CONSOLE = 0x00000010 + CREATE_NEW_PROCESS_GROUP = 0x00000200 + CREATE_NO_WINDOW = 0x08000000 + CREATE_PROTECTED_PROCESS = 0x00040000 + CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000 + CREATE_SEPARATE_WOW_VDM = 0x00000800 + CREATE_SHARED_WOW_VDM = 0x00001000 + CREATE_SUSPENDED = 0x00000004 + CREATE_UNICODE_ENVIRONMENT = 0x00000400 + DEBUG_ONLY_THIS_PROCESS = 0x00000002 + DEBUG_PROCESS = 0x00000001 + DETACHED_PROCESS = 0x00000008 + EXTENDED_STARTUPINFO_PRESENT = 0x00080000 + INHERIT_PARENT_AFFINITY = 0x00010000 +) + +const ( + // flags for CreateToolhelp32Snapshot + TH32CS_SNAPHEAPLIST = 0x01 + TH32CS_SNAPPROCESS = 0x02 + TH32CS_SNAPTHREAD = 0x04 + TH32CS_SNAPMODULE = 0x08 + TH32CS_SNAPMODULE32 = 0x10 + TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD + TH32CS_INHERIT = 0x80000000 +) + +const ( + // filters for ReadDirectoryChangesW + FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 + FILE_NOTIFY_CHANGE_DIR_NAME = 0x002 + FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004 + FILE_NOTIFY_CHANGE_SIZE = 0x008 + FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 + FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 + FILE_NOTIFY_CHANGE_CREATION = 0x040 + FILE_NOTIFY_CHANGE_SECURITY = 0x100 +) + +const ( + // do not reorder + FILE_ACTION_ADDED = iota + 1 + FILE_ACTION_REMOVED + FILE_ACTION_MODIFIED + FILE_ACTION_RENAMED_OLD_NAME + FILE_ACTION_RENAMED_NEW_NAME +) + +const ( + // wincrypt.h + PROV_RSA_FULL = 1 + PROV_RSA_SIG = 2 + PROV_DSS = 3 + PROV_FORTEZZA = 4 + PROV_MS_EXCHANGE = 5 + PROV_SSL = 6 + PROV_RSA_SCHANNEL = 12 + PROV_DSS_DH = 13 + PROV_EC_ECDSA_SIG = 14 + PROV_EC_ECNRA_SIG = 15 + PROV_EC_ECDSA_FULL = 16 + PROV_EC_ECNRA_FULL = 17 + PROV_DH_SCHANNEL = 18 + PROV_SPYRUS_LYNKS = 20 + PROV_RNG = 21 + PROV_INTEL_SEC = 22 + PROV_REPLACE_OWF = 23 + PROV_RSA_AES = 24 + CRYPT_VERIFYCONTEXT = 0xF0000000 + CRYPT_NEWKEYSET = 0x00000008 + CRYPT_DELETEKEYSET = 0x00000010 + CRYPT_MACHINE_KEYSET = 0x00000020 + CRYPT_SILENT = 0x00000040 + CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080 + + USAGE_MATCH_TYPE_AND = 0 + USAGE_MATCH_TYPE_OR = 1 + + /* msgAndCertEncodingType values for CertOpenStore function */ + X509_ASN_ENCODING = 0x00000001 + PKCS_7_ASN_ENCODING = 0x00010000 + + /* storeProvider values for CertOpenStore function */ + CERT_STORE_PROV_MSG = 1 + CERT_STORE_PROV_MEMORY = 2 + CERT_STORE_PROV_FILE = 3 + CERT_STORE_PROV_REG = 4 + CERT_STORE_PROV_PKCS7 = 5 + CERT_STORE_PROV_SERIALIZED = 6 + CERT_STORE_PROV_FILENAME_A = 7 + CERT_STORE_PROV_FILENAME_W = 8 + CERT_STORE_PROV_FILENAME = CERT_STORE_PROV_FILENAME_W + CERT_STORE_PROV_SYSTEM_A = 9 + CERT_STORE_PROV_SYSTEM_W = 10 + CERT_STORE_PROV_SYSTEM = CERT_STORE_PROV_SYSTEM_W + CERT_STORE_PROV_COLLECTION = 11 + CERT_STORE_PROV_SYSTEM_REGISTRY_A = 12 + CERT_STORE_PROV_SYSTEM_REGISTRY_W = 13 + CERT_STORE_PROV_SYSTEM_REGISTRY = CERT_STORE_PROV_SYSTEM_REGISTRY_W + CERT_STORE_PROV_PHYSICAL_W = 14 + CERT_STORE_PROV_PHYSICAL = CERT_STORE_PROV_PHYSICAL_W + CERT_STORE_PROV_SMART_CARD_W = 15 + CERT_STORE_PROV_SMART_CARD = CERT_STORE_PROV_SMART_CARD_W + CERT_STORE_PROV_LDAP_W = 16 + CERT_STORE_PROV_LDAP = CERT_STORE_PROV_LDAP_W + CERT_STORE_PROV_PKCS12 = 17 + + /* store characteristics (low WORD of flag) for CertOpenStore function */ + CERT_STORE_NO_CRYPT_RELEASE_FLAG = 0x00000001 + CERT_STORE_SET_LOCALIZED_NAME_FLAG = 0x00000002 + CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004 + CERT_STORE_DELETE_FLAG = 0x00000010 + CERT_STORE_UNSAFE_PHYSICAL_FLAG = 0x00000020 + 
CERT_STORE_SHARE_STORE_FLAG = 0x00000040 + CERT_STORE_SHARE_CONTEXT_FLAG = 0x00000080 + CERT_STORE_MANIFOLD_FLAG = 0x00000100 + CERT_STORE_ENUM_ARCHIVED_FLAG = 0x00000200 + CERT_STORE_UPDATE_KEYID_FLAG = 0x00000400 + CERT_STORE_BACKUP_RESTORE_FLAG = 0x00000800 + CERT_STORE_MAXIMUM_ALLOWED_FLAG = 0x00001000 + CERT_STORE_CREATE_NEW_FLAG = 0x00002000 + CERT_STORE_OPEN_EXISTING_FLAG = 0x00004000 + CERT_STORE_READONLY_FLAG = 0x00008000 + + /* store locations (high WORD of flag) for CertOpenStore function */ + CERT_SYSTEM_STORE_CURRENT_USER = 0x00010000 + CERT_SYSTEM_STORE_LOCAL_MACHINE = 0x00020000 + CERT_SYSTEM_STORE_CURRENT_SERVICE = 0x00040000 + CERT_SYSTEM_STORE_SERVICES = 0x00050000 + CERT_SYSTEM_STORE_USERS = 0x00060000 + CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY = 0x00070000 + CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = 0x00080000 + CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE = 0x00090000 + CERT_SYSTEM_STORE_UNPROTECTED_FLAG = 0x40000000 + CERT_SYSTEM_STORE_RELOCATE_FLAG = 0x80000000 + + /* Miscellaneous high-WORD flags for CertOpenStore function */ + CERT_REGISTRY_STORE_REMOTE_FLAG = 0x00010000 + CERT_REGISTRY_STORE_SERIALIZED_FLAG = 0x00020000 + CERT_REGISTRY_STORE_ROAMING_FLAG = 0x00040000 + CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x00080000 + CERT_REGISTRY_STORE_LM_GPT_FLAG = 0x01000000 + CERT_REGISTRY_STORE_CLIENT_GPT_FLAG = 0x80000000 + CERT_FILE_STORE_COMMIT_ENABLE_FLAG = 0x00010000 + CERT_LDAP_STORE_SIGN_FLAG = 0x00010000 + CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG = 0x00020000 + CERT_LDAP_STORE_OPENED_FLAG = 0x00040000 + CERT_LDAP_STORE_UNBIND_FLAG = 0x00080000 + + /* addDisposition values for CertAddCertificateContextToStore function */ + CERT_STORE_ADD_NEW = 1 + CERT_STORE_ADD_USE_EXISTING = 2 + CERT_STORE_ADD_REPLACE_EXISTING = 3 + CERT_STORE_ADD_ALWAYS = 4 + CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5 + CERT_STORE_ADD_NEWER = 6 + CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES = 7 + + /* ErrorStatus values for CertTrustStatus struct */ + CERT_TRUST_NO_ERROR = 0x00000000 + CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001 + CERT_TRUST_IS_REVOKED = 0x00000004 + CERT_TRUST_IS_NOT_SIGNATURE_VALID = 0x00000008 + CERT_TRUST_IS_NOT_VALID_FOR_USAGE = 0x00000010 + CERT_TRUST_IS_UNTRUSTED_ROOT = 0x00000020 + CERT_TRUST_REVOCATION_STATUS_UNKNOWN = 0x00000040 + CERT_TRUST_IS_CYCLIC = 0x00000080 + CERT_TRUST_INVALID_EXTENSION = 0x00000100 + CERT_TRUST_INVALID_POLICY_CONSTRAINTS = 0x00000200 + CERT_TRUST_INVALID_BASIC_CONSTRAINTS = 0x00000400 + CERT_TRUST_INVALID_NAME_CONSTRAINTS = 0x00000800 + CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000 + CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000 + CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000 + CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000 + CERT_TRUST_IS_PARTIAL_CHAIN = 0x00010000 + CERT_TRUST_CTL_IS_NOT_TIME_VALID = 0x00020000 + CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID = 0x00040000 + CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE = 0x00080000 + CERT_TRUST_HAS_WEAK_SIGNATURE = 0x00100000 + CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000 + CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000 + CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000 + CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000 + + /* InfoStatus values for CertTrustStatus struct */ + CERT_TRUST_HAS_EXACT_MATCH_ISSUER = 0x00000001 + CERT_TRUST_HAS_KEY_MATCH_ISSUER = 0x00000002 + CERT_TRUST_HAS_NAME_MATCH_ISSUER = 0x00000004 + CERT_TRUST_IS_SELF_SIGNED = 0x00000008 + CERT_TRUST_HAS_PREFERRED_ISSUER = 0x00000100 + CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY = 0x00000200 + 
CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS = 0x00000400 + CERT_TRUST_IS_PEER_TRUSTED = 0x00000800 + CERT_TRUST_HAS_CRL_VALIDITY_EXTENDED = 0x00001000 + CERT_TRUST_IS_FROM_EXCLUSIVE_TRUST_STORE = 0x00002000 + CERT_TRUST_IS_CA_TRUSTED = 0x00004000 + CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000 + + /* policyOID values for CertVerifyCertificateChainPolicy function */ + CERT_CHAIN_POLICY_BASE = 1 + CERT_CHAIN_POLICY_AUTHENTICODE = 2 + CERT_CHAIN_POLICY_AUTHENTICODE_TS = 3 + CERT_CHAIN_POLICY_SSL = 4 + CERT_CHAIN_POLICY_BASIC_CONSTRAINTS = 5 + CERT_CHAIN_POLICY_NT_AUTH = 6 + CERT_CHAIN_POLICY_MICROSOFT_ROOT = 7 + CERT_CHAIN_POLICY_EV = 8 + CERT_CHAIN_POLICY_SSL_F12 = 9 + + /* AuthType values for SSLExtraCertChainPolicyPara struct */ + AUTHTYPE_CLIENT = 1 + AUTHTYPE_SERVER = 2 + + /* Checks values for SSLExtraCertChainPolicyPara struct */ + SECURITY_FLAG_IGNORE_REVOCATION = 0x00000080 + SECURITY_FLAG_IGNORE_UNKNOWN_CA = 0x00000100 + SECURITY_FLAG_IGNORE_WRONG_USAGE = 0x00000200 + SECURITY_FLAG_IGNORE_CERT_CN_INVALID = 0x00001000 + SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000 +) + +const ( + // flags for SetErrorMode + SEM_FAILCRITICALERRORS = 0x0001 + SEM_NOALIGNMENTFAULTEXCEPT = 0x0004 + SEM_NOGPFAULTERRORBOX = 0x0002 + SEM_NOOPENFILEERRORBOX = 0x8000 +) + +const ( + // Priority class. + ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000 + BELOW_NORMAL_PRIORITY_CLASS = 0x00004000 + HIGH_PRIORITY_CLASS = 0x00000080 + IDLE_PRIORITY_CLASS = 0x00000040 + NORMAL_PRIORITY_CLASS = 0x00000020 + PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000 + PROCESS_MODE_BACKGROUND_END = 0x00200000 + REALTIME_PRIORITY_CLASS = 0x00000100 +) + +var ( + OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00") + OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00") + OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00") +) + +// Pointer represents a pointer to an arbitrary Windows type. +// +// Pointer-typed fields may point to one of many different types. It's +// up to the caller to provide a pointer to the appropriate type, cast +// to Pointer. The caller must obey the unsafe.Pointer rules while +// doing so. +type Pointer *struct{} + +// Invented values to support what package os expects. +type Timeval struct { + Sec int32 + Usec int32 +} + +func (tv *Timeval) Nanoseconds() int64 { + return (int64(tv.Sec)*1e6 + int64(tv.Usec)) * 1e3 +} + +func NsecToTimeval(nsec int64) (tv Timeval) { + tv.Sec = int32(nsec / 1e9) + tv.Usec = int32(nsec % 1e9 / 1e3) + return +} + +type Overlapped struct { + Internal uintptr + InternalHigh uintptr + Offset uint32 + OffsetHigh uint32 + HEvent Handle +} + +type FileNotifyInformation struct { + NextEntryOffset uint32 + Action uint32 + FileNameLength uint32 + FileName uint16 +} + +type Filetime struct { + LowDateTime uint32 + HighDateTime uint32 +} + +// Nanoseconds returns Filetime ft in nanoseconds +// since Epoch (00:00:00 UTC, January 1, 1970). 
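+//
+// Editorial note: the constant 116444736000000000 used in both conversions
+// below is the number of 100-nanosecond intervals between the Windows epoch
+// (1601-01-01) and the Unix Epoch (1970-01-01), i.e. 11644473600 seconds, so
+// the pair round-trips exactly:
+//
+//	ft := NsecToFiletime(0) // encodes 1970-01-01 as a Filetime
+//	ns := ft.Nanoseconds()  // 0 again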
+func (ft *Filetime) Nanoseconds() int64 { + // 100-nanosecond intervals since January 1, 1601 + nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) + // change starting time to the Epoch (00:00:00 UTC, January 1, 1970) + nsec -= 116444736000000000 + // convert into nanoseconds + nsec *= 100 + return nsec +} + +func NsecToFiletime(nsec int64) (ft Filetime) { + // convert into 100-nanosecond + nsec /= 100 + // change starting time to January 1, 1601 + nsec += 116444736000000000 + // split into high / low + ft.LowDateTime = uint32(nsec & 0xffffffff) + ft.HighDateTime = uint32(nsec >> 32 & 0xffffffff) + return ft +} + +type Win32finddata struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH - 1]uint16 + AlternateFileName [13]uint16 +} + +// This is the actual system call structure. +// Win32finddata is what we committed to in Go 1. +type win32finddata1 struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH]uint16 + AlternateFileName [14]uint16 +} + +func copyFindData(dst *Win32finddata, src *win32finddata1) { + dst.FileAttributes = src.FileAttributes + dst.CreationTime = src.CreationTime + dst.LastAccessTime = src.LastAccessTime + dst.LastWriteTime = src.LastWriteTime + dst.FileSizeHigh = src.FileSizeHigh + dst.FileSizeLow = src.FileSizeLow + dst.Reserved0 = src.Reserved0 + dst.Reserved1 = src.Reserved1 + + // The src is 1 element bigger than dst, but it must be NUL. + copy(dst.FileName[:], src.FileName[:]) + copy(dst.AlternateFileName[:], src.AlternateFileName[:]) +} + +type ByHandleFileInformation struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + VolumeSerialNumber uint32 + FileSizeHigh uint32 + FileSizeLow uint32 + NumberOfLinks uint32 + FileIndexHigh uint32 + FileIndexLow uint32 +} + +const ( + GetFileExInfoStandard = 0 + GetFileExMaxInfoLevel = 1 +) + +type Win32FileAttributeData struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 +} + +// ShowWindow constants +const ( + // winuser.h + SW_HIDE = 0 + SW_NORMAL = 1 + SW_SHOWNORMAL = 1 + SW_SHOWMINIMIZED = 2 + SW_SHOWMAXIMIZED = 3 + SW_MAXIMIZE = 3 + SW_SHOWNOACTIVATE = 4 + SW_SHOW = 5 + SW_MINIMIZE = 6 + SW_SHOWMINNOACTIVE = 7 + SW_SHOWNA = 8 + SW_RESTORE = 9 + SW_SHOWDEFAULT = 10 + SW_FORCEMINIMIZE = 11 +) + +type StartupInfo struct { + Cb uint32 + _ *uint16 + Desktop *uint16 + Title *uint16 + X uint32 + Y uint32 + XSize uint32 + YSize uint32 + XCountChars uint32 + YCountChars uint32 + FillAttribute uint32 + Flags uint32 + ShowWindow uint16 + _ uint16 + _ *byte + StdInput Handle + StdOutput Handle + StdErr Handle +} + +type ProcessInformation struct { + Process Handle + Thread Handle + ProcessId uint32 + ThreadId uint32 +} + +type ProcessEntry32 struct { + Size uint32 + Usage uint32 + ProcessID uint32 + DefaultHeapID uintptr + ModuleID uint32 + Threads uint32 + ParentProcessID uint32 + PriClassBase int32 + Flags uint32 + ExeFile [MAX_PATH]uint16 +} + +type ThreadEntry32 struct { + Size uint32 + Usage uint32 + ThreadID uint32 + OwnerProcessID uint32 + BasePri int32 + DeltaPri int32 + Flags uint32 +} + +type Systemtime struct { + Year uint16 + Month uint16 + DayOfWeek 
uint16 + Day uint16 + Hour uint16 + Minute uint16 + Second uint16 + Milliseconds uint16 +} + +type Timezoneinformation struct { + Bias int32 + StandardName [32]uint16 + StandardDate Systemtime + StandardBias int32 + DaylightName [32]uint16 + DaylightDate Systemtime + DaylightBias int32 +} + +// Socket related. + +const ( + AF_UNSPEC = 0 + AF_UNIX = 1 + AF_INET = 2 + AF_INET6 = 23 + AF_NETBIOS = 17 + + SOCK_STREAM = 1 + SOCK_DGRAM = 2 + SOCK_RAW = 3 + SOCK_SEQPACKET = 5 + + IPPROTO_IP = 0 + IPPROTO_IPV6 = 0x29 + IPPROTO_TCP = 6 + IPPROTO_UDP = 17 + + SOL_SOCKET = 0xffff + SO_REUSEADDR = 4 + SO_KEEPALIVE = 8 + SO_DONTROUTE = 16 + SO_BROADCAST = 32 + SO_LINGER = 128 + SO_RCVBUF = 0x1002 + SO_SNDBUF = 0x1001 + SO_UPDATE_ACCEPT_CONTEXT = 0x700b + SO_UPDATE_CONNECT_CONTEXT = 0x7010 + + IOC_OUT = 0x40000000 + IOC_IN = 0x80000000 + IOC_VENDOR = 0x18000000 + IOC_INOUT = IOC_IN | IOC_OUT + IOC_WS2 = 0x08000000 + SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 + SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 + SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + + // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 + + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_LOOP = 0xb + IP_ADD_MEMBERSHIP = 0xc + IP_DROP_MEMBERSHIP = 0xd + + IPV6_V6ONLY = 0x1b + IPV6_UNICAST_HOPS = 0x4 + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_LOOP = 0xb + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_DONTROUTE = 0x4 + MSG_WAITALL = 0x8 + + MSG_TRUNC = 0x0100 + MSG_CTRUNC = 0x0200 + MSG_BCAST = 0x0400 + MSG_MCAST = 0x0800 + + SOMAXCONN = 0x7fffffff + + TCP_NODELAY = 1 + + SHUT_RD = 0 + SHUT_WR = 1 + SHUT_RDWR = 2 + + WSADESCRIPTION_LEN = 256 + WSASYS_STATUS_LEN = 128 +) + +type WSABuf struct { + Len uint32 + Buf *byte +} + +type WSAMsg struct { + Name *syscall.RawSockaddrAny + Namelen int32 + Buffers *WSABuf + BufferCount uint32 + Control WSABuf + Flags uint32 +} + +// Invented values to support what package os expects. 
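+//
+// (Editorial note: only a few of these bits are actually consulted on
+// Windows; for example, Chmod in syscall_windows.go maps S_IWRITE onto the
+// inverse of FILE_ATTRIBUTE_READONLY, with path a hypothetical file:
+//
+//	Chmod(path, 0444) // S_IWRITE clear: sets FILE_ATTRIBUTE_READONLY
+//	Chmod(path, 0644) // S_IWRITE set: clears FILE_ATTRIBUTE_READONLY
+//
+// The rest exist to satisfy what package os expects.)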
+const ( + S_IFMT = 0x1f000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +const ( + FILE_TYPE_CHAR = 0x0002 + FILE_TYPE_DISK = 0x0001 + FILE_TYPE_PIPE = 0x0003 + FILE_TYPE_REMOTE = 0x8000 + FILE_TYPE_UNKNOWN = 0x0000 +) + +type Hostent struct { + Name *byte + Aliases **byte + AddrType uint16 + Length uint16 + AddrList **byte +} + +type Protoent struct { + Name *byte + Aliases **byte + Proto uint16 +} + +const ( + DNS_TYPE_A = 0x0001 + DNS_TYPE_NS = 0x0002 + DNS_TYPE_MD = 0x0003 + DNS_TYPE_MF = 0x0004 + DNS_TYPE_CNAME = 0x0005 + DNS_TYPE_SOA = 0x0006 + DNS_TYPE_MB = 0x0007 + DNS_TYPE_MG = 0x0008 + DNS_TYPE_MR = 0x0009 + DNS_TYPE_NULL = 0x000a + DNS_TYPE_WKS = 0x000b + DNS_TYPE_PTR = 0x000c + DNS_TYPE_HINFO = 0x000d + DNS_TYPE_MINFO = 0x000e + DNS_TYPE_MX = 0x000f + DNS_TYPE_TEXT = 0x0010 + DNS_TYPE_RP = 0x0011 + DNS_TYPE_AFSDB = 0x0012 + DNS_TYPE_X25 = 0x0013 + DNS_TYPE_ISDN = 0x0014 + DNS_TYPE_RT = 0x0015 + DNS_TYPE_NSAP = 0x0016 + DNS_TYPE_NSAPPTR = 0x0017 + DNS_TYPE_SIG = 0x0018 + DNS_TYPE_KEY = 0x0019 + DNS_TYPE_PX = 0x001a + DNS_TYPE_GPOS = 0x001b + DNS_TYPE_AAAA = 0x001c + DNS_TYPE_LOC = 0x001d + DNS_TYPE_NXT = 0x001e + DNS_TYPE_EID = 0x001f + DNS_TYPE_NIMLOC = 0x0020 + DNS_TYPE_SRV = 0x0021 + DNS_TYPE_ATMA = 0x0022 + DNS_TYPE_NAPTR = 0x0023 + DNS_TYPE_KX = 0x0024 + DNS_TYPE_CERT = 0x0025 + DNS_TYPE_A6 = 0x0026 + DNS_TYPE_DNAME = 0x0027 + DNS_TYPE_SINK = 0x0028 + DNS_TYPE_OPT = 0x0029 + DNS_TYPE_DS = 0x002B + DNS_TYPE_RRSIG = 0x002E + DNS_TYPE_NSEC = 0x002F + DNS_TYPE_DNSKEY = 0x0030 + DNS_TYPE_DHCID = 0x0031 + DNS_TYPE_UINFO = 0x0064 + DNS_TYPE_UID = 0x0065 + DNS_TYPE_GID = 0x0066 + DNS_TYPE_UNSPEC = 0x0067 + DNS_TYPE_ADDRS = 0x00f8 + DNS_TYPE_TKEY = 0x00f9 + DNS_TYPE_TSIG = 0x00fa + DNS_TYPE_IXFR = 0x00fb + DNS_TYPE_AXFR = 0x00fc + DNS_TYPE_MAILB = 0x00fd + DNS_TYPE_MAILA = 0x00fe + DNS_TYPE_ALL = 0x00ff + DNS_TYPE_ANY = 0x00ff + DNS_TYPE_WINS = 0xff01 + DNS_TYPE_WINSR = 0xff02 + DNS_TYPE_NBSTAT = 0xff01 +) + +const ( + // flags inside DNSRecord.Dw + DnsSectionQuestion = 0x0000 + DnsSectionAnswer = 0x0001 + DnsSectionAuthority = 0x0002 + DnsSectionAdditional = 0x0003 +) + +type DNSSRVData struct { + Target *uint16 + Priority uint16 + Weight uint16 + Port uint16 + Pad uint16 +} + +type DNSPTRData struct { + Host *uint16 +} + +type DNSMXData struct { + NameExchange *uint16 + Preference uint16 + Pad uint16 +} + +type DNSTXTData struct { + StringCount uint16 + StringArray [1]*uint16 +} + +type DNSRecord struct { + Next *DNSRecord + Name *uint16 + Type uint16 + Length uint16 + Dw uint32 + Ttl uint32 + Reserved uint32 + Data [40]byte +} + +const ( + TF_DISCONNECT = 1 + TF_REUSE_SOCKET = 2 + TF_WRITE_BEHIND = 4 + TF_USE_DEFAULT_WORKER = 0 + TF_USE_SYSTEM_THREAD = 16 + TF_USE_KERNEL_APC = 32 +) + +type TransmitFileBuffers struct { + Head uintptr + HeadLength uint32 + Tail uintptr + TailLength uint32 +} + +const ( + IFF_UP = 1 + IFF_BROADCAST = 2 + IFF_LOOPBACK = 4 + IFF_POINTTOPOINT = 8 + IFF_MULTICAST = 16 +) + +const SIO_GET_INTERFACE_LIST = 0x4004747F + +// TODO(mattn): SockaddrGen is union of sockaddr/sockaddr_in/sockaddr_in6_old. +// will be fixed to change variable type as suitable. 
+
+type SockaddrGen [24]byte
+
+type InterfaceInfo struct {
+	Flags            uint32
+	Address          SockaddrGen
+	BroadcastAddress SockaddrGen
+	Netmask          SockaddrGen
+}
+
+type IpAddressString struct {
+	String [16]byte
+}
+
+type IpMaskString IpAddressString
+
+type IpAddrString struct {
+	Next      *IpAddrString
+	IpAddress IpAddressString
+	IpMask    IpMaskString
+	Context   uint32
+}
+
+const MAX_ADAPTER_NAME_LENGTH = 256
+const MAX_ADAPTER_DESCRIPTION_LENGTH = 128
+const MAX_ADAPTER_ADDRESS_LENGTH = 8
+
+type IpAdapterInfo struct {
+	Next                *IpAdapterInfo
+	ComboIndex          uint32
+	AdapterName         [MAX_ADAPTER_NAME_LENGTH + 4]byte
+	Description         [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte
+	AddressLength       uint32
+	Address             [MAX_ADAPTER_ADDRESS_LENGTH]byte
+	Index               uint32
+	Type                uint32
+	DhcpEnabled         uint32
+	CurrentIpAddress    *IpAddrString
+	IpAddressList       IpAddrString
+	GatewayList         IpAddrString
+	DhcpServer          IpAddrString
+	HaveWins            bool
+	PrimaryWinsServer   IpAddrString
+	SecondaryWinsServer IpAddrString
+	LeaseObtained       int64
+	LeaseExpires        int64
+}
+
+const MAXLEN_PHYSADDR = 8
+const MAX_INTERFACE_NAME_LEN = 256
+const MAXLEN_IFDESCR = 256
+
+type MibIfRow struct {
+	Name            [MAX_INTERFACE_NAME_LEN]uint16
+	Index           uint32
+	Type            uint32
+	Mtu             uint32
+	Speed           uint32
+	PhysAddrLen     uint32
+	PhysAddr        [MAXLEN_PHYSADDR]byte
+	AdminStatus     uint32
+	OperStatus      uint32
+	LastChange      uint32
+	InOctets        uint32
+	InUcastPkts     uint32
+	InNUcastPkts    uint32
+	InDiscards      uint32
+	InErrors        uint32
+	InUnknownProtos uint32
+	OutOctets       uint32
+	OutUcastPkts    uint32
+	OutNUcastPkts   uint32
+	OutDiscards     uint32
+	OutErrors       uint32
+	OutQLen         uint32
+	DescrLen        uint32
+	Descr           [MAXLEN_IFDESCR]byte
+}
+
+type CertInfo struct {
+	// Not implemented
+}
+
+type CertContext struct {
+	EncodingType uint32
+	EncodedCert  *byte
+	Length       uint32
+	CertInfo     *CertInfo
+	Store        Handle
+}
+
+type CertChainContext struct {
+	Size                       uint32
+	TrustStatus                CertTrustStatus
+	ChainCount                 uint32
+	Chains                     **CertSimpleChain
+	LowerQualityChainCount     uint32
+	LowerQualityChains         **CertChainContext
+	HasRevocationFreshnessTime uint32
+	RevocationFreshnessTime    uint32
+}
+
+type CertTrustListInfo struct {
+	// Not implemented
+}
+
+type CertSimpleChain struct {
+	Size                       uint32
+	TrustStatus                CertTrustStatus
+	NumElements                uint32
+	Elements                   **CertChainElement
+	TrustListInfo              *CertTrustListInfo
+	HasRevocationFreshnessTime uint32
+	RevocationFreshnessTime    uint32
+}
+
+type CertChainElement struct {
+	Size              uint32
+	CertContext       *CertContext
+	TrustStatus       CertTrustStatus
+	RevocationInfo    *CertRevocationInfo
+	IssuanceUsage     *CertEnhKeyUsage
+	ApplicationUsage  *CertEnhKeyUsage
+	ExtendedErrorInfo *uint16
+}
+
+type CertRevocationCrlInfo struct {
+	// Not implemented
+}
+
+type CertRevocationInfo struct {
+	Size             uint32
+	RevocationResult uint32
+	RevocationOid    *byte
+	OidSpecificInfo  Pointer
+	HasFreshnessTime uint32
+	FreshnessTime    uint32
+	CrlInfo          *CertRevocationCrlInfo
+}
+
+type CertTrustStatus struct {
+	ErrorStatus uint32
+	InfoStatus  uint32
+}
+
+type CertUsageMatch struct {
+	Type  uint32
+	Usage CertEnhKeyUsage
+}
+
+type CertEnhKeyUsage struct {
+	Length           uint32
+	UsageIdentifiers **byte
+}
+
+type CertChainPara struct {
+	Size                         uint32
+	RequestedUsage               CertUsageMatch
+	RequstedIssuancePolicy       CertUsageMatch
+	URLRetrievalTimeout          uint32
+	CheckRevocationFreshnessTime uint32
+	RevocationFreshnessTime      uint32
+	CacheResync                  *Filetime
+}
+
+type CertChainPolicyPara struct {
+	Size            uint32
+	Flags           uint32
+	ExtraPolicyPara Pointer
+}
+
+type SSLExtraCertChainPolicyPara struct {
+	Size       uint32
+	AuthType   uint32
+	Checks     uint32
+	ServerName *uint16
+}
+
+type CertChainPolicyStatus struct {
+	Size              uint32
+	Error             uint32
+	ChainIndex        uint32
+	ElementIndex      uint32
+	ExtraPolicyStatus Pointer
+}
+
+const (
+	// do not reorder
+	HKEY_CLASSES_ROOT = 0x80000000 + iota
+	HKEY_CURRENT_USER
+	HKEY_LOCAL_MACHINE
+	HKEY_USERS
+	HKEY_PERFORMANCE_DATA
+	HKEY_CURRENT_CONFIG
+	HKEY_DYN_DATA
+
+	KEY_QUERY_VALUE        = 1
+	KEY_SET_VALUE          = 2
+	KEY_CREATE_SUB_KEY     = 4
+	KEY_ENUMERATE_SUB_KEYS = 8
+	KEY_NOTIFY             = 16
+	KEY_CREATE_LINK        = 32
+	KEY_WRITE              = 0x20006
+	KEY_EXECUTE            = 0x20019
+	KEY_READ               = 0x20019
+	KEY_WOW64_64KEY        = 0x0100
+	KEY_WOW64_32KEY        = 0x0200
+	KEY_ALL_ACCESS         = 0xf003f
+)
+
+const (
+	// do not reorder
+	REG_NONE = iota
+	REG_SZ
+	REG_EXPAND_SZ
+	REG_BINARY
+	REG_DWORD_LITTLE_ENDIAN
+	REG_DWORD_BIG_ENDIAN
+	REG_LINK
+	REG_MULTI_SZ
+	REG_RESOURCE_LIST
+	REG_FULL_RESOURCE_DESCRIPTOR
+	REG_RESOURCE_REQUIREMENTS_LIST
+	REG_QWORD_LITTLE_ENDIAN
+	REG_DWORD = REG_DWORD_LITTLE_ENDIAN
+	REG_QWORD = REG_QWORD_LITTLE_ENDIAN
+)
+
+const (
+	EVENT_MODIFY_STATE = 0x0002
+	EVENT_ALL_ACCESS   = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3
+
+	MUTANT_QUERY_STATE = 0x0001
+	MUTANT_ALL_ACCESS  = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE
+
+	SEMAPHORE_MODIFY_STATE = 0x0002
+	SEMAPHORE_ALL_ACCESS   = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3
+
+	TIMER_QUERY_STATE  = 0x0001
+	TIMER_MODIFY_STATE = 0x0002
+	TIMER_ALL_ACCESS   = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | TIMER_QUERY_STATE | TIMER_MODIFY_STATE
+
+	MUTEX_MODIFY_STATE = MUTANT_QUERY_STATE
+	MUTEX_ALL_ACCESS   = MUTANT_ALL_ACCESS
+
+	CREATE_EVENT_MANUAL_RESET  = 0x1
+	CREATE_EVENT_INITIAL_SET   = 0x2
+	CREATE_MUTEX_INITIAL_OWNER = 0x1
+)
+
+type AddrinfoW struct {
+	Flags     int32
+	Family    int32
+	Socktype  int32
+	Protocol  int32
+	Addrlen   uintptr
+	Canonname *uint16
+	Addr      uintptr
+	Next      *AddrinfoW
+}
+
+const (
+	AI_PASSIVE     = 1
+	AI_CANONNAME   = 2
+	AI_NUMERICHOST = 4
+)
+
+type GUID struct {
+	Data1 uint32
+	Data2 uint16
+	Data3 uint16
+	Data4 [8]byte
+}
+
+var WSAID_CONNECTEX = GUID{
+	0x25a207b9,
+	0xddf3,
+	0x4660,
+	[8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
+}
+
+var WSAID_WSASENDMSG = GUID{
+	0xa441e712,
+	0x754f,
+	0x43ca,
+	[8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d},
+}
+
+var WSAID_WSARECVMSG = GUID{
+	0xf689d7c8,
+	0x6f1f,
+	0x436b,
+	[8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22},
+}
+
+const (
+	FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
+	FILE_SKIP_SET_EVENT_ON_HANDLE        = 2
+)
+
+const (
+	WSAPROTOCOL_LEN    = 255
+	MAX_PROTOCOL_CHAIN = 7
+	BASE_PROTOCOL      = 1
+	LAYERED_PROTOCOL   = 0
+
+	XP1_CONNECTIONLESS           = 0x00000001
+	XP1_GUARANTEED_DELIVERY      = 0x00000002
+	XP1_GUARANTEED_ORDER         = 0x00000004
+	XP1_MESSAGE_ORIENTED         = 0x00000008
+	XP1_PSEUDO_STREAM            = 0x00000010
+	XP1_GRACEFUL_CLOSE           = 0x00000020
+	XP1_EXPEDITED_DATA           = 0x00000040
+	XP1_CONNECT_DATA             = 0x00000080
+	XP1_DISCONNECT_DATA          = 0x00000100
+	XP1_SUPPORT_BROADCAST        = 0x00000200
+	XP1_SUPPORT_MULTIPOINT       = 0x00000400
+	XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800
+	XP1_MULTIPOINT_DATA_PLANE    = 0x00001000
+	XP1_QOS_SUPPORTED            = 0x00002000
+	XP1_UNI_SEND                 = 0x00008000
+	XP1_UNI_RECV                 = 0x00010000
+	XP1_IFS_HANDLES              = 0x00020000
+	XP1_PARTIAL_MESSAGE          = 0x00040000
+	XP1_SAN_SUPPORT_SDP          = 0x00080000
+
+	PFL_MULTIPLE_PROTO_ENTRIES  = 0x00000001
+	PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002
+	PFL_HIDDEN                  = 0x00000004
+	PFL_MATCHES_PROTOCOL_ZERO   = 0x00000008
+	PFL_NETWORKDIRECT_PROVIDER  = 0x00000010
+)
+
+type WSAProtocolInfo struct {
+	ServiceFlags1     uint32
+	ServiceFlags2     uint32
+	ServiceFlags3     uint32
+	ServiceFlags4     uint32
+	ProviderFlags     uint32
+	ProviderId        GUID
+	CatalogEntryId    uint32
+	ProtocolChain     WSAProtocolChain
+	Version           int32
+	AddressFamily     int32
+	MaxSockAddr       int32
+	MinSockAddr       int32
+	SocketType        int32
+	Protocol          int32
+	ProtocolMaxOffset int32
+	NetworkByteOrder  int32
+	SecurityScheme    int32
+	MessageSize       uint32
+	ProviderReserved  uint32
+	ProtocolName      [WSAPROTOCOL_LEN + 1]uint16
+}
+
+type WSAProtocolChain struct {
+	ChainLen     int32
+	ChainEntries [MAX_PROTOCOL_CHAIN]uint32
+}
+
+type TCPKeepalive struct {
+	OnOff    uint32
+	Time     uint32
+	Interval uint32
+}
+
+type symbolicLinkReparseBuffer struct {
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+	Flags                uint32
+	PathBuffer           [1]uint16
+}
+
+type mountPointReparseBuffer struct {
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+	PathBuffer           [1]uint16
+}
+
+type reparseDataBuffer struct {
+	ReparseTag        uint32
+	ReparseDataLength uint16
+	Reserved          uint16
+
+	// GenericReparseBuffer
+	reparseBuffer byte
+}
+
+const (
+	FSCTL_GET_REPARSE_POINT          = 0x900A8
+	MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
+	IO_REPARSE_TAG_MOUNT_POINT       = 0xA0000003
+	IO_REPARSE_TAG_SYMLINK           = 0xA000000C
+	SYMBOLIC_LINK_FLAG_DIRECTORY     = 0x1
+)
+
+const (
+	ComputerNameNetBIOS                   = 0
+	ComputerNameDnsHostname               = 1
+	ComputerNameDnsDomain                 = 2
+	ComputerNameDnsFullyQualified         = 3
+	ComputerNamePhysicalNetBIOS           = 4
+	ComputerNamePhysicalDnsHostname       = 5
+	ComputerNamePhysicalDnsDomain         = 6
+	ComputerNamePhysicalDnsFullyQualified = 7
+	ComputerNameMax                       = 8
+)
+
+// For MessageBox()
+const (
+	MB_OK                   = 0x00000000
+	MB_OKCANCEL             = 0x00000001
+	MB_ABORTRETRYIGNORE     = 0x00000002
+	MB_YESNOCANCEL          = 0x00000003
+	MB_YESNO                = 0x00000004
+	MB_RETRYCANCEL          = 0x00000005
+	MB_CANCELTRYCONTINUE    = 0x00000006
+	MB_ICONHAND             = 0x00000010
+	MB_ICONQUESTION         = 0x00000020
+	MB_ICONEXCLAMATION      = 0x00000030
+	MB_ICONASTERISK         = 0x00000040
+	MB_USERICON             = 0x00000080
+	MB_ICONWARNING          = MB_ICONEXCLAMATION
+	MB_ICONERROR            = MB_ICONHAND
+	MB_ICONINFORMATION      = MB_ICONASTERISK
+	MB_ICONSTOP             = MB_ICONHAND
+	MB_DEFBUTTON1           = 0x00000000
+	MB_DEFBUTTON2           = 0x00000100
+	MB_DEFBUTTON3           = 0x00000200
+	MB_DEFBUTTON4           = 0x00000300
+	MB_APPLMODAL            = 0x00000000
+	MB_SYSTEMMODAL          = 0x00001000
+	MB_TASKMODAL            = 0x00002000
+	MB_HELP                 = 0x00004000
+	MB_NOFOCUS              = 0x00008000
+	MB_SETFOREGROUND        = 0x00010000
+	MB_DEFAULT_DESKTOP_ONLY = 0x00020000
+	MB_TOPMOST              = 0x00040000
+	MB_RIGHT                = 0x00080000
+	MB_RTLREADING           = 0x00100000
+	MB_SERVICE_NOTIFICATION = 0x00200000
+)
+
+const (
+	MOVEFILE_REPLACE_EXISTING      = 0x1
+	MOVEFILE_COPY_ALLOWED          = 0x2
+	MOVEFILE_DELAY_UNTIL_REBOOT    = 0x4
+	MOVEFILE_WRITE_THROUGH         = 0x8
+	MOVEFILE_CREATE_HARDLINK       = 0x10
+	MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20
+)
+
+const GAA_FLAG_INCLUDE_PREFIX = 0x00000010
+
+const (
+	IF_TYPE_OTHER              = 1
+	IF_TYPE_ETHERNET_CSMACD    = 6
+	IF_TYPE_ISO88025_TOKENRING = 9
+	IF_TYPE_PPP                = 23
+	IF_TYPE_SOFTWARE_LOOPBACK  = 24
+	IF_TYPE_ATM                = 37
+	IF_TYPE_IEEE80211          = 71
+	IF_TYPE_TUNNEL             = 131
+	IF_TYPE_IEEE1394           = 144
+)
+
+type SocketAddress struct {
+	Sockaddr       *syscall.RawSockaddrAny
+	SockaddrLength int32
+}
+
+// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither.
+func (addr *SocketAddress) IP() net.IP {
+	if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET {
+		return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:]
+	} else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 {
+		return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:]
+	}
+	return nil
+}
+
+type IpAdapterUnicastAddress struct {
+	Length             uint32
+	Flags              uint32
+	Next               *IpAdapterUnicastAddress
+	Address            SocketAddress
+	PrefixOrigin       int32
+	SuffixOrigin       int32
+	DadState           int32
+	ValidLifetime      uint32
+	PreferredLifetime  uint32
+	LeaseLifetime      uint32
+	OnLinkPrefixLength uint8
+}
+
+type IpAdapterAnycastAddress struct {
+	Length  uint32
+	Flags   uint32
+	Next    *IpAdapterAnycastAddress
+	Address SocketAddress
+}
+
+type IpAdapterMulticastAddress struct {
+	Length  uint32
+	Flags   uint32
+	Next    *IpAdapterMulticastAddress
+	Address SocketAddress
+}
+
+type IpAdapterDnsServerAdapter struct {
+	Length   uint32
+	Reserved uint32
+	Next     *IpAdapterDnsServerAdapter
+	Address  SocketAddress
+}
+
+type IpAdapterPrefix struct {
+	Length       uint32
+	Flags        uint32
+	Next         *IpAdapterPrefix
+	Address      SocketAddress
+	PrefixLength uint32
+}
+
+type IpAdapterAddresses struct {
+	Length                uint32
+	IfIndex               uint32
+	Next                  *IpAdapterAddresses
+	AdapterName           *byte
+	FirstUnicastAddress   *IpAdapterUnicastAddress
+	FirstAnycastAddress   *IpAdapterAnycastAddress
+	FirstMulticastAddress *IpAdapterMulticastAddress
+	FirstDnsServerAddress *IpAdapterDnsServerAdapter
+	DnsSuffix             *uint16
+	Description           *uint16
+	FriendlyName          *uint16
+	PhysicalAddress       [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte
+	PhysicalAddressLength uint32
+	Flags                 uint32
+	Mtu                   uint32
+	IfType                uint32
+	OperStatus            uint32
+	Ipv6IfIndex           uint32
+	ZoneIndices           [16]uint32
+	FirstPrefix           *IpAdapterPrefix
+	/* more fields might be present here. */
+}
+
+const (
+	IfOperStatusUp             = 1
+	IfOperStatusDown           = 2
+	IfOperStatusTesting        = 3
+	IfOperStatusUnknown        = 4
+	IfOperStatusDormant        = 5
+	IfOperStatusNotPresent     = 6
+	IfOperStatusLowerLayerDown = 7
+)
+
+// Console related constants used for the mode parameter to SetConsoleMode. See
+// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details.
+
+const (
+	ENABLE_PROCESSED_INPUT        = 0x1
+	ENABLE_LINE_INPUT             = 0x2
+	ENABLE_ECHO_INPUT             = 0x4
+	ENABLE_WINDOW_INPUT           = 0x8
+	ENABLE_MOUSE_INPUT            = 0x10
+	ENABLE_INSERT_MODE            = 0x20
+	ENABLE_QUICK_EDIT_MODE        = 0x40
+	ENABLE_EXTENDED_FLAGS         = 0x80
+	ENABLE_AUTO_POSITION          = 0x100
+	ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200
+
+	ENABLE_PROCESSED_OUTPUT            = 0x1
+	ENABLE_WRAP_AT_EOL_OUTPUT          = 0x2
+	ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
+	DISABLE_NEWLINE_AUTO_RETURN        = 0x8
+	ENABLE_LVB_GRID_WORLDWIDE          = 0x10
+)
+
+type Coord struct {
+	X int16
+	Y int16
+}
+
+type SmallRect struct {
+	Left   int16
+	Top    int16
+	Right  int16
+	Bottom int16
+}
+
+// Used with GetConsoleScreenBuffer to retrieve information about a console
+// screen buffer. See
+// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
+// for details.
+
+type ConsoleScreenBufferInfo struct {
+	Size              Coord
+	CursorPosition    Coord
+	Attributes        uint16
+	Window            SmallRect
+	MaximumWindowSize Coord
+}
+
+const UNIX_PATH_MAX = 108 // defined in afunix.h
+
+const (
+	// flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags
+	JOB_OBJECT_LIMIT_ACTIVE_PROCESS             = 0x00000008
+	JOB_OBJECT_LIMIT_AFFINITY                   = 0x00000010
+	JOB_OBJECT_LIMIT_BREAKAWAY_OK               = 0x00000800
+	JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400
+	JOB_OBJECT_LIMIT_JOB_MEMORY                 = 0x00000200
+	JOB_OBJECT_LIMIT_JOB_TIME                   = 0x00000004
+	JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE          = 0x00002000
+	JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME          = 0x00000040
+	JOB_OBJECT_LIMIT_PRIORITY_CLASS             = 0x00000020
+	JOB_OBJECT_LIMIT_PROCESS_MEMORY             = 0x00000100
+	JOB_OBJECT_LIMIT_PROCESS_TIME               = 0x00000002
+	JOB_OBJECT_LIMIT_SCHEDULING_CLASS           = 0x00000080
+	JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK        = 0x00001000
+	JOB_OBJECT_LIMIT_SUBSET_AFFINITY            = 0x00004000
+	JOB_OBJECT_LIMIT_WORKINGSET                 = 0x00000001
+)
+
+type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
+	PerProcessUserTimeLimit int64
+	PerJobUserTimeLimit     int64
+	LimitFlags              uint32
+	MinimumWorkingSetSize   uintptr
+	MaximumWorkingSetSize   uintptr
+	ActiveProcessLimit      uint32
+	Affinity                uintptr
+	PriorityClass           uint32
+	SchedulingClass         uint32
+}
+
+type IO_COUNTERS struct {
+	ReadOperationCount  uint64
+	WriteOperationCount uint64
+	OtherOperationCount uint64
+	ReadTransferCount   uint64
+	WriteTransferCount  uint64
+	OtherTransferCount  uint64
+}
+
+type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct {
+	BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION
+	IoInfo                IO_COUNTERS
+	ProcessMemoryLimit    uintptr
+	JobMemoryLimit        uintptr
+	PeakProcessMemoryUsed uintptr
+	PeakJobMemoryUsed     uintptr
+}
+
+const (
+	// UIRestrictionsClass
+	JOB_OBJECT_UILIMIT_DESKTOP          = 0x00000040
+	JOB_OBJECT_UILIMIT_DISPLAYSETTINGS  = 0x00000010
+	JOB_OBJECT_UILIMIT_EXITWINDOWS      = 0x00000080
+	JOB_OBJECT_UILIMIT_GLOBALATOMS      = 0x00000020
+	JOB_OBJECT_UILIMIT_HANDLES          = 0x00000001
+	JOB_OBJECT_UILIMIT_READCLIPBOARD    = 0x00000002
+	JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008
+	JOB_OBJECT_UILIMIT_WRITECLIPBOARD   = 0x00000004
+)
+
+type JOBOBJECT_BASIC_UI_RESTRICTIONS struct {
+	UIRestrictionsClass uint32
+}
+
+const (
+	// JobObjectInformationClass
+	JobObjectAssociateCompletionPortInformation = 7
+	JobObjectBasicLimitInformation              = 2
+	JobObjectBasicUIRestrictions                = 4
+	JobObjectCpuRateControlInformation          = 15
+	JobObjectEndOfJobTimeInformation            = 6
+	JobObjectExtendedLimitInformation           = 9
+	JobObjectGroupInformation                   = 11
+	JobObjectGroupInformationEx                 = 14
+	JobObjectLimitViolationInformation2         = 35
+	JobObjectNetRateControlInformation          = 32
+	JobObjectNotificationLimitInformation       = 12
+	JobObjectNotificationLimitInformation2      = 34
+	JobObjectSecurityLimitInformation           = 5
+)
+
+const (
+	KF_FLAG_DEFAULT                          = 0x00000000
+	KF_FLAG_FORCE_APP_DATA_REDIRECTION       = 0x00080000
+	KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000
+	KF_FLAG_FORCE_PACKAGE_REDIRECTION        = 0x00020000
+	KF_FLAG_NO_PACKAGE_REDIRECTION           = 0x00010000
+	KF_FLAG_FORCE_APPCONTAINER_REDIRECTION   = 0x00020000
+	KF_FLAG_NO_APPCONTAINER_REDIRECTION      = 0x00010000
+	KF_FLAG_CREATE                           = 0x00008000
+	KF_FLAG_DONT_VERIFY                      = 0x00004000
+	KF_FLAG_DONT_UNEXPAND                    = 0x00002000
+	KF_FLAG_NO_ALIAS                         = 0x00001000
+	KF_FLAG_INIT                             = 0x00000800
+	KF_FLAG_DEFAULT_PATH                     = 0x00000400
+	KF_FLAG_NOT_PARENT_RELATIVE              = 0x00000200
+	KF_FLAG_SIMPLE_IDLIST                    = 0x00000100
+	KF_FLAG_ALIAS_ONLY                       = 0x80000000
+)
+
+type OsVersionInfoEx struct {
+	osVersionInfoSize uint32
+	MajorVersion      uint32
+	MinorVersion      uint32
+	BuildNumber       uint32
+	PlatformId        uint32
+	CsdVersion        [128]uint16
+	ServicePackMajor  uint16
+	ServicePackMinor  uint16
+	SuiteMask         uint16
+	ProductType       byte
+	_                 byte
+}
+
+const (
+	EWX_LOGOFF          = 0x00000000
+	EWX_SHUTDOWN        = 0x00000001
+	EWX_REBOOT          = 0x00000002
+	EWX_FORCE           = 0x00000004
+	EWX_POWEROFF        = 0x00000008
+	EWX_FORCEIFHUNG     = 0x00000010
+	EWX_QUICKRESOLVE    = 0x00000020
+	EWX_RESTARTAPPS     = 0x00000040
+	EWX_HYBRID_SHUTDOWN = 0x00400000
+	EWX_BOOTOPTIONS     = 0x01000000
+
+	SHTDN_REASON_FLAG_COMMENT_REQUIRED          = 0x01000000
+	SHTDN_REASON_FLAG_DIRTY_PROBLEM_ID_REQUIRED = 0x02000000
+	SHTDN_REASON_FLAG_CLEAN_UI                  = 0x04000000
+	SHTDN_REASON_FLAG_DIRTY_UI                  = 0x08000000
+	SHTDN_REASON_FLAG_USER_DEFINED              = 0x40000000
+	SHTDN_REASON_FLAG_PLANNED                   = 0x80000000
+	SHTDN_REASON_MAJOR_OTHER                    = 0x00000000
+	SHTDN_REASON_MAJOR_NONE                     = 0x00000000
+	SHTDN_REASON_MAJOR_HARDWARE                 = 0x00010000
+	SHTDN_REASON_MAJOR_OPERATINGSYSTEM          = 0x00020000
+	SHTDN_REASON_MAJOR_SOFTWARE                 = 0x00030000
+	SHTDN_REASON_MAJOR_APPLICATION              = 0x00040000
+	SHTDN_REASON_MAJOR_SYSTEM                   = 0x00050000
+	SHTDN_REASON_MAJOR_POWER                    = 0x00060000
+	SHTDN_REASON_MAJOR_LEGACY_API               = 0x00070000
+	SHTDN_REASON_MINOR_OTHER                    = 0x00000000
+	SHTDN_REASON_MINOR_NONE                     = 0x000000ff
+	SHTDN_REASON_MINOR_MAINTENANCE              = 0x00000001
+	SHTDN_REASON_MINOR_INSTALLATION             = 0x00000002
+	SHTDN_REASON_MINOR_UPGRADE                  = 0x00000003
+	SHTDN_REASON_MINOR_RECONFIG                 = 0x00000004
+	SHTDN_REASON_MINOR_HUNG                     = 0x00000005
+	SHTDN_REASON_MINOR_UNSTABLE                 = 0x00000006
+	SHTDN_REASON_MINOR_DISK                     = 0x00000007
+	SHTDN_REASON_MINOR_PROCESSOR                = 0x00000008
+	SHTDN_REASON_MINOR_NETWORKCARD              = 0x00000009
+	SHTDN_REASON_MINOR_POWER_SUPPLY             = 0x0000000a
+	SHTDN_REASON_MINOR_CORDUNPLUGGED            = 0x0000000b
+	SHTDN_REASON_MINOR_ENVIRONMENT              = 0x0000000c
+	SHTDN_REASON_MINOR_HARDWARE_DRIVER          = 0x0000000d
+	SHTDN_REASON_MINOR_OTHERDRIVER              = 0x0000000e
+	SHTDN_REASON_MINOR_BLUESCREEN               = 0x0000000F
+	SHTDN_REASON_MINOR_SERVICEPACK              = 0x00000010
+	SHTDN_REASON_MINOR_HOTFIX                   = 0x00000011
+	SHTDN_REASON_MINOR_SECURITYFIX              = 0x00000012
+	SHTDN_REASON_MINOR_SECURITY                 = 0x00000013
+	SHTDN_REASON_MINOR_NETWORK_CONNECTIVITY     = 0x00000014
+	SHTDN_REASON_MINOR_WMI                      = 0x00000015
+	SHTDN_REASON_MINOR_SERVICEPACK_UNINSTALL    = 0x00000016
+	SHTDN_REASON_MINOR_HOTFIX_UNINSTALL         = 0x00000017
+	SHTDN_REASON_MINOR_SECURITYFIX_UNINSTALL    = 0x00000018
+	SHTDN_REASON_MINOR_MMC                      = 0x00000019
+	SHTDN_REASON_MINOR_SYSTEMRESTORE            = 0x0000001a
+	SHTDN_REASON_MINOR_TERMSRV                  = 0x00000020
+	SHTDN_REASON_MINOR_DC_PROMOTION             = 0x00000021
+	SHTDN_REASON_MINOR_DC_DEMOTION              = 0x00000022
+	SHTDN_REASON_UNKNOWN                        = SHTDN_REASON_MINOR_NONE
+	SHTDN_REASON_LEGACY_API                     = SHTDN_REASON_MAJOR_LEGACY_API | SHTDN_REASON_FLAG_PLANNED
+	SHTDN_REASON_VALID_BIT_MASK                 = 0xc0ffffff
+
+	SHUTDOWN_NORETRY = 0x1
+)
+
+// Flags used for GetModuleHandleEx
+const (
+	GET_MODULE_HANDLE_EX_FLAG_PIN                = 1
+	GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT = 2
+	GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS       = 4
+)
diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go
new file mode 100644
index 00000000..fe0ddd03
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_386.go
@@ -0,0 +1,22 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version      uint16
+	HighVersion  uint16
+	Description  [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+	MaxSockets   uint16
+	MaxUdpDg     uint16
+	VendorInfo   *byte
+}
+
+type Servent struct {
+	Name    *byte
+	Aliases **byte
+	Port    uint16
+	Proto   *byte
+}
diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go
new file mode 100644
index 00000000..7e154c2d
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go
@@ -0,0 +1,22 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version      uint16
+	HighVersion  uint16
+	MaxSockets   uint16
+	MaxUdpDg     uint16
+	VendorInfo   *byte
+	Description  [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+}
+
+type Servent struct {
+	Name    *byte
+	Aliases **byte
+	Proto   *byte
+	Port    uint16
+}
diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm.go b/vendor/golang.org/x/sys/windows/types_windows_arm.go
new file mode 100644
index 00000000..74571e36
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_arm.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version      uint16
+	HighVersion  uint16
+	Description  [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+	MaxSockets   uint16
+	MaxUdpDg     uint16
+	VendorInfo   *byte
+}
+
+type Servent struct {
+	Name    *byte
+	Aliases **byte
+	Port    uint16
+	Proto   *byte
+}
diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go
new file mode 100644
index 00000000..f0212003
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/zerrors_windows.go
@@ -0,0 +1,6853 @@
+// Code generated by 'mkerrors.bash'; DO NOT EDIT.
+ +package windows + +import "syscall" + +const ( + FACILITY_NULL = 0 + FACILITY_RPC = 1 + FACILITY_DISPATCH = 2 + FACILITY_STORAGE = 3 + FACILITY_ITF = 4 + FACILITY_WIN32 = 7 + FACILITY_WINDOWS = 8 + FACILITY_SSPI = 9 + FACILITY_SECURITY = 9 + FACILITY_CONTROL = 10 + FACILITY_CERT = 11 + FACILITY_INTERNET = 12 + FACILITY_MEDIASERVER = 13 + FACILITY_MSMQ = 14 + FACILITY_SETUPAPI = 15 + FACILITY_SCARD = 16 + FACILITY_COMPLUS = 17 + FACILITY_AAF = 18 + FACILITY_URT = 19 + FACILITY_ACS = 20 + FACILITY_DPLAY = 21 + FACILITY_UMI = 22 + FACILITY_SXS = 23 + FACILITY_WINDOWS_CE = 24 + FACILITY_HTTP = 25 + FACILITY_USERMODE_COMMONLOG = 26 + FACILITY_WER = 27 + FACILITY_USERMODE_FILTER_MANAGER = 31 + FACILITY_BACKGROUNDCOPY = 32 + FACILITY_CONFIGURATION = 33 + FACILITY_WIA = 33 + FACILITY_STATE_MANAGEMENT = 34 + FACILITY_METADIRECTORY = 35 + FACILITY_WINDOWSUPDATE = 36 + FACILITY_DIRECTORYSERVICE = 37 + FACILITY_GRAPHICS = 38 + FACILITY_SHELL = 39 + FACILITY_NAP = 39 + FACILITY_TPM_SERVICES = 40 + FACILITY_TPM_SOFTWARE = 41 + FACILITY_UI = 42 + FACILITY_XAML = 43 + FACILITY_ACTION_QUEUE = 44 + FACILITY_PLA = 48 + FACILITY_WINDOWS_SETUP = 48 + FACILITY_FVE = 49 + FACILITY_FWP = 50 + FACILITY_WINRM = 51 + FACILITY_NDIS = 52 + FACILITY_USERMODE_HYPERVISOR = 53 + FACILITY_CMI = 54 + FACILITY_USERMODE_VIRTUALIZATION = 55 + FACILITY_USERMODE_VOLMGR = 56 + FACILITY_BCD = 57 + FACILITY_USERMODE_VHD = 58 + FACILITY_USERMODE_HNS = 59 + FACILITY_SDIAG = 60 + FACILITY_WEBSERVICES = 61 + FACILITY_WINPE = 61 + FACILITY_WPN = 62 + FACILITY_WINDOWS_STORE = 63 + FACILITY_INPUT = 64 + FACILITY_EAP = 66 + FACILITY_WINDOWS_DEFENDER = 80 + FACILITY_OPC = 81 + FACILITY_XPS = 82 + FACILITY_MBN = 84 + FACILITY_POWERSHELL = 84 + FACILITY_RAS = 83 + FACILITY_P2P_INT = 98 + FACILITY_P2P = 99 + FACILITY_DAF = 100 + FACILITY_BLUETOOTH_ATT = 101 + FACILITY_AUDIO = 102 + FACILITY_STATEREPOSITORY = 103 + FACILITY_VISUALCPP = 109 + FACILITY_SCRIPT = 112 + FACILITY_PARSE = 113 + FACILITY_BLB = 120 + FACILITY_BLB_CLI = 121 + FACILITY_WSBAPP = 122 + FACILITY_BLBUI = 128 + FACILITY_USN = 129 + FACILITY_USERMODE_VOLSNAP = 130 + FACILITY_TIERING = 131 + FACILITY_WSB_ONLINE = 133 + FACILITY_ONLINE_ID = 134 + FACILITY_DEVICE_UPDATE_AGENT = 135 + FACILITY_DRVSERVICING = 136 + FACILITY_DLS = 153 + FACILITY_DELIVERY_OPTIMIZATION = 208 + FACILITY_USERMODE_SPACES = 231 + FACILITY_USER_MODE_SECURITY_CORE = 232 + FACILITY_USERMODE_LICENSING = 234 + FACILITY_SOS = 160 + FACILITY_DEBUGGERS = 176 + FACILITY_SPP = 256 + FACILITY_RESTORE = 256 + FACILITY_DMSERVER = 256 + FACILITY_DEPLOYMENT_SERVICES_SERVER = 257 + FACILITY_DEPLOYMENT_SERVICES_IMAGING = 258 + FACILITY_DEPLOYMENT_SERVICES_MANAGEMENT = 259 + FACILITY_DEPLOYMENT_SERVICES_UTIL = 260 + FACILITY_DEPLOYMENT_SERVICES_BINLSVC = 261 + FACILITY_DEPLOYMENT_SERVICES_PXE = 263 + FACILITY_DEPLOYMENT_SERVICES_TFTP = 264 + FACILITY_DEPLOYMENT_SERVICES_TRANSPORT_MANAGEMENT = 272 + FACILITY_DEPLOYMENT_SERVICES_DRIVER_PROVISIONING = 278 + FACILITY_DEPLOYMENT_SERVICES_MULTICAST_SERVER = 289 + FACILITY_DEPLOYMENT_SERVICES_MULTICAST_CLIENT = 290 + FACILITY_DEPLOYMENT_SERVICES_CONTENT_PROVIDER = 293 + FACILITY_LINGUISTIC_SERVICES = 305 + FACILITY_AUDIOSTREAMING = 1094 + FACILITY_ACCELERATOR = 1536 + FACILITY_WMAAECMA = 1996 + FACILITY_DIRECTMUSIC = 2168 + FACILITY_DIRECT3D10 = 2169 + FACILITY_DXGI = 2170 + FACILITY_DXGI_DDI = 2171 + FACILITY_DIRECT3D11 = 2172 + FACILITY_DIRECT3D11_DEBUG = 2173 + FACILITY_DIRECT3D12 = 2174 + FACILITY_DIRECT3D12_DEBUG = 2175 + FACILITY_LEAP = 2184 + FACILITY_AUDCLNT = 2185 + 
FACILITY_WINCODEC_DWRITE_DWM = 2200 + FACILITY_WINML = 2192 + FACILITY_DIRECT2D = 2201 + FACILITY_DEFRAG = 2304 + FACILITY_USERMODE_SDBUS = 2305 + FACILITY_JSCRIPT = 2306 + FACILITY_PIDGENX = 2561 + FACILITY_EAS = 85 + FACILITY_WEB = 885 + FACILITY_WEB_SOCKET = 886 + FACILITY_MOBILE = 1793 + FACILITY_SQLITE = 1967 + FACILITY_UTC = 1989 + FACILITY_WEP = 2049 + FACILITY_SYNCENGINE = 2050 + FACILITY_XBOX = 2339 + FACILITY_PIX = 2748 + ERROR_SUCCESS syscall.Errno = 0 + NO_ERROR = 0 + SEC_E_OK Handle = 0x00000000 + ERROR_INVALID_FUNCTION syscall.Errno = 1 + ERROR_FILE_NOT_FOUND syscall.Errno = 2 + ERROR_PATH_NOT_FOUND syscall.Errno = 3 + ERROR_TOO_MANY_OPEN_FILES syscall.Errno = 4 + ERROR_ACCESS_DENIED syscall.Errno = 5 + ERROR_INVALID_HANDLE syscall.Errno = 6 + ERROR_ARENA_TRASHED syscall.Errno = 7 + ERROR_NOT_ENOUGH_MEMORY syscall.Errno = 8 + ERROR_INVALID_BLOCK syscall.Errno = 9 + ERROR_BAD_ENVIRONMENT syscall.Errno = 10 + ERROR_BAD_FORMAT syscall.Errno = 11 + ERROR_INVALID_ACCESS syscall.Errno = 12 + ERROR_INVALID_DATA syscall.Errno = 13 + ERROR_OUTOFMEMORY syscall.Errno = 14 + ERROR_INVALID_DRIVE syscall.Errno = 15 + ERROR_CURRENT_DIRECTORY syscall.Errno = 16 + ERROR_NOT_SAME_DEVICE syscall.Errno = 17 + ERROR_NO_MORE_FILES syscall.Errno = 18 + ERROR_WRITE_PROTECT syscall.Errno = 19 + ERROR_BAD_UNIT syscall.Errno = 20 + ERROR_NOT_READY syscall.Errno = 21 + ERROR_BAD_COMMAND syscall.Errno = 22 + ERROR_CRC syscall.Errno = 23 + ERROR_BAD_LENGTH syscall.Errno = 24 + ERROR_SEEK syscall.Errno = 25 + ERROR_NOT_DOS_DISK syscall.Errno = 26 + ERROR_SECTOR_NOT_FOUND syscall.Errno = 27 + ERROR_OUT_OF_PAPER syscall.Errno = 28 + ERROR_WRITE_FAULT syscall.Errno = 29 + ERROR_READ_FAULT syscall.Errno = 30 + ERROR_GEN_FAILURE syscall.Errno = 31 + ERROR_SHARING_VIOLATION syscall.Errno = 32 + ERROR_LOCK_VIOLATION syscall.Errno = 33 + ERROR_WRONG_DISK syscall.Errno = 34 + ERROR_SHARING_BUFFER_EXCEEDED syscall.Errno = 36 + ERROR_HANDLE_EOF syscall.Errno = 38 + ERROR_HANDLE_DISK_FULL syscall.Errno = 39 + ERROR_NOT_SUPPORTED syscall.Errno = 50 + ERROR_REM_NOT_LIST syscall.Errno = 51 + ERROR_DUP_NAME syscall.Errno = 52 + ERROR_BAD_NETPATH syscall.Errno = 53 + ERROR_NETWORK_BUSY syscall.Errno = 54 + ERROR_DEV_NOT_EXIST syscall.Errno = 55 + ERROR_TOO_MANY_CMDS syscall.Errno = 56 + ERROR_ADAP_HDW_ERR syscall.Errno = 57 + ERROR_BAD_NET_RESP syscall.Errno = 58 + ERROR_UNEXP_NET_ERR syscall.Errno = 59 + ERROR_BAD_REM_ADAP syscall.Errno = 60 + ERROR_PRINTQ_FULL syscall.Errno = 61 + ERROR_NO_SPOOL_SPACE syscall.Errno = 62 + ERROR_PRINT_CANCELLED syscall.Errno = 63 + ERROR_NETNAME_DELETED syscall.Errno = 64 + ERROR_NETWORK_ACCESS_DENIED syscall.Errno = 65 + ERROR_BAD_DEV_TYPE syscall.Errno = 66 + ERROR_BAD_NET_NAME syscall.Errno = 67 + ERROR_TOO_MANY_NAMES syscall.Errno = 68 + ERROR_TOO_MANY_SESS syscall.Errno = 69 + ERROR_SHARING_PAUSED syscall.Errno = 70 + ERROR_REQ_NOT_ACCEP syscall.Errno = 71 + ERROR_REDIR_PAUSED syscall.Errno = 72 + ERROR_FILE_EXISTS syscall.Errno = 80 + ERROR_CANNOT_MAKE syscall.Errno = 82 + ERROR_FAIL_I24 syscall.Errno = 83 + ERROR_OUT_OF_STRUCTURES syscall.Errno = 84 + ERROR_ALREADY_ASSIGNED syscall.Errno = 85 + ERROR_INVALID_PASSWORD syscall.Errno = 86 + ERROR_INVALID_PARAMETER syscall.Errno = 87 + ERROR_NET_WRITE_FAULT syscall.Errno = 88 + ERROR_NO_PROC_SLOTS syscall.Errno = 89 + ERROR_TOO_MANY_SEMAPHORES syscall.Errno = 100 + ERROR_EXCL_SEM_ALREADY_OWNED syscall.Errno = 101 + ERROR_SEM_IS_SET syscall.Errno = 102 + ERROR_TOO_MANY_SEM_REQUESTS syscall.Errno = 103 + ERROR_INVALID_AT_INTERRUPT_TIME 
syscall.Errno = 104 + ERROR_SEM_OWNER_DIED syscall.Errno = 105 + ERROR_SEM_USER_LIMIT syscall.Errno = 106 + ERROR_DISK_CHANGE syscall.Errno = 107 + ERROR_DRIVE_LOCKED syscall.Errno = 108 + ERROR_BROKEN_PIPE syscall.Errno = 109 + ERROR_OPEN_FAILED syscall.Errno = 110 + ERROR_BUFFER_OVERFLOW syscall.Errno = 111 + ERROR_DISK_FULL syscall.Errno = 112 + ERROR_NO_MORE_SEARCH_HANDLES syscall.Errno = 113 + ERROR_INVALID_TARGET_HANDLE syscall.Errno = 114 + ERROR_INVALID_CATEGORY syscall.Errno = 117 + ERROR_INVALID_VERIFY_SWITCH syscall.Errno = 118 + ERROR_BAD_DRIVER_LEVEL syscall.Errno = 119 + ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120 + ERROR_SEM_TIMEOUT syscall.Errno = 121 + ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 + ERROR_INVALID_NAME syscall.Errno = 123 + ERROR_INVALID_LEVEL syscall.Errno = 124 + ERROR_NO_VOLUME_LABEL syscall.Errno = 125 + ERROR_MOD_NOT_FOUND syscall.Errno = 126 + ERROR_PROC_NOT_FOUND syscall.Errno = 127 + ERROR_WAIT_NO_CHILDREN syscall.Errno = 128 + ERROR_CHILD_NOT_COMPLETE syscall.Errno = 129 + ERROR_DIRECT_ACCESS_HANDLE syscall.Errno = 130 + ERROR_NEGATIVE_SEEK syscall.Errno = 131 + ERROR_SEEK_ON_DEVICE syscall.Errno = 132 + ERROR_IS_JOIN_TARGET syscall.Errno = 133 + ERROR_IS_JOINED syscall.Errno = 134 + ERROR_IS_SUBSTED syscall.Errno = 135 + ERROR_NOT_JOINED syscall.Errno = 136 + ERROR_NOT_SUBSTED syscall.Errno = 137 + ERROR_JOIN_TO_JOIN syscall.Errno = 138 + ERROR_SUBST_TO_SUBST syscall.Errno = 139 + ERROR_JOIN_TO_SUBST syscall.Errno = 140 + ERROR_SUBST_TO_JOIN syscall.Errno = 141 + ERROR_BUSY_DRIVE syscall.Errno = 142 + ERROR_SAME_DRIVE syscall.Errno = 143 + ERROR_DIR_NOT_ROOT syscall.Errno = 144 + ERROR_DIR_NOT_EMPTY syscall.Errno = 145 + ERROR_IS_SUBST_PATH syscall.Errno = 146 + ERROR_IS_JOIN_PATH syscall.Errno = 147 + ERROR_PATH_BUSY syscall.Errno = 148 + ERROR_IS_SUBST_TARGET syscall.Errno = 149 + ERROR_SYSTEM_TRACE syscall.Errno = 150 + ERROR_INVALID_EVENT_COUNT syscall.Errno = 151 + ERROR_TOO_MANY_MUXWAITERS syscall.Errno = 152 + ERROR_INVALID_LIST_FORMAT syscall.Errno = 153 + ERROR_LABEL_TOO_LONG syscall.Errno = 154 + ERROR_TOO_MANY_TCBS syscall.Errno = 155 + ERROR_SIGNAL_REFUSED syscall.Errno = 156 + ERROR_DISCARDED syscall.Errno = 157 + ERROR_NOT_LOCKED syscall.Errno = 158 + ERROR_BAD_THREADID_ADDR syscall.Errno = 159 + ERROR_BAD_ARGUMENTS syscall.Errno = 160 + ERROR_BAD_PATHNAME syscall.Errno = 161 + ERROR_SIGNAL_PENDING syscall.Errno = 162 + ERROR_MAX_THRDS_REACHED syscall.Errno = 164 + ERROR_LOCK_FAILED syscall.Errno = 167 + ERROR_BUSY syscall.Errno = 170 + ERROR_DEVICE_SUPPORT_IN_PROGRESS syscall.Errno = 171 + ERROR_CANCEL_VIOLATION syscall.Errno = 173 + ERROR_ATOMIC_LOCKS_NOT_SUPPORTED syscall.Errno = 174 + ERROR_INVALID_SEGMENT_NUMBER syscall.Errno = 180 + ERROR_INVALID_ORDINAL syscall.Errno = 182 + ERROR_ALREADY_EXISTS syscall.Errno = 183 + ERROR_INVALID_FLAG_NUMBER syscall.Errno = 186 + ERROR_SEM_NOT_FOUND syscall.Errno = 187 + ERROR_INVALID_STARTING_CODESEG syscall.Errno = 188 + ERROR_INVALID_STACKSEG syscall.Errno = 189 + ERROR_INVALID_MODULETYPE syscall.Errno = 190 + ERROR_INVALID_EXE_SIGNATURE syscall.Errno = 191 + ERROR_EXE_MARKED_INVALID syscall.Errno = 192 + ERROR_BAD_EXE_FORMAT syscall.Errno = 193 + ERROR_ITERATED_DATA_EXCEEDS_64k syscall.Errno = 194 + ERROR_INVALID_MINALLOCSIZE syscall.Errno = 195 + ERROR_DYNLINK_FROM_INVALID_RING syscall.Errno = 196 + ERROR_IOPL_NOT_ENABLED syscall.Errno = 197 + ERROR_INVALID_SEGDPL syscall.Errno = 198 + ERROR_AUTODATASEG_EXCEEDS_64k syscall.Errno = 199 + ERROR_RING2SEG_MUST_BE_MOVABLE syscall.Errno = 
200 + ERROR_RELOC_CHAIN_XEEDS_SEGLIM syscall.Errno = 201 + ERROR_INFLOOP_IN_RELOC_CHAIN syscall.Errno = 202 + ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203 + ERROR_NO_SIGNAL_SENT syscall.Errno = 205 + ERROR_FILENAME_EXCED_RANGE syscall.Errno = 206 + ERROR_RING2_STACK_IN_USE syscall.Errno = 207 + ERROR_META_EXPANSION_TOO_LONG syscall.Errno = 208 + ERROR_INVALID_SIGNAL_NUMBER syscall.Errno = 209 + ERROR_THREAD_1_INACTIVE syscall.Errno = 210 + ERROR_LOCKED syscall.Errno = 212 + ERROR_TOO_MANY_MODULES syscall.Errno = 214 + ERROR_NESTING_NOT_ALLOWED syscall.Errno = 215 + ERROR_EXE_MACHINE_TYPE_MISMATCH syscall.Errno = 216 + ERROR_EXE_CANNOT_MODIFY_SIGNED_BINARY syscall.Errno = 217 + ERROR_EXE_CANNOT_MODIFY_STRONG_SIGNED_BINARY syscall.Errno = 218 + ERROR_FILE_CHECKED_OUT syscall.Errno = 220 + ERROR_CHECKOUT_REQUIRED syscall.Errno = 221 + ERROR_BAD_FILE_TYPE syscall.Errno = 222 + ERROR_FILE_TOO_LARGE syscall.Errno = 223 + ERROR_FORMS_AUTH_REQUIRED syscall.Errno = 224 + ERROR_VIRUS_INFECTED syscall.Errno = 225 + ERROR_VIRUS_DELETED syscall.Errno = 226 + ERROR_PIPE_LOCAL syscall.Errno = 229 + ERROR_BAD_PIPE syscall.Errno = 230 + ERROR_PIPE_BUSY syscall.Errno = 231 + ERROR_NO_DATA syscall.Errno = 232 + ERROR_PIPE_NOT_CONNECTED syscall.Errno = 233 + ERROR_MORE_DATA syscall.Errno = 234 + ERROR_NO_WORK_DONE syscall.Errno = 235 + ERROR_VC_DISCONNECTED syscall.Errno = 240 + ERROR_INVALID_EA_NAME syscall.Errno = 254 + ERROR_EA_LIST_INCONSISTENT syscall.Errno = 255 + WAIT_TIMEOUT syscall.Errno = 258 + ERROR_NO_MORE_ITEMS syscall.Errno = 259 + ERROR_CANNOT_COPY syscall.Errno = 266 + ERROR_DIRECTORY syscall.Errno = 267 + ERROR_EAS_DIDNT_FIT syscall.Errno = 275 + ERROR_EA_FILE_CORRUPT syscall.Errno = 276 + ERROR_EA_TABLE_FULL syscall.Errno = 277 + ERROR_INVALID_EA_HANDLE syscall.Errno = 278 + ERROR_EAS_NOT_SUPPORTED syscall.Errno = 282 + ERROR_NOT_OWNER syscall.Errno = 288 + ERROR_TOO_MANY_POSTS syscall.Errno = 298 + ERROR_PARTIAL_COPY syscall.Errno = 299 + ERROR_OPLOCK_NOT_GRANTED syscall.Errno = 300 + ERROR_INVALID_OPLOCK_PROTOCOL syscall.Errno = 301 + ERROR_DISK_TOO_FRAGMENTED syscall.Errno = 302 + ERROR_DELETE_PENDING syscall.Errno = 303 + ERROR_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING syscall.Errno = 304 + ERROR_SHORT_NAMES_NOT_ENABLED_ON_VOLUME syscall.Errno = 305 + ERROR_SECURITY_STREAM_IS_INCONSISTENT syscall.Errno = 306 + ERROR_INVALID_LOCK_RANGE syscall.Errno = 307 + ERROR_IMAGE_SUBSYSTEM_NOT_PRESENT syscall.Errno = 308 + ERROR_NOTIFICATION_GUID_ALREADY_DEFINED syscall.Errno = 309 + ERROR_INVALID_EXCEPTION_HANDLER syscall.Errno = 310 + ERROR_DUPLICATE_PRIVILEGES syscall.Errno = 311 + ERROR_NO_RANGES_PROCESSED syscall.Errno = 312 + ERROR_NOT_ALLOWED_ON_SYSTEM_FILE syscall.Errno = 313 + ERROR_DISK_RESOURCES_EXHAUSTED syscall.Errno = 314 + ERROR_INVALID_TOKEN syscall.Errno = 315 + ERROR_DEVICE_FEATURE_NOT_SUPPORTED syscall.Errno = 316 + ERROR_MR_MID_NOT_FOUND syscall.Errno = 317 + ERROR_SCOPE_NOT_FOUND syscall.Errno = 318 + ERROR_UNDEFINED_SCOPE syscall.Errno = 319 + ERROR_INVALID_CAP syscall.Errno = 320 + ERROR_DEVICE_UNREACHABLE syscall.Errno = 321 + ERROR_DEVICE_NO_RESOURCES syscall.Errno = 322 + ERROR_DATA_CHECKSUM_ERROR syscall.Errno = 323 + ERROR_INTERMIXED_KERNEL_EA_OPERATION syscall.Errno = 324 + ERROR_FILE_LEVEL_TRIM_NOT_SUPPORTED syscall.Errno = 326 + ERROR_OFFSET_ALIGNMENT_VIOLATION syscall.Errno = 327 + ERROR_INVALID_FIELD_IN_PARAMETER_LIST syscall.Errno = 328 + ERROR_OPERATION_IN_PROGRESS syscall.Errno = 329 + ERROR_BAD_DEVICE_PATH syscall.Errno = 330 + ERROR_TOO_MANY_DESCRIPTORS 
syscall.Errno = 331 + ERROR_SCRUB_DATA_DISABLED syscall.Errno = 332 + ERROR_NOT_REDUNDANT_STORAGE syscall.Errno = 333 + ERROR_RESIDENT_FILE_NOT_SUPPORTED syscall.Errno = 334 + ERROR_COMPRESSED_FILE_NOT_SUPPORTED syscall.Errno = 335 + ERROR_DIRECTORY_NOT_SUPPORTED syscall.Errno = 336 + ERROR_NOT_READ_FROM_COPY syscall.Errno = 337 + ERROR_FT_WRITE_FAILURE syscall.Errno = 338 + ERROR_FT_DI_SCAN_REQUIRED syscall.Errno = 339 + ERROR_INVALID_KERNEL_INFO_VERSION syscall.Errno = 340 + ERROR_INVALID_PEP_INFO_VERSION syscall.Errno = 341 + ERROR_OBJECT_NOT_EXTERNALLY_BACKED syscall.Errno = 342 + ERROR_EXTERNAL_BACKING_PROVIDER_UNKNOWN syscall.Errno = 343 + ERROR_COMPRESSION_NOT_BENEFICIAL syscall.Errno = 344 + ERROR_STORAGE_TOPOLOGY_ID_MISMATCH syscall.Errno = 345 + ERROR_BLOCKED_BY_PARENTAL_CONTROLS syscall.Errno = 346 + ERROR_BLOCK_TOO_MANY_REFERENCES syscall.Errno = 347 + ERROR_MARKED_TO_DISALLOW_WRITES syscall.Errno = 348 + ERROR_ENCLAVE_FAILURE syscall.Errno = 349 + ERROR_FAIL_NOACTION_REBOOT syscall.Errno = 350 + ERROR_FAIL_SHUTDOWN syscall.Errno = 351 + ERROR_FAIL_RESTART syscall.Errno = 352 + ERROR_MAX_SESSIONS_REACHED syscall.Errno = 353 + ERROR_NETWORK_ACCESS_DENIED_EDP syscall.Errno = 354 + ERROR_DEVICE_HINT_NAME_BUFFER_TOO_SMALL syscall.Errno = 355 + ERROR_EDP_POLICY_DENIES_OPERATION syscall.Errno = 356 + ERROR_EDP_DPL_POLICY_CANT_BE_SATISFIED syscall.Errno = 357 + ERROR_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT syscall.Errno = 358 + ERROR_DEVICE_IN_MAINTENANCE syscall.Errno = 359 + ERROR_NOT_SUPPORTED_ON_DAX syscall.Errno = 360 + ERROR_DAX_MAPPING_EXISTS syscall.Errno = 361 + ERROR_CLOUD_FILE_PROVIDER_NOT_RUNNING syscall.Errno = 362 + ERROR_CLOUD_FILE_METADATA_CORRUPT syscall.Errno = 363 + ERROR_CLOUD_FILE_METADATA_TOO_LARGE syscall.Errno = 364 + ERROR_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE syscall.Errno = 365 + ERROR_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH syscall.Errno = 366 + ERROR_CHILD_PROCESS_BLOCKED syscall.Errno = 367 + ERROR_STORAGE_LOST_DATA_PERSISTENCE syscall.Errno = 368 + ERROR_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE syscall.Errno = 369 + ERROR_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT syscall.Errno = 370 + ERROR_FILE_SYSTEM_VIRTUALIZATION_BUSY syscall.Errno = 371 + ERROR_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN syscall.Errno = 372 + ERROR_GDI_HANDLE_LEAK syscall.Errno = 373 + ERROR_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS syscall.Errno = 374 + ERROR_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED syscall.Errno = 375 + ERROR_NOT_A_CLOUD_FILE syscall.Errno = 376 + ERROR_CLOUD_FILE_NOT_IN_SYNC syscall.Errno = 377 + ERROR_CLOUD_FILE_ALREADY_CONNECTED syscall.Errno = 378 + ERROR_CLOUD_FILE_NOT_SUPPORTED syscall.Errno = 379 + ERROR_CLOUD_FILE_INVALID_REQUEST syscall.Errno = 380 + ERROR_CLOUD_FILE_READ_ONLY_VOLUME syscall.Errno = 381 + ERROR_CLOUD_FILE_CONNECTED_PROVIDER_ONLY syscall.Errno = 382 + ERROR_CLOUD_FILE_VALIDATION_FAILED syscall.Errno = 383 + ERROR_SMB1_NOT_AVAILABLE syscall.Errno = 384 + ERROR_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION syscall.Errno = 385 + ERROR_CLOUD_FILE_AUTHENTICATION_FAILED syscall.Errno = 386 + ERROR_CLOUD_FILE_INSUFFICIENT_RESOURCES syscall.Errno = 387 + ERROR_CLOUD_FILE_NETWORK_UNAVAILABLE syscall.Errno = 388 + ERROR_CLOUD_FILE_UNSUCCESSFUL syscall.Errno = 389 + ERROR_CLOUD_FILE_NOT_UNDER_SYNC_ROOT syscall.Errno = 390 + ERROR_CLOUD_FILE_IN_USE syscall.Errno = 391 + ERROR_CLOUD_FILE_PINNED syscall.Errno = 392 + ERROR_CLOUD_FILE_REQUEST_ABORTED syscall.Errno = 393 + ERROR_CLOUD_FILE_PROPERTY_CORRUPT syscall.Errno = 394 + ERROR_CLOUD_FILE_ACCESS_DENIED 
syscall.Errno = 395 + ERROR_CLOUD_FILE_INCOMPATIBLE_HARDLINKS syscall.Errno = 396 + ERROR_CLOUD_FILE_PROPERTY_LOCK_CONFLICT syscall.Errno = 397 + ERROR_CLOUD_FILE_REQUEST_CANCELED syscall.Errno = 398 + ERROR_EXTERNAL_SYSKEY_NOT_SUPPORTED syscall.Errno = 399 + ERROR_THREAD_MODE_ALREADY_BACKGROUND syscall.Errno = 400 + ERROR_THREAD_MODE_NOT_BACKGROUND syscall.Errno = 401 + ERROR_PROCESS_MODE_ALREADY_BACKGROUND syscall.Errno = 402 + ERROR_PROCESS_MODE_NOT_BACKGROUND syscall.Errno = 403 + ERROR_CLOUD_FILE_PROVIDER_TERMINATED syscall.Errno = 404 + ERROR_NOT_A_CLOUD_SYNC_ROOT syscall.Errno = 405 + ERROR_FILE_PROTECTED_UNDER_DPL syscall.Errno = 406 + ERROR_VOLUME_NOT_CLUSTER_ALIGNED syscall.Errno = 407 + ERROR_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND syscall.Errno = 408 + ERROR_APPX_FILE_NOT_ENCRYPTED syscall.Errno = 409 + ERROR_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED syscall.Errno = 410 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET syscall.Errno = 411 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE syscall.Errno = 412 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER syscall.Errno = 413 + ERROR_LINUX_SUBSYSTEM_NOT_PRESENT syscall.Errno = 414 + ERROR_FT_READ_FAILURE syscall.Errno = 415 + ERROR_STORAGE_RESERVE_ID_INVALID syscall.Errno = 416 + ERROR_STORAGE_RESERVE_DOES_NOT_EXIST syscall.Errno = 417 + ERROR_STORAGE_RESERVE_ALREADY_EXISTS syscall.Errno = 418 + ERROR_STORAGE_RESERVE_NOT_EMPTY syscall.Errno = 419 + ERROR_NOT_A_DAX_VOLUME syscall.Errno = 420 + ERROR_NOT_DAX_MAPPABLE syscall.Errno = 421 + ERROR_TIME_CRITICAL_THREAD syscall.Errno = 422 + ERROR_DPL_NOT_SUPPORTED_FOR_USER syscall.Errno = 423 + ERROR_CASE_DIFFERING_NAMES_IN_DIR syscall.Errno = 424 + ERROR_CAPAUTHZ_NOT_DEVUNLOCKED syscall.Errno = 450 + ERROR_CAPAUTHZ_CHANGE_TYPE syscall.Errno = 451 + ERROR_CAPAUTHZ_NOT_PROVISIONED syscall.Errno = 452 + ERROR_CAPAUTHZ_NOT_AUTHORIZED syscall.Errno = 453 + ERROR_CAPAUTHZ_NO_POLICY syscall.Errno = 454 + ERROR_CAPAUTHZ_DB_CORRUPTED syscall.Errno = 455 + ERROR_CAPAUTHZ_SCCD_INVALID_CATALOG syscall.Errno = 456 + ERROR_CAPAUTHZ_SCCD_NO_AUTH_ENTITY syscall.Errno = 457 + ERROR_CAPAUTHZ_SCCD_PARSE_ERROR syscall.Errno = 458 + ERROR_CAPAUTHZ_SCCD_DEV_MODE_REQUIRED syscall.Errno = 459 + ERROR_CAPAUTHZ_SCCD_NO_CAPABILITY_MATCH syscall.Errno = 460 + ERROR_PNP_QUERY_REMOVE_DEVICE_TIMEOUT syscall.Errno = 480 + ERROR_PNP_QUERY_REMOVE_RELATED_DEVICE_TIMEOUT syscall.Errno = 481 + ERROR_PNP_QUERY_REMOVE_UNRELATED_DEVICE_TIMEOUT syscall.Errno = 482 + ERROR_DEVICE_HARDWARE_ERROR syscall.Errno = 483 + ERROR_INVALID_ADDRESS syscall.Errno = 487 + ERROR_VRF_CFG_ENABLED syscall.Errno = 1183 + ERROR_PARTITION_TERMINATING syscall.Errno = 1184 + ERROR_USER_PROFILE_LOAD syscall.Errno = 500 + ERROR_ARITHMETIC_OVERFLOW syscall.Errno = 534 + ERROR_PIPE_CONNECTED syscall.Errno = 535 + ERROR_PIPE_LISTENING syscall.Errno = 536 + ERROR_VERIFIER_STOP syscall.Errno = 537 + ERROR_ABIOS_ERROR syscall.Errno = 538 + ERROR_WX86_WARNING syscall.Errno = 539 + ERROR_WX86_ERROR syscall.Errno = 540 + ERROR_TIMER_NOT_CANCELED syscall.Errno = 541 + ERROR_UNWIND syscall.Errno = 542 + ERROR_BAD_STACK syscall.Errno = 543 + ERROR_INVALID_UNWIND_TARGET syscall.Errno = 544 + ERROR_INVALID_PORT_ATTRIBUTES syscall.Errno = 545 + ERROR_PORT_MESSAGE_TOO_LONG syscall.Errno = 546 + ERROR_INVALID_QUOTA_LOWER syscall.Errno = 547 + ERROR_DEVICE_ALREADY_ATTACHED syscall.Errno = 548 + ERROR_INSTRUCTION_MISALIGNMENT syscall.Errno = 549 + ERROR_PROFILING_NOT_STARTED syscall.Errno = 550 + ERROR_PROFILING_NOT_STOPPED syscall.Errno = 551 + 
ERROR_COULD_NOT_INTERPRET syscall.Errno = 552 + ERROR_PROFILING_AT_LIMIT syscall.Errno = 553 + ERROR_CANT_WAIT syscall.Errno = 554 + ERROR_CANT_TERMINATE_SELF syscall.Errno = 555 + ERROR_UNEXPECTED_MM_CREATE_ERR syscall.Errno = 556 + ERROR_UNEXPECTED_MM_MAP_ERROR syscall.Errno = 557 + ERROR_UNEXPECTED_MM_EXTEND_ERR syscall.Errno = 558 + ERROR_BAD_FUNCTION_TABLE syscall.Errno = 559 + ERROR_NO_GUID_TRANSLATION syscall.Errno = 560 + ERROR_INVALID_LDT_SIZE syscall.Errno = 561 + ERROR_INVALID_LDT_OFFSET syscall.Errno = 563 + ERROR_INVALID_LDT_DESCRIPTOR syscall.Errno = 564 + ERROR_TOO_MANY_THREADS syscall.Errno = 565 + ERROR_THREAD_NOT_IN_PROCESS syscall.Errno = 566 + ERROR_PAGEFILE_QUOTA_EXCEEDED syscall.Errno = 567 + ERROR_LOGON_SERVER_CONFLICT syscall.Errno = 568 + ERROR_SYNCHRONIZATION_REQUIRED syscall.Errno = 569 + ERROR_NET_OPEN_FAILED syscall.Errno = 570 + ERROR_IO_PRIVILEGE_FAILED syscall.Errno = 571 + ERROR_CONTROL_C_EXIT syscall.Errno = 572 + ERROR_MISSING_SYSTEMFILE syscall.Errno = 573 + ERROR_UNHANDLED_EXCEPTION syscall.Errno = 574 + ERROR_APP_INIT_FAILURE syscall.Errno = 575 + ERROR_PAGEFILE_CREATE_FAILED syscall.Errno = 576 + ERROR_INVALID_IMAGE_HASH syscall.Errno = 577 + ERROR_NO_PAGEFILE syscall.Errno = 578 + ERROR_ILLEGAL_FLOAT_CONTEXT syscall.Errno = 579 + ERROR_NO_EVENT_PAIR syscall.Errno = 580 + ERROR_DOMAIN_CTRLR_CONFIG_ERROR syscall.Errno = 581 + ERROR_ILLEGAL_CHARACTER syscall.Errno = 582 + ERROR_UNDEFINED_CHARACTER syscall.Errno = 583 + ERROR_FLOPPY_VOLUME syscall.Errno = 584 + ERROR_BIOS_FAILED_TO_CONNECT_INTERRUPT syscall.Errno = 585 + ERROR_BACKUP_CONTROLLER syscall.Errno = 586 + ERROR_MUTANT_LIMIT_EXCEEDED syscall.Errno = 587 + ERROR_FS_DRIVER_REQUIRED syscall.Errno = 588 + ERROR_CANNOT_LOAD_REGISTRY_FILE syscall.Errno = 589 + ERROR_DEBUG_ATTACH_FAILED syscall.Errno = 590 + ERROR_SYSTEM_PROCESS_TERMINATED syscall.Errno = 591 + ERROR_DATA_NOT_ACCEPTED syscall.Errno = 592 + ERROR_VDM_HARD_ERROR syscall.Errno = 593 + ERROR_DRIVER_CANCEL_TIMEOUT syscall.Errno = 594 + ERROR_REPLY_MESSAGE_MISMATCH syscall.Errno = 595 + ERROR_LOST_WRITEBEHIND_DATA syscall.Errno = 596 + ERROR_CLIENT_SERVER_PARAMETERS_INVALID syscall.Errno = 597 + ERROR_NOT_TINY_STREAM syscall.Errno = 598 + ERROR_STACK_OVERFLOW_READ syscall.Errno = 599 + ERROR_CONVERT_TO_LARGE syscall.Errno = 600 + ERROR_FOUND_OUT_OF_SCOPE syscall.Errno = 601 + ERROR_ALLOCATE_BUCKET syscall.Errno = 602 + ERROR_MARSHALL_OVERFLOW syscall.Errno = 603 + ERROR_INVALID_VARIANT syscall.Errno = 604 + ERROR_BAD_COMPRESSION_BUFFER syscall.Errno = 605 + ERROR_AUDIT_FAILED syscall.Errno = 606 + ERROR_TIMER_RESOLUTION_NOT_SET syscall.Errno = 607 + ERROR_INSUFFICIENT_LOGON_INFO syscall.Errno = 608 + ERROR_BAD_DLL_ENTRYPOINT syscall.Errno = 609 + ERROR_BAD_SERVICE_ENTRYPOINT syscall.Errno = 610 + ERROR_IP_ADDRESS_CONFLICT1 syscall.Errno = 611 + ERROR_IP_ADDRESS_CONFLICT2 syscall.Errno = 612 + ERROR_REGISTRY_QUOTA_LIMIT syscall.Errno = 613 + ERROR_NO_CALLBACK_ACTIVE syscall.Errno = 614 + ERROR_PWD_TOO_SHORT syscall.Errno = 615 + ERROR_PWD_TOO_RECENT syscall.Errno = 616 + ERROR_PWD_HISTORY_CONFLICT syscall.Errno = 617 + ERROR_UNSUPPORTED_COMPRESSION syscall.Errno = 618 + ERROR_INVALID_HW_PROFILE syscall.Errno = 619 + ERROR_INVALID_PLUGPLAY_DEVICE_PATH syscall.Errno = 620 + ERROR_QUOTA_LIST_INCONSISTENT syscall.Errno = 621 + ERROR_EVALUATION_EXPIRATION syscall.Errno = 622 + ERROR_ILLEGAL_DLL_RELOCATION syscall.Errno = 623 + ERROR_DLL_INIT_FAILED_LOGOFF syscall.Errno = 624 + ERROR_VALIDATE_CONTINUE syscall.Errno = 625 + ERROR_NO_MORE_MATCHES 
syscall.Errno = 626 + ERROR_RANGE_LIST_CONFLICT syscall.Errno = 627 + ERROR_SERVER_SID_MISMATCH syscall.Errno = 628 + ERROR_CANT_ENABLE_DENY_ONLY syscall.Errno = 629 + ERROR_FLOAT_MULTIPLE_FAULTS syscall.Errno = 630 + ERROR_FLOAT_MULTIPLE_TRAPS syscall.Errno = 631 + ERROR_NOINTERFACE syscall.Errno = 632 + ERROR_DRIVER_FAILED_SLEEP syscall.Errno = 633 + ERROR_CORRUPT_SYSTEM_FILE syscall.Errno = 634 + ERROR_COMMITMENT_MINIMUM syscall.Errno = 635 + ERROR_PNP_RESTART_ENUMERATION syscall.Errno = 636 + ERROR_SYSTEM_IMAGE_BAD_SIGNATURE syscall.Errno = 637 + ERROR_PNP_REBOOT_REQUIRED syscall.Errno = 638 + ERROR_INSUFFICIENT_POWER syscall.Errno = 639 + ERROR_MULTIPLE_FAULT_VIOLATION syscall.Errno = 640 + ERROR_SYSTEM_SHUTDOWN syscall.Errno = 641 + ERROR_PORT_NOT_SET syscall.Errno = 642 + ERROR_DS_VERSION_CHECK_FAILURE syscall.Errno = 643 + ERROR_RANGE_NOT_FOUND syscall.Errno = 644 + ERROR_NOT_SAFE_MODE_DRIVER syscall.Errno = 646 + ERROR_FAILED_DRIVER_ENTRY syscall.Errno = 647 + ERROR_DEVICE_ENUMERATION_ERROR syscall.Errno = 648 + ERROR_MOUNT_POINT_NOT_RESOLVED syscall.Errno = 649 + ERROR_INVALID_DEVICE_OBJECT_PARAMETER syscall.Errno = 650 + ERROR_MCA_OCCURED syscall.Errno = 651 + ERROR_DRIVER_DATABASE_ERROR syscall.Errno = 652 + ERROR_SYSTEM_HIVE_TOO_LARGE syscall.Errno = 653 + ERROR_DRIVER_FAILED_PRIOR_UNLOAD syscall.Errno = 654 + ERROR_VOLSNAP_PREPARE_HIBERNATE syscall.Errno = 655 + ERROR_HIBERNATION_FAILURE syscall.Errno = 656 + ERROR_PWD_TOO_LONG syscall.Errno = 657 + ERROR_FILE_SYSTEM_LIMITATION syscall.Errno = 665 + ERROR_ASSERTION_FAILURE syscall.Errno = 668 + ERROR_ACPI_ERROR syscall.Errno = 669 + ERROR_WOW_ASSERTION syscall.Errno = 670 + ERROR_PNP_BAD_MPS_TABLE syscall.Errno = 671 + ERROR_PNP_TRANSLATION_FAILED syscall.Errno = 672 + ERROR_PNP_IRQ_TRANSLATION_FAILED syscall.Errno = 673 + ERROR_PNP_INVALID_ID syscall.Errno = 674 + ERROR_WAKE_SYSTEM_DEBUGGER syscall.Errno = 675 + ERROR_HANDLES_CLOSED syscall.Errno = 676 + ERROR_EXTRANEOUS_INFORMATION syscall.Errno = 677 + ERROR_RXACT_COMMIT_NECESSARY syscall.Errno = 678 + ERROR_MEDIA_CHECK syscall.Errno = 679 + ERROR_GUID_SUBSTITUTION_MADE syscall.Errno = 680 + ERROR_STOPPED_ON_SYMLINK syscall.Errno = 681 + ERROR_LONGJUMP syscall.Errno = 682 + ERROR_PLUGPLAY_QUERY_VETOED syscall.Errno = 683 + ERROR_UNWIND_CONSOLIDATE syscall.Errno = 684 + ERROR_REGISTRY_HIVE_RECOVERED syscall.Errno = 685 + ERROR_DLL_MIGHT_BE_INSECURE syscall.Errno = 686 + ERROR_DLL_MIGHT_BE_INCOMPATIBLE syscall.Errno = 687 + ERROR_DBG_EXCEPTION_NOT_HANDLED syscall.Errno = 688 + ERROR_DBG_REPLY_LATER syscall.Errno = 689 + ERROR_DBG_UNABLE_TO_PROVIDE_HANDLE syscall.Errno = 690 + ERROR_DBG_TERMINATE_THREAD syscall.Errno = 691 + ERROR_DBG_TERMINATE_PROCESS syscall.Errno = 692 + ERROR_DBG_CONTROL_C syscall.Errno = 693 + ERROR_DBG_PRINTEXCEPTION_C syscall.Errno = 694 + ERROR_DBG_RIPEXCEPTION syscall.Errno = 695 + ERROR_DBG_CONTROL_BREAK syscall.Errno = 696 + ERROR_DBG_COMMAND_EXCEPTION syscall.Errno = 697 + ERROR_OBJECT_NAME_EXISTS syscall.Errno = 698 + ERROR_THREAD_WAS_SUSPENDED syscall.Errno = 699 + ERROR_IMAGE_NOT_AT_BASE syscall.Errno = 700 + ERROR_RXACT_STATE_CREATED syscall.Errno = 701 + ERROR_SEGMENT_NOTIFICATION syscall.Errno = 702 + ERROR_BAD_CURRENT_DIRECTORY syscall.Errno = 703 + ERROR_FT_READ_RECOVERY_FROM_BACKUP syscall.Errno = 704 + ERROR_FT_WRITE_RECOVERY syscall.Errno = 705 + ERROR_IMAGE_MACHINE_TYPE_MISMATCH syscall.Errno = 706 + ERROR_RECEIVE_PARTIAL syscall.Errno = 707 + ERROR_RECEIVE_EXPEDITED syscall.Errno = 708 + ERROR_RECEIVE_PARTIAL_EXPEDITED syscall.Errno = 
709
+	ERROR_EVENT_DONE syscall.Errno = 710
+	ERROR_EVENT_PENDING syscall.Errno = 711
+	ERROR_CHECKING_FILE_SYSTEM syscall.Errno = 712
+	ERROR_FATAL_APP_EXIT syscall.Errno = 713
+	ERROR_PREDEFINED_HANDLE syscall.Errno = 714
+	ERROR_WAS_UNLOCKED syscall.Errno = 715
+	ERROR_SERVICE_NOTIFICATION syscall.Errno = 716
+	ERROR_WAS_LOCKED syscall.Errno = 717
+	ERROR_LOG_HARD_ERROR syscall.Errno = 718
+	ERROR_ALREADY_WIN32 syscall.Errno = 719
+	ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE syscall.Errno = 720
+	ERROR_NO_YIELD_PERFORMED syscall.Errno = 721
+	ERROR_TIMER_RESUME_IGNORED syscall.Errno = 722
+	ERROR_ARBITRATION_UNHANDLED syscall.Errno = 723
+	ERROR_CARDBUS_NOT_SUPPORTED syscall.Errno = 724
+	ERROR_MP_PROCESSOR_MISMATCH syscall.Errno = 725
+	ERROR_HIBERNATED syscall.Errno = 726
+	ERROR_RESUME_HIBERNATION syscall.Errno = 727
+	ERROR_FIRMWARE_UPDATED syscall.Errno = 728
+	ERROR_DRIVERS_LEAKING_LOCKED_PAGES syscall.Errno = 729
+	ERROR_WAKE_SYSTEM syscall.Errno = 730
+	ERROR_WAIT_1 syscall.Errno = 731
+	ERROR_WAIT_2 syscall.Errno = 732
+	ERROR_WAIT_3 syscall.Errno = 733
+	ERROR_WAIT_63 syscall.Errno = 734
+	ERROR_ABANDONED_WAIT_0 syscall.Errno = 735
+	ERROR_ABANDONED_WAIT_63 syscall.Errno = 736
+	ERROR_USER_APC syscall.Errno = 737
+	ERROR_KERNEL_APC syscall.Errno = 738
+	ERROR_ALERTED syscall.Errno = 739
+	ERROR_ELEVATION_REQUIRED syscall.Errno = 740
+	ERROR_REPARSE syscall.Errno = 741
+	ERROR_OPLOCK_BREAK_IN_PROGRESS syscall.Errno = 742
+	ERROR_VOLUME_MOUNTED syscall.Errno = 743
+	ERROR_RXACT_COMMITTED syscall.Errno = 744
+	ERROR_NOTIFY_CLEANUP syscall.Errno = 745
+	ERROR_PRIMARY_TRANSPORT_CONNECT_FAILED syscall.Errno = 746
+	ERROR_PAGE_FAULT_TRANSITION syscall.Errno = 747
+	ERROR_PAGE_FAULT_DEMAND_ZERO syscall.Errno = 748
+	ERROR_PAGE_FAULT_COPY_ON_WRITE syscall.Errno = 749
+	ERROR_PAGE_FAULT_GUARD_PAGE syscall.Errno = 750
+	ERROR_PAGE_FAULT_PAGING_FILE syscall.Errno = 751
+	ERROR_CACHE_PAGE_LOCKED syscall.Errno = 752
+	ERROR_CRASH_DUMP syscall.Errno = 753
+	ERROR_BUFFER_ALL_ZEROS syscall.Errno = 754
+	ERROR_REPARSE_OBJECT syscall.Errno = 755
+	ERROR_RESOURCE_REQUIREMENTS_CHANGED syscall.Errno = 756
+	ERROR_TRANSLATION_COMPLETE syscall.Errno = 757
+	ERROR_NOTHING_TO_TERMINATE syscall.Errno = 758
+	ERROR_PROCESS_NOT_IN_JOB syscall.Errno = 759
+	ERROR_PROCESS_IN_JOB syscall.Errno = 760
+	ERROR_VOLSNAP_HIBERNATE_READY syscall.Errno = 761
+	ERROR_FSFILTER_OP_COMPLETED_SUCCESSFULLY syscall.Errno = 762
+	ERROR_INTERRUPT_VECTOR_ALREADY_CONNECTED syscall.Errno = 763
+	ERROR_INTERRUPT_STILL_CONNECTED syscall.Errno = 764
+	ERROR_WAIT_FOR_OPLOCK syscall.Errno = 765
+	ERROR_DBG_EXCEPTION_HANDLED syscall.Errno = 766
+	ERROR_DBG_CONTINUE syscall.Errno = 767
+	ERROR_CALLBACK_POP_STACK syscall.Errno = 768
+	ERROR_COMPRESSION_DISABLED syscall.Errno = 769
+	ERROR_CANTFETCHBACKWARDS syscall.Errno = 770
+	ERROR_CANTSCROLLBACKWARDS syscall.Errno = 771
+	ERROR_ROWSNOTRELEASED syscall.Errno = 772
+	ERROR_BAD_ACCESSOR_FLAGS syscall.Errno = 773
+	ERROR_ERRORS_ENCOUNTERED syscall.Errno = 774
+	ERROR_NOT_CAPABLE syscall.Errno = 775
+	ERROR_REQUEST_OUT_OF_SEQUENCE syscall.Errno = 776
+	ERROR_VERSION_PARSE_ERROR syscall.Errno = 777
+	ERROR_BADSTARTPOSITION syscall.Errno = 778
+	ERROR_MEMORY_HARDWARE syscall.Errno = 779
+	ERROR_DISK_REPAIR_DISABLED syscall.Errno = 780
+	ERROR_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE syscall.Errno = 781
+	ERROR_SYSTEM_POWERSTATE_TRANSITION syscall.Errno = 782
+	ERROR_SYSTEM_POWERSTATE_COMPLEX_TRANSITION syscall.Errno = 783
+	ERROR_MCA_EXCEPTION syscall.Errno = 784
+	ERROR_ACCESS_AUDIT_BY_POLICY syscall.Errno = 785
+	ERROR_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY syscall.Errno = 786
+	ERROR_ABANDON_HIBERFILE syscall.Errno = 787
+	ERROR_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED syscall.Errno = 788
+	ERROR_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR syscall.Errno = 789
+	ERROR_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR syscall.Errno = 790
+	ERROR_BAD_MCFG_TABLE syscall.Errno = 791
+	ERROR_DISK_REPAIR_REDIRECTED syscall.Errno = 792
+	ERROR_DISK_REPAIR_UNSUCCESSFUL syscall.Errno = 793
+	ERROR_CORRUPT_LOG_OVERFULL syscall.Errno = 794
+	ERROR_CORRUPT_LOG_CORRUPTED syscall.Errno = 795
+	ERROR_CORRUPT_LOG_UNAVAILABLE syscall.Errno = 796
+	ERROR_CORRUPT_LOG_DELETED_FULL syscall.Errno = 797
+	ERROR_CORRUPT_LOG_CLEARED syscall.Errno = 798
+	ERROR_ORPHAN_NAME_EXHAUSTED syscall.Errno = 799
+	ERROR_OPLOCK_SWITCHED_TO_NEW_HANDLE syscall.Errno = 800
+	ERROR_CANNOT_GRANT_REQUESTED_OPLOCK syscall.Errno = 801
+	ERROR_CANNOT_BREAK_OPLOCK syscall.Errno = 802
+	ERROR_OPLOCK_HANDLE_CLOSED syscall.Errno = 803
+	ERROR_NO_ACE_CONDITION syscall.Errno = 804
+	ERROR_INVALID_ACE_CONDITION syscall.Errno = 805
+	ERROR_FILE_HANDLE_REVOKED syscall.Errno = 806
+	ERROR_IMAGE_AT_DIFFERENT_BASE syscall.Errno = 807
+	ERROR_ENCRYPTED_IO_NOT_POSSIBLE syscall.Errno = 808
+	ERROR_FILE_METADATA_OPTIMIZATION_IN_PROGRESS syscall.Errno = 809
+	ERROR_QUOTA_ACTIVITY syscall.Errno = 810
+	ERROR_HANDLE_REVOKED syscall.Errno = 811
+	ERROR_CALLBACK_INVOKE_INLINE syscall.Errno = 812
+	ERROR_CPU_SET_INVALID syscall.Errno = 813
+	ERROR_ENCLAVE_NOT_TERMINATED syscall.Errno = 814
+	ERROR_ENCLAVE_VIOLATION syscall.Errno = 815
+	ERROR_EA_ACCESS_DENIED syscall.Errno = 994
+	ERROR_OPERATION_ABORTED syscall.Errno = 995
+	ERROR_IO_INCOMPLETE syscall.Errno = 996
+	ERROR_IO_PENDING syscall.Errno = 997
+	ERROR_NOACCESS syscall.Errno = 998
+	ERROR_SWAPERROR syscall.Errno = 999
+	ERROR_STACK_OVERFLOW syscall.Errno = 1001
+	ERROR_INVALID_MESSAGE syscall.Errno = 1002
+	ERROR_CAN_NOT_COMPLETE syscall.Errno = 1003
+	ERROR_INVALID_FLAGS syscall.Errno = 1004
+	ERROR_UNRECOGNIZED_VOLUME syscall.Errno = 1005
+	ERROR_FILE_INVALID syscall.Errno = 1006
+	ERROR_FULLSCREEN_MODE syscall.Errno = 1007
+	ERROR_NO_TOKEN syscall.Errno = 1008
+	ERROR_BADDB syscall.Errno = 1009
+	ERROR_BADKEY syscall.Errno = 1010
+	ERROR_CANTOPEN syscall.Errno = 1011
+	ERROR_CANTREAD syscall.Errno = 1012
+	ERROR_CANTWRITE syscall.Errno = 1013
+	ERROR_REGISTRY_RECOVERED syscall.Errno = 1014
+	ERROR_REGISTRY_CORRUPT syscall.Errno = 1015
+	ERROR_REGISTRY_IO_FAILED syscall.Errno = 1016
+	ERROR_NOT_REGISTRY_FILE syscall.Errno = 1017
+	ERROR_KEY_DELETED syscall.Errno = 1018
+	ERROR_NO_LOG_SPACE syscall.Errno = 1019
+	ERROR_KEY_HAS_CHILDREN syscall.Errno = 1020
+	ERROR_CHILD_MUST_BE_VOLATILE syscall.Errno = 1021
+	ERROR_NOTIFY_ENUM_DIR syscall.Errno = 1022
+	ERROR_DEPENDENT_SERVICES_RUNNING syscall.Errno = 1051
+	ERROR_INVALID_SERVICE_CONTROL syscall.Errno = 1052
+	ERROR_SERVICE_REQUEST_TIMEOUT syscall.Errno = 1053
+	ERROR_SERVICE_NO_THREAD syscall.Errno = 1054
+	ERROR_SERVICE_DATABASE_LOCKED syscall.Errno = 1055
+	ERROR_SERVICE_ALREADY_RUNNING syscall.Errno = 1056
+	ERROR_INVALID_SERVICE_ACCOUNT syscall.Errno = 1057
+	ERROR_SERVICE_DISABLED syscall.Errno = 1058
+	ERROR_CIRCULAR_DEPENDENCY syscall.Errno = 1059
+	ERROR_SERVICE_DOES_NOT_EXIST syscall.Errno = 1060
+	ERROR_SERVICE_CANNOT_ACCEPT_CTRL syscall.Errno = 1061
+	ERROR_SERVICE_NOT_ACTIVE syscall.Errno = 1062
+	ERROR_FAILED_SERVICE_CONTROLLER_CONNECT syscall.Errno = 1063
+	ERROR_EXCEPTION_IN_SERVICE syscall.Errno = 1064
+	ERROR_DATABASE_DOES_NOT_EXIST syscall.Errno = 1065
+	ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066
+	ERROR_PROCESS_ABORTED syscall.Errno = 1067
+	ERROR_SERVICE_DEPENDENCY_FAIL syscall.Errno = 1068
+	ERROR_SERVICE_LOGON_FAILED syscall.Errno = 1069
+	ERROR_SERVICE_START_HANG syscall.Errno = 1070
+	ERROR_INVALID_SERVICE_LOCK syscall.Errno = 1071
+	ERROR_SERVICE_MARKED_FOR_DELETE syscall.Errno = 1072
+	ERROR_SERVICE_EXISTS syscall.Errno = 1073
+	ERROR_ALREADY_RUNNING_LKG syscall.Errno = 1074
+	ERROR_SERVICE_DEPENDENCY_DELETED syscall.Errno = 1075
+	ERROR_BOOT_ALREADY_ACCEPTED syscall.Errno = 1076
+	ERROR_SERVICE_NEVER_STARTED syscall.Errno = 1077
+	ERROR_DUPLICATE_SERVICE_NAME syscall.Errno = 1078
+	ERROR_DIFFERENT_SERVICE_ACCOUNT syscall.Errno = 1079
+	ERROR_CANNOT_DETECT_DRIVER_FAILURE syscall.Errno = 1080
+	ERROR_CANNOT_DETECT_PROCESS_ABORT syscall.Errno = 1081
+	ERROR_NO_RECOVERY_PROGRAM syscall.Errno = 1082
+	ERROR_SERVICE_NOT_IN_EXE syscall.Errno = 1083
+	ERROR_NOT_SAFEBOOT_SERVICE syscall.Errno = 1084
+	ERROR_END_OF_MEDIA syscall.Errno = 1100
+	ERROR_FILEMARK_DETECTED syscall.Errno = 1101
+	ERROR_BEGINNING_OF_MEDIA syscall.Errno = 1102
+	ERROR_SETMARK_DETECTED syscall.Errno = 1103
+	ERROR_NO_DATA_DETECTED syscall.Errno = 1104
+	ERROR_PARTITION_FAILURE syscall.Errno = 1105
+	ERROR_INVALID_BLOCK_LENGTH syscall.Errno = 1106
+	ERROR_DEVICE_NOT_PARTITIONED syscall.Errno = 1107
+	ERROR_UNABLE_TO_LOCK_MEDIA syscall.Errno = 1108
+	ERROR_UNABLE_TO_UNLOAD_MEDIA syscall.Errno = 1109
+	ERROR_MEDIA_CHANGED syscall.Errno = 1110
+	ERROR_BUS_RESET syscall.Errno = 1111
+	ERROR_NO_MEDIA_IN_DRIVE syscall.Errno = 1112
+	ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113
+	ERROR_DLL_INIT_FAILED syscall.Errno = 1114
+	ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 1115
+	ERROR_NO_SHUTDOWN_IN_PROGRESS syscall.Errno = 1116
+	ERROR_IO_DEVICE syscall.Errno = 1117
+	ERROR_SERIAL_NO_DEVICE syscall.Errno = 1118
+	ERROR_IRQ_BUSY syscall.Errno = 1119
+	ERROR_MORE_WRITES syscall.Errno = 1120
+	ERROR_COUNTER_TIMEOUT syscall.Errno = 1121
+	ERROR_FLOPPY_ID_MARK_NOT_FOUND syscall.Errno = 1122
+	ERROR_FLOPPY_WRONG_CYLINDER syscall.Errno = 1123
+	ERROR_FLOPPY_UNKNOWN_ERROR syscall.Errno = 1124
+	ERROR_FLOPPY_BAD_REGISTERS syscall.Errno = 1125
+	ERROR_DISK_RECALIBRATE_FAILED syscall.Errno = 1126
+	ERROR_DISK_OPERATION_FAILED syscall.Errno = 1127
+	ERROR_DISK_RESET_FAILED syscall.Errno = 1128
+	ERROR_EOM_OVERFLOW syscall.Errno = 1129
+	ERROR_NOT_ENOUGH_SERVER_MEMORY syscall.Errno = 1130
+	ERROR_POSSIBLE_DEADLOCK syscall.Errno = 1131
+	ERROR_MAPPED_ALIGNMENT syscall.Errno = 1132
+	ERROR_SET_POWER_STATE_VETOED syscall.Errno = 1140
+	ERROR_SET_POWER_STATE_FAILED syscall.Errno = 1141
+	ERROR_TOO_MANY_LINKS syscall.Errno = 1142
+	ERROR_OLD_WIN_VERSION syscall.Errno = 1150
+	ERROR_APP_WRONG_OS syscall.Errno = 1151
+	ERROR_SINGLE_INSTANCE_APP syscall.Errno = 1152
+	ERROR_RMODE_APP syscall.Errno = 1153
+	ERROR_INVALID_DLL syscall.Errno = 1154
+	ERROR_NO_ASSOCIATION syscall.Errno = 1155
+	ERROR_DDE_FAIL syscall.Errno = 1156
+	ERROR_DLL_NOT_FOUND syscall.Errno = 1157
+	ERROR_NO_MORE_USER_HANDLES syscall.Errno = 1158
+	ERROR_MESSAGE_SYNC_ONLY syscall.Errno = 1159
+	ERROR_SOURCE_ELEMENT_EMPTY syscall.Errno = 1160
+	ERROR_DESTINATION_ELEMENT_FULL syscall.Errno = 1161
+	ERROR_ILLEGAL_ELEMENT_ADDRESS syscall.Errno = 1162
+	ERROR_MAGAZINE_NOT_PRESENT syscall.Errno = 1163
+	ERROR_DEVICE_REINITIALIZATION_NEEDED syscall.Errno = 1164
+	ERROR_DEVICE_REQUIRES_CLEANING syscall.Errno = 1165
+	ERROR_DEVICE_DOOR_OPEN syscall.Errno = 1166
+	ERROR_DEVICE_NOT_CONNECTED syscall.Errno = 1167
+	ERROR_NOT_FOUND syscall.Errno = 1168
+	ERROR_NO_MATCH syscall.Errno = 1169
+	ERROR_SET_NOT_FOUND syscall.Errno = 1170
+	ERROR_POINT_NOT_FOUND syscall.Errno = 1171
+	ERROR_NO_TRACKING_SERVICE syscall.Errno = 1172
+	ERROR_NO_VOLUME_ID syscall.Errno = 1173
+	ERROR_UNABLE_TO_REMOVE_REPLACED syscall.Errno = 1175
+	ERROR_UNABLE_TO_MOVE_REPLACEMENT syscall.Errno = 1176
+	ERROR_UNABLE_TO_MOVE_REPLACEMENT_2 syscall.Errno = 1177
+	ERROR_JOURNAL_DELETE_IN_PROGRESS syscall.Errno = 1178
+	ERROR_JOURNAL_NOT_ACTIVE syscall.Errno = 1179
+	ERROR_POTENTIAL_FILE_FOUND syscall.Errno = 1180
+	ERROR_JOURNAL_ENTRY_DELETED syscall.Errno = 1181
+	ERROR_SHUTDOWN_IS_SCHEDULED syscall.Errno = 1190
+	ERROR_SHUTDOWN_USERS_LOGGED_ON syscall.Errno = 1191
+	ERROR_BAD_DEVICE syscall.Errno = 1200
+	ERROR_CONNECTION_UNAVAIL syscall.Errno = 1201
+	ERROR_DEVICE_ALREADY_REMEMBERED syscall.Errno = 1202
+	ERROR_NO_NET_OR_BAD_PATH syscall.Errno = 1203
+	ERROR_BAD_PROVIDER syscall.Errno = 1204
+	ERROR_CANNOT_OPEN_PROFILE syscall.Errno = 1205
+	ERROR_BAD_PROFILE syscall.Errno = 1206
+	ERROR_NOT_CONTAINER syscall.Errno = 1207
+	ERROR_EXTENDED_ERROR syscall.Errno = 1208
+	ERROR_INVALID_GROUPNAME syscall.Errno = 1209
+	ERROR_INVALID_COMPUTERNAME syscall.Errno = 1210
+	ERROR_INVALID_EVENTNAME syscall.Errno = 1211
+	ERROR_INVALID_DOMAINNAME syscall.Errno = 1212
+	ERROR_INVALID_SERVICENAME syscall.Errno = 1213
+	ERROR_INVALID_NETNAME syscall.Errno = 1214
+	ERROR_INVALID_SHARENAME syscall.Errno = 1215
+	ERROR_INVALID_PASSWORDNAME syscall.Errno = 1216
+	ERROR_INVALID_MESSAGENAME syscall.Errno = 1217
+	ERROR_INVALID_MESSAGEDEST syscall.Errno = 1218
+	ERROR_SESSION_CREDENTIAL_CONFLICT syscall.Errno = 1219
+	ERROR_REMOTE_SESSION_LIMIT_EXCEEDED syscall.Errno = 1220
+	ERROR_DUP_DOMAINNAME syscall.Errno = 1221
+	ERROR_NO_NETWORK syscall.Errno = 1222
+	ERROR_CANCELLED syscall.Errno = 1223
+	ERROR_USER_MAPPED_FILE syscall.Errno = 1224
+	ERROR_CONNECTION_REFUSED syscall.Errno = 1225
+	ERROR_GRACEFUL_DISCONNECT syscall.Errno = 1226
+	ERROR_ADDRESS_ALREADY_ASSOCIATED syscall.Errno = 1227
+	ERROR_ADDRESS_NOT_ASSOCIATED syscall.Errno = 1228
+	ERROR_CONNECTION_INVALID syscall.Errno = 1229
+	ERROR_CONNECTION_ACTIVE syscall.Errno = 1230
+	ERROR_NETWORK_UNREACHABLE syscall.Errno = 1231
+	ERROR_HOST_UNREACHABLE syscall.Errno = 1232
+	ERROR_PROTOCOL_UNREACHABLE syscall.Errno = 1233
+	ERROR_PORT_UNREACHABLE syscall.Errno = 1234
+	ERROR_REQUEST_ABORTED syscall.Errno = 1235
+	ERROR_CONNECTION_ABORTED syscall.Errno = 1236
+	ERROR_RETRY syscall.Errno = 1237
+	ERROR_CONNECTION_COUNT_LIMIT syscall.Errno = 1238
+	ERROR_LOGIN_TIME_RESTRICTION syscall.Errno = 1239
+	ERROR_LOGIN_WKSTA_RESTRICTION syscall.Errno = 1240
+	ERROR_INCORRECT_ADDRESS syscall.Errno = 1241
+	ERROR_ALREADY_REGISTERED syscall.Errno = 1242
+	ERROR_SERVICE_NOT_FOUND syscall.Errno = 1243
+	ERROR_NOT_AUTHENTICATED syscall.Errno = 1244
+	ERROR_NOT_LOGGED_ON syscall.Errno = 1245
+	ERROR_CONTINUE syscall.Errno = 1246
+	ERROR_ALREADY_INITIALIZED syscall.Errno = 1247
+	ERROR_NO_MORE_DEVICES syscall.Errno = 1248
+	ERROR_NO_SUCH_SITE syscall.Errno = 1249
+	ERROR_DOMAIN_CONTROLLER_EXISTS syscall.Errno = 1250
+	ERROR_ONLY_IF_CONNECTED syscall.Errno = 1251
+	ERROR_OVERRIDE_NOCHANGES syscall.Errno = 1252
+	ERROR_BAD_USER_PROFILE syscall.Errno = 1253
+	ERROR_NOT_SUPPORTED_ON_SBS syscall.Errno = 1254
+	ERROR_SERVER_SHUTDOWN_IN_PROGRESS syscall.Errno = 1255
+	ERROR_HOST_DOWN syscall.Errno = 1256
+	ERROR_NON_ACCOUNT_SID syscall.Errno = 1257
+	ERROR_NON_DOMAIN_SID syscall.Errno = 1258
+	ERROR_APPHELP_BLOCK syscall.Errno = 1259
+	ERROR_ACCESS_DISABLED_BY_POLICY syscall.Errno = 1260
+	ERROR_REG_NAT_CONSUMPTION syscall.Errno = 1261
+	ERROR_CSCSHARE_OFFLINE syscall.Errno = 1262
+	ERROR_PKINIT_FAILURE syscall.Errno = 1263
+	ERROR_SMARTCARD_SUBSYSTEM_FAILURE syscall.Errno = 1264
+	ERROR_DOWNGRADE_DETECTED syscall.Errno = 1265
+	ERROR_MACHINE_LOCKED syscall.Errno = 1271
+	ERROR_SMB_GUEST_LOGON_BLOCKED syscall.Errno = 1272
+	ERROR_CALLBACK_SUPPLIED_INVALID_DATA syscall.Errno = 1273
+	ERROR_SYNC_FOREGROUND_REFRESH_REQUIRED syscall.Errno = 1274
+	ERROR_DRIVER_BLOCKED syscall.Errno = 1275
+	ERROR_INVALID_IMPORT_OF_NON_DLL syscall.Errno = 1276
+	ERROR_ACCESS_DISABLED_WEBBLADE syscall.Errno = 1277
+	ERROR_ACCESS_DISABLED_WEBBLADE_TAMPER syscall.Errno = 1278
+	ERROR_RECOVERY_FAILURE syscall.Errno = 1279
+	ERROR_ALREADY_FIBER syscall.Errno = 1280
+	ERROR_ALREADY_THREAD syscall.Errno = 1281
+	ERROR_STACK_BUFFER_OVERRUN syscall.Errno = 1282
+	ERROR_PARAMETER_QUOTA_EXCEEDED syscall.Errno = 1283
+	ERROR_DEBUGGER_INACTIVE syscall.Errno = 1284
+	ERROR_DELAY_LOAD_FAILED syscall.Errno = 1285
+	ERROR_VDM_DISALLOWED syscall.Errno = 1286
+	ERROR_UNIDENTIFIED_ERROR syscall.Errno = 1287
+	ERROR_INVALID_CRUNTIME_PARAMETER syscall.Errno = 1288
+	ERROR_BEYOND_VDL syscall.Errno = 1289
+	ERROR_INCOMPATIBLE_SERVICE_SID_TYPE syscall.Errno = 1290
+	ERROR_DRIVER_PROCESS_TERMINATED syscall.Errno = 1291
+	ERROR_IMPLEMENTATION_LIMIT syscall.Errno = 1292
+	ERROR_PROCESS_IS_PROTECTED syscall.Errno = 1293
+	ERROR_SERVICE_NOTIFY_CLIENT_LAGGING syscall.Errno = 1294
+	ERROR_DISK_QUOTA_EXCEEDED syscall.Errno = 1295
+	ERROR_CONTENT_BLOCKED syscall.Errno = 1296
+	ERROR_INCOMPATIBLE_SERVICE_PRIVILEGE syscall.Errno = 1297
+	ERROR_APP_HANG syscall.Errno = 1298
+	ERROR_INVALID_LABEL syscall.Errno = 1299
+	ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
+	ERROR_SOME_NOT_MAPPED syscall.Errno = 1301
+	ERROR_NO_QUOTAS_FOR_ACCOUNT syscall.Errno = 1302
+	ERROR_LOCAL_USER_SESSION_KEY syscall.Errno = 1303
+	ERROR_NULL_LM_PASSWORD syscall.Errno = 1304
+	ERROR_UNKNOWN_REVISION syscall.Errno = 1305
+	ERROR_REVISION_MISMATCH syscall.Errno = 1306
+	ERROR_INVALID_OWNER syscall.Errno = 1307
+	ERROR_INVALID_PRIMARY_GROUP syscall.Errno = 1308
+	ERROR_NO_IMPERSONATION_TOKEN syscall.Errno = 1309
+	ERROR_CANT_DISABLE_MANDATORY syscall.Errno = 1310
+	ERROR_NO_LOGON_SERVERS syscall.Errno = 1311
+	ERROR_NO_SUCH_LOGON_SESSION syscall.Errno = 1312
+	ERROR_NO_SUCH_PRIVILEGE syscall.Errno = 1313
+	ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314
+	ERROR_INVALID_ACCOUNT_NAME syscall.Errno = 1315
+	ERROR_USER_EXISTS syscall.Errno = 1316
+	ERROR_NO_SUCH_USER syscall.Errno = 1317
+	ERROR_GROUP_EXISTS syscall.Errno = 1318
+	ERROR_NO_SUCH_GROUP syscall.Errno = 1319
+	ERROR_MEMBER_IN_GROUP syscall.Errno = 1320
+	ERROR_MEMBER_NOT_IN_GROUP syscall.Errno = 1321
+	ERROR_LAST_ADMIN syscall.Errno = 1322
+	ERROR_WRONG_PASSWORD syscall.Errno = 1323
+	ERROR_ILL_FORMED_PASSWORD syscall.Errno = 1324
+	ERROR_PASSWORD_RESTRICTION syscall.Errno = 1325
+	ERROR_LOGON_FAILURE syscall.Errno = 1326
+	ERROR_ACCOUNT_RESTRICTION syscall.Errno = 1327
+	ERROR_INVALID_LOGON_HOURS syscall.Errno = 1328
+	ERROR_INVALID_WORKSTATION syscall.Errno = 1329
+	ERROR_PASSWORD_EXPIRED syscall.Errno = 1330
+	ERROR_ACCOUNT_DISABLED syscall.Errno = 1331
+	ERROR_NONE_MAPPED syscall.Errno = 1332
+	ERROR_TOO_MANY_LUIDS_REQUESTED syscall.Errno = 1333
+	ERROR_LUIDS_EXHAUSTED syscall.Errno = 1334
+	ERROR_INVALID_SUB_AUTHORITY syscall.Errno = 1335
+	ERROR_INVALID_ACL syscall.Errno = 1336
+	ERROR_INVALID_SID syscall.Errno = 1337
+	ERROR_INVALID_SECURITY_DESCR syscall.Errno = 1338
+	ERROR_BAD_INHERITANCE_ACL syscall.Errno = 1340
+	ERROR_SERVER_DISABLED syscall.Errno = 1341
+	ERROR_SERVER_NOT_DISABLED syscall.Errno = 1342
+	ERROR_INVALID_ID_AUTHORITY syscall.Errno = 1343
+	ERROR_ALLOTTED_SPACE_EXCEEDED syscall.Errno = 1344
+	ERROR_INVALID_GROUP_ATTRIBUTES syscall.Errno = 1345
+	ERROR_BAD_IMPERSONATION_LEVEL syscall.Errno = 1346
+	ERROR_CANT_OPEN_ANONYMOUS syscall.Errno = 1347
+	ERROR_BAD_VALIDATION_CLASS syscall.Errno = 1348
+	ERROR_BAD_TOKEN_TYPE syscall.Errno = 1349
+	ERROR_NO_SECURITY_ON_OBJECT syscall.Errno = 1350
+	ERROR_CANT_ACCESS_DOMAIN_INFO syscall.Errno = 1351
+	ERROR_INVALID_SERVER_STATE syscall.Errno = 1352
+	ERROR_INVALID_DOMAIN_STATE syscall.Errno = 1353
+	ERROR_INVALID_DOMAIN_ROLE syscall.Errno = 1354
+	ERROR_NO_SUCH_DOMAIN syscall.Errno = 1355
+	ERROR_DOMAIN_EXISTS syscall.Errno = 1356
+	ERROR_DOMAIN_LIMIT_EXCEEDED syscall.Errno = 1357
+	ERROR_INTERNAL_DB_CORRUPTION syscall.Errno = 1358
+	ERROR_INTERNAL_ERROR syscall.Errno = 1359
+	ERROR_GENERIC_NOT_MAPPED syscall.Errno = 1360
+	ERROR_BAD_DESCRIPTOR_FORMAT syscall.Errno = 1361
+	ERROR_NOT_LOGON_PROCESS syscall.Errno = 1362
+	ERROR_LOGON_SESSION_EXISTS syscall.Errno = 1363
+	ERROR_NO_SUCH_PACKAGE syscall.Errno = 1364
+	ERROR_BAD_LOGON_SESSION_STATE syscall.Errno = 1365
+	ERROR_LOGON_SESSION_COLLISION syscall.Errno = 1366
+	ERROR_INVALID_LOGON_TYPE syscall.Errno = 1367
+	ERROR_CANNOT_IMPERSONATE syscall.Errno = 1368
+	ERROR_RXACT_INVALID_STATE syscall.Errno = 1369
+	ERROR_RXACT_COMMIT_FAILURE syscall.Errno = 1370
+	ERROR_SPECIAL_ACCOUNT syscall.Errno = 1371
+	ERROR_SPECIAL_GROUP syscall.Errno = 1372
+	ERROR_SPECIAL_USER syscall.Errno = 1373
+	ERROR_MEMBERS_PRIMARY_GROUP syscall.Errno = 1374
+	ERROR_TOKEN_ALREADY_IN_USE syscall.Errno = 1375
+	ERROR_NO_SUCH_ALIAS syscall.Errno = 1376
+	ERROR_MEMBER_NOT_IN_ALIAS syscall.Errno = 1377
+	ERROR_MEMBER_IN_ALIAS syscall.Errno = 1378
+	ERROR_ALIAS_EXISTS syscall.Errno = 1379
+	ERROR_LOGON_NOT_GRANTED syscall.Errno = 1380
+	ERROR_TOO_MANY_SECRETS syscall.Errno = 1381
+	ERROR_SECRET_TOO_LONG syscall.Errno = 1382
+	ERROR_INTERNAL_DB_ERROR syscall.Errno = 1383
+	ERROR_TOO_MANY_CONTEXT_IDS syscall.Errno = 1384
+	ERROR_LOGON_TYPE_NOT_GRANTED syscall.Errno = 1385
+	ERROR_NT_CROSS_ENCRYPTION_REQUIRED syscall.Errno = 1386
+	ERROR_NO_SUCH_MEMBER syscall.Errno = 1387
+	ERROR_INVALID_MEMBER syscall.Errno = 1388
+	ERROR_TOO_MANY_SIDS syscall.Errno = 1389
+	ERROR_LM_CROSS_ENCRYPTION_REQUIRED syscall.Errno = 1390
+	ERROR_NO_INHERITANCE syscall.Errno = 1391
+	ERROR_FILE_CORRUPT syscall.Errno = 1392
+	ERROR_DISK_CORRUPT syscall.Errno = 1393
+	ERROR_NO_USER_SESSION_KEY syscall.Errno = 1394
+	ERROR_LICENSE_QUOTA_EXCEEDED syscall.Errno = 1395
+	ERROR_WRONG_TARGET_NAME syscall.Errno = 1396
+	ERROR_MUTUAL_AUTH_FAILED syscall.Errno = 1397
+	ERROR_TIME_SKEW syscall.Errno = 1398
+	ERROR_CURRENT_DOMAIN_NOT_ALLOWED syscall.Errno = 1399
+	ERROR_INVALID_WINDOW_HANDLE syscall.Errno = 1400
+	ERROR_INVALID_MENU_HANDLE syscall.Errno = 1401
+	ERROR_INVALID_CURSOR_HANDLE syscall.Errno = 1402
+	ERROR_INVALID_ACCEL_HANDLE syscall.Errno = 1403
+	ERROR_INVALID_HOOK_HANDLE syscall.Errno = 1404
+	ERROR_INVALID_DWP_HANDLE syscall.Errno = 1405
+	ERROR_TLW_WITH_WSCHILD syscall.Errno = 1406
+	ERROR_CANNOT_FIND_WND_CLASS syscall.Errno = 1407
+	ERROR_WINDOW_OF_OTHER_THREAD syscall.Errno = 1408
+	ERROR_HOTKEY_ALREADY_REGISTERED syscall.Errno = 1409
+	ERROR_CLASS_ALREADY_EXISTS syscall.Errno = 1410
+	ERROR_CLASS_DOES_NOT_EXIST syscall.Errno = 1411
+	ERROR_CLASS_HAS_WINDOWS syscall.Errno = 1412
+	ERROR_INVALID_INDEX syscall.Errno = 1413
+	ERROR_INVALID_ICON_HANDLE syscall.Errno = 1414
+	ERROR_PRIVATE_DIALOG_INDEX syscall.Errno = 1415
+	ERROR_LISTBOX_ID_NOT_FOUND syscall.Errno = 1416
+	ERROR_NO_WILDCARD_CHARACTERS syscall.Errno = 1417
+	ERROR_CLIPBOARD_NOT_OPEN syscall.Errno = 1418
+	ERROR_HOTKEY_NOT_REGISTERED syscall.Errno = 1419
+	ERROR_WINDOW_NOT_DIALOG syscall.Errno = 1420
+	ERROR_CONTROL_ID_NOT_FOUND syscall.Errno = 1421
+	ERROR_INVALID_COMBOBOX_MESSAGE syscall.Errno = 1422
+	ERROR_WINDOW_NOT_COMBOBOX syscall.Errno = 1423
+	ERROR_INVALID_EDIT_HEIGHT syscall.Errno = 1424
+	ERROR_DC_NOT_FOUND syscall.Errno = 1425
+	ERROR_INVALID_HOOK_FILTER syscall.Errno = 1426
+	ERROR_INVALID_FILTER_PROC syscall.Errno = 1427
+	ERROR_HOOK_NEEDS_HMOD syscall.Errno = 1428
+	ERROR_GLOBAL_ONLY_HOOK syscall.Errno = 1429
+	ERROR_JOURNAL_HOOK_SET syscall.Errno = 1430
+	ERROR_HOOK_NOT_INSTALLED syscall.Errno = 1431
+	ERROR_INVALID_LB_MESSAGE syscall.Errno = 1432
+	ERROR_SETCOUNT_ON_BAD_LB syscall.Errno = 1433
+	ERROR_LB_WITHOUT_TABSTOPS syscall.Errno = 1434
+	ERROR_DESTROY_OBJECT_OF_OTHER_THREAD syscall.Errno = 1435
+	ERROR_CHILD_WINDOW_MENU syscall.Errno = 1436
+	ERROR_NO_SYSTEM_MENU syscall.Errno = 1437
+	ERROR_INVALID_MSGBOX_STYLE syscall.Errno = 1438
+	ERROR_INVALID_SPI_VALUE syscall.Errno = 1439
+	ERROR_SCREEN_ALREADY_LOCKED syscall.Errno = 1440
+	ERROR_HWNDS_HAVE_DIFF_PARENT syscall.Errno = 1441
+	ERROR_NOT_CHILD_WINDOW syscall.Errno = 1442
+	ERROR_INVALID_GW_COMMAND syscall.Errno = 1443
+	ERROR_INVALID_THREAD_ID syscall.Errno = 1444
+	ERROR_NON_MDICHILD_WINDOW syscall.Errno = 1445
+	ERROR_POPUP_ALREADY_ACTIVE syscall.Errno = 1446
+	ERROR_NO_SCROLLBARS syscall.Errno = 1447
+	ERROR_INVALID_SCROLLBAR_RANGE syscall.Errno = 1448
+	ERROR_INVALID_SHOWWIN_COMMAND syscall.Errno = 1449
+	ERROR_NO_SYSTEM_RESOURCES syscall.Errno = 1450
+	ERROR_NONPAGED_SYSTEM_RESOURCES syscall.Errno = 1451
+	ERROR_PAGED_SYSTEM_RESOURCES syscall.Errno = 1452
+	ERROR_WORKING_SET_QUOTA syscall.Errno = 1453
+	ERROR_PAGEFILE_QUOTA syscall.Errno = 1454
+	ERROR_COMMITMENT_LIMIT syscall.Errno = 1455
+	ERROR_MENU_ITEM_NOT_FOUND syscall.Errno = 1456
+	ERROR_INVALID_KEYBOARD_HANDLE syscall.Errno = 1457
+	ERROR_HOOK_TYPE_NOT_ALLOWED syscall.Errno = 1458
+	ERROR_REQUIRES_INTERACTIVE_WINDOWSTATION syscall.Errno = 1459
+	ERROR_TIMEOUT syscall.Errno = 1460
+	ERROR_INVALID_MONITOR_HANDLE syscall.Errno = 1461
+	ERROR_INCORRECT_SIZE syscall.Errno = 1462
+	ERROR_SYMLINK_CLASS_DISABLED syscall.Errno = 1463
+	ERROR_SYMLINK_NOT_SUPPORTED syscall.Errno = 1464
+	ERROR_XML_PARSE_ERROR syscall.Errno = 1465
+	ERROR_XMLDSIG_ERROR syscall.Errno = 1466
+	ERROR_RESTART_APPLICATION syscall.Errno = 1467
+	ERROR_WRONG_COMPARTMENT syscall.Errno = 1468
+	ERROR_AUTHIP_FAILURE syscall.Errno = 1469
+	ERROR_NO_NVRAM_RESOURCES syscall.Errno = 1470
+	ERROR_NOT_GUI_PROCESS syscall.Errno = 1471
+	ERROR_EVENTLOG_FILE_CORRUPT syscall.Errno = 1500
+	ERROR_EVENTLOG_CANT_START syscall.Errno = 1501
+	ERROR_LOG_FILE_FULL syscall.Errno = 1502
+	ERROR_EVENTLOG_FILE_CHANGED syscall.Errno = 1503
+	ERROR_CONTAINER_ASSIGNED syscall.Errno = 1504
+	ERROR_JOB_NO_CONTAINER syscall.Errno = 1505
+	ERROR_INVALID_TASK_NAME syscall.Errno = 1550
+	ERROR_INVALID_TASK_INDEX syscall.Errno = 1551
+	ERROR_THREAD_ALREADY_IN_TASK syscall.Errno = 1552
+	ERROR_INSTALL_SERVICE_FAILURE syscall.Errno = 1601
+	ERROR_INSTALL_USEREXIT syscall.Errno = 1602
+	ERROR_INSTALL_FAILURE syscall.Errno = 1603
+	ERROR_INSTALL_SUSPEND syscall.Errno = 1604
+	ERROR_UNKNOWN_PRODUCT syscall.Errno = 1605
+	ERROR_UNKNOWN_FEATURE syscall.Errno = 1606
+	ERROR_UNKNOWN_COMPONENT syscall.Errno = 1607
+	ERROR_UNKNOWN_PROPERTY syscall.Errno = 1608
+	ERROR_INVALID_HANDLE_STATE syscall.Errno = 1609
+	ERROR_BAD_CONFIGURATION syscall.Errno = 1610
+	ERROR_INDEX_ABSENT syscall.Errno = 1611
+	ERROR_INSTALL_SOURCE_ABSENT syscall.Errno = 1612
+	ERROR_INSTALL_PACKAGE_VERSION syscall.Errno = 1613
+	ERROR_PRODUCT_UNINSTALLED syscall.Errno = 1614
+	ERROR_BAD_QUERY_SYNTAX syscall.Errno = 1615
+	ERROR_INVALID_FIELD syscall.Errno = 1616
+	ERROR_DEVICE_REMOVED syscall.Errno = 1617
+	ERROR_INSTALL_ALREADY_RUNNING syscall.Errno = 1618
+	ERROR_INSTALL_PACKAGE_OPEN_FAILED syscall.Errno = 1619
+	ERROR_INSTALL_PACKAGE_INVALID syscall.Errno = 1620
+	ERROR_INSTALL_UI_FAILURE syscall.Errno = 1621
+	ERROR_INSTALL_LOG_FAILURE syscall.Errno = 1622
+	ERROR_INSTALL_LANGUAGE_UNSUPPORTED syscall.Errno = 1623
+	ERROR_INSTALL_TRANSFORM_FAILURE syscall.Errno = 1624
+	ERROR_INSTALL_PACKAGE_REJECTED syscall.Errno = 1625
+	ERROR_FUNCTION_NOT_CALLED syscall.Errno = 1626
+	ERROR_FUNCTION_FAILED syscall.Errno = 1627
+	ERROR_INVALID_TABLE syscall.Errno = 1628
+	ERROR_DATATYPE_MISMATCH syscall.Errno = 1629
+	ERROR_UNSUPPORTED_TYPE syscall.Errno = 1630
+	ERROR_CREATE_FAILED syscall.Errno = 1631
+	ERROR_INSTALL_TEMP_UNWRITABLE syscall.Errno = 1632
+	ERROR_INSTALL_PLATFORM_UNSUPPORTED syscall.Errno = 1633
+	ERROR_INSTALL_NOTUSED syscall.Errno = 1634
+	ERROR_PATCH_PACKAGE_OPEN_FAILED syscall.Errno = 1635
+	ERROR_PATCH_PACKAGE_INVALID syscall.Errno = 1636
+	ERROR_PATCH_PACKAGE_UNSUPPORTED syscall.Errno = 1637
+	ERROR_PRODUCT_VERSION syscall.Errno = 1638
+	ERROR_INVALID_COMMAND_LINE syscall.Errno = 1639
+	ERROR_INSTALL_REMOTE_DISALLOWED syscall.Errno = 1640
+	ERROR_SUCCESS_REBOOT_INITIATED syscall.Errno = 1641
+	ERROR_PATCH_TARGET_NOT_FOUND syscall.Errno = 1642
+	ERROR_PATCH_PACKAGE_REJECTED syscall.Errno = 1643
+	ERROR_INSTALL_TRANSFORM_REJECTED syscall.Errno = 1644
+	ERROR_INSTALL_REMOTE_PROHIBITED syscall.Errno = 1645
+	ERROR_PATCH_REMOVAL_UNSUPPORTED syscall.Errno = 1646
+	ERROR_UNKNOWN_PATCH syscall.Errno = 1647
+	ERROR_PATCH_NO_SEQUENCE syscall.Errno = 1648
+	ERROR_PATCH_REMOVAL_DISALLOWED syscall.Errno = 1649
+	ERROR_INVALID_PATCH_XML syscall.Errno = 1650
+	ERROR_PATCH_MANAGED_ADVERTISED_PRODUCT syscall.Errno = 1651
+	ERROR_INSTALL_SERVICE_SAFEBOOT syscall.Errno = 1652
+	ERROR_FAIL_FAST_EXCEPTION syscall.Errno = 1653
+	ERROR_INSTALL_REJECTED syscall.Errno = 1654
+	ERROR_DYNAMIC_CODE_BLOCKED syscall.Errno = 1655
+	ERROR_NOT_SAME_OBJECT syscall.Errno = 1656
+	ERROR_STRICT_CFG_VIOLATION syscall.Errno = 1657
+	ERROR_SET_CONTEXT_DENIED syscall.Errno = 1660
+	ERROR_CROSS_PARTITION_VIOLATION syscall.Errno = 1661
+	RPC_S_INVALID_STRING_BINDING syscall.Errno = 1700
+	RPC_S_WRONG_KIND_OF_BINDING syscall.Errno = 1701
+	RPC_S_INVALID_BINDING syscall.Errno = 1702
+	RPC_S_PROTSEQ_NOT_SUPPORTED syscall.Errno = 1703
+	RPC_S_INVALID_RPC_PROTSEQ syscall.Errno = 1704
+	RPC_S_INVALID_STRING_UUID syscall.Errno = 1705
+	RPC_S_INVALID_ENDPOINT_FORMAT syscall.Errno = 1706
+	RPC_S_INVALID_NET_ADDR syscall.Errno = 1707
+	RPC_S_NO_ENDPOINT_FOUND syscall.Errno = 1708
+	RPC_S_INVALID_TIMEOUT syscall.Errno = 1709
+	RPC_S_OBJECT_NOT_FOUND syscall.Errno = 1710
+	RPC_S_ALREADY_REGISTERED syscall.Errno = 1711
+	RPC_S_TYPE_ALREADY_REGISTERED syscall.Errno = 1712
+	RPC_S_ALREADY_LISTENING syscall.Errno = 1713
+	RPC_S_NO_PROTSEQS_REGISTERED syscall.Errno = 1714
+	RPC_S_NOT_LISTENING syscall.Errno = 1715
+	RPC_S_UNKNOWN_MGR_TYPE syscall.Errno = 1716
+	RPC_S_UNKNOWN_IF syscall.Errno = 1717
+	RPC_S_NO_BINDINGS syscall.Errno = 1718
+	RPC_S_NO_PROTSEQS syscall.Errno = 1719
+	RPC_S_CANT_CREATE_ENDPOINT syscall.Errno = 1720
+	RPC_S_OUT_OF_RESOURCES syscall.Errno = 1721
+	RPC_S_SERVER_UNAVAILABLE syscall.Errno = 1722
+	RPC_S_SERVER_TOO_BUSY syscall.Errno = 1723
+	RPC_S_INVALID_NETWORK_OPTIONS syscall.Errno = 1724
+	RPC_S_NO_CALL_ACTIVE syscall.Errno = 1725
+	RPC_S_CALL_FAILED syscall.Errno = 1726
+	RPC_S_CALL_FAILED_DNE syscall.Errno = 1727
+	RPC_S_PROTOCOL_ERROR syscall.Errno = 1728
+	RPC_S_PROXY_ACCESS_DENIED syscall.Errno = 1729
+	RPC_S_UNSUPPORTED_TRANS_SYN syscall.Errno = 1730
+	RPC_S_UNSUPPORTED_TYPE syscall.Errno = 1732
+	RPC_S_INVALID_TAG syscall.Errno = 1733
+	RPC_S_INVALID_BOUND syscall.Errno = 1734
+	RPC_S_NO_ENTRY_NAME syscall.Errno = 1735
+	RPC_S_INVALID_NAME_SYNTAX syscall.Errno = 1736
+	RPC_S_UNSUPPORTED_NAME_SYNTAX syscall.Errno = 1737
+	RPC_S_UUID_NO_ADDRESS syscall.Errno = 1739
+	RPC_S_DUPLICATE_ENDPOINT syscall.Errno = 1740
+	RPC_S_UNKNOWN_AUTHN_TYPE syscall.Errno = 1741
+	RPC_S_MAX_CALLS_TOO_SMALL syscall.Errno = 1742
+	RPC_S_STRING_TOO_LONG syscall.Errno = 1743
+	RPC_S_PROTSEQ_NOT_FOUND syscall.Errno = 1744
+	RPC_S_PROCNUM_OUT_OF_RANGE syscall.Errno = 1745
+	RPC_S_BINDING_HAS_NO_AUTH syscall.Errno = 1746
+	RPC_S_UNKNOWN_AUTHN_SERVICE syscall.Errno = 1747
+	RPC_S_UNKNOWN_AUTHN_LEVEL syscall.Errno = 1748
+	RPC_S_INVALID_AUTH_IDENTITY syscall.Errno = 1749
+	RPC_S_UNKNOWN_AUTHZ_SERVICE syscall.Errno = 1750
+	EPT_S_INVALID_ENTRY syscall.Errno = 1751
+	EPT_S_CANT_PERFORM_OP syscall.Errno = 1752
+	EPT_S_NOT_REGISTERED syscall.Errno = 1753
+	RPC_S_NOTHING_TO_EXPORT syscall.Errno = 1754
+	RPC_S_INCOMPLETE_NAME syscall.Errno = 1755
+	RPC_S_INVALID_VERS_OPTION syscall.Errno = 1756
+	RPC_S_NO_MORE_MEMBERS syscall.Errno = 1757
+	RPC_S_NOT_ALL_OBJS_UNEXPORTED syscall.Errno = 1758
+	RPC_S_INTERFACE_NOT_FOUND syscall.Errno = 1759
+	RPC_S_ENTRY_ALREADY_EXISTS syscall.Errno = 1760
+	RPC_S_ENTRY_NOT_FOUND syscall.Errno = 1761
+	RPC_S_NAME_SERVICE_UNAVAILABLE syscall.Errno = 1762
+	RPC_S_INVALID_NAF_ID syscall.Errno = 1763
+	RPC_S_CANNOT_SUPPORT syscall.Errno = 1764
+	RPC_S_NO_CONTEXT_AVAILABLE syscall.Errno = 1765
+	RPC_S_INTERNAL_ERROR syscall.Errno = 1766
+	RPC_S_ZERO_DIVIDE syscall.Errno = 1767
+	RPC_S_ADDRESS_ERROR syscall.Errno = 1768
+	RPC_S_FP_DIV_ZERO syscall.Errno = 1769
+	RPC_S_FP_UNDERFLOW syscall.Errno = 1770
+	RPC_S_FP_OVERFLOW syscall.Errno = 1771
+	RPC_X_NO_MORE_ENTRIES syscall.Errno = 1772
+	RPC_X_SS_CHAR_TRANS_OPEN_FAIL syscall.Errno = 1773
+	RPC_X_SS_CHAR_TRANS_SHORT_FILE syscall.Errno = 1774
+	RPC_X_SS_IN_NULL_CONTEXT syscall.Errno = 1775
+	RPC_X_SS_CONTEXT_DAMAGED syscall.Errno = 1777
+	RPC_X_SS_HANDLES_MISMATCH syscall.Errno = 1778
+	RPC_X_SS_CANNOT_GET_CALL_HANDLE syscall.Errno = 1779
+	RPC_X_NULL_REF_POINTER syscall.Errno = 1780
+	RPC_X_ENUM_VALUE_OUT_OF_RANGE syscall.Errno = 1781
+	RPC_X_BYTE_COUNT_TOO_SMALL syscall.Errno = 1782
+	RPC_X_BAD_STUB_DATA syscall.Errno = 1783
+	ERROR_INVALID_USER_BUFFER syscall.Errno = 1784
+	ERROR_UNRECOGNIZED_MEDIA syscall.Errno = 1785
+	ERROR_NO_TRUST_LSA_SECRET syscall.Errno = 1786
+	ERROR_NO_TRUST_SAM_ACCOUNT syscall.Errno = 1787
+	ERROR_TRUSTED_DOMAIN_FAILURE syscall.Errno = 1788
+	ERROR_TRUSTED_RELATIONSHIP_FAILURE syscall.Errno = 1789
+	ERROR_TRUST_FAILURE syscall.Errno = 1790
+	RPC_S_CALL_IN_PROGRESS syscall.Errno = 1791
+	ERROR_NETLOGON_NOT_STARTED syscall.Errno = 1792
+	ERROR_ACCOUNT_EXPIRED syscall.Errno = 1793
+	ERROR_REDIRECTOR_HAS_OPEN_HANDLES syscall.Errno = 1794
+	ERROR_PRINTER_DRIVER_ALREADY_INSTALLED syscall.Errno = 1795
+	ERROR_UNKNOWN_PORT syscall.Errno = 1796
+	ERROR_UNKNOWN_PRINTER_DRIVER syscall.Errno = 1797
+	ERROR_UNKNOWN_PRINTPROCESSOR syscall.Errno = 1798
+	ERROR_INVALID_SEPARATOR_FILE syscall.Errno = 1799
+	ERROR_INVALID_PRIORITY syscall.Errno = 1800
+	ERROR_INVALID_PRINTER_NAME syscall.Errno = 1801
+	ERROR_PRINTER_ALREADY_EXISTS syscall.Errno = 1802
+	ERROR_INVALID_PRINTER_COMMAND syscall.Errno = 1803
+	ERROR_INVALID_DATATYPE syscall.Errno = 1804
+	ERROR_INVALID_ENVIRONMENT syscall.Errno = 1805
+	RPC_S_NO_MORE_BINDINGS syscall.Errno = 1806
+	ERROR_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT syscall.Errno = 1807
+	ERROR_NOLOGON_WORKSTATION_TRUST_ACCOUNT syscall.Errno = 1808
+	ERROR_NOLOGON_SERVER_TRUST_ACCOUNT syscall.Errno = 1809
+	ERROR_DOMAIN_TRUST_INCONSISTENT syscall.Errno = 1810
+	ERROR_SERVER_HAS_OPEN_HANDLES syscall.Errno = 1811
+	ERROR_RESOURCE_DATA_NOT_FOUND syscall.Errno = 1812
+	ERROR_RESOURCE_TYPE_NOT_FOUND syscall.Errno = 1813
+	ERROR_RESOURCE_NAME_NOT_FOUND syscall.Errno = 1814
+	ERROR_RESOURCE_LANG_NOT_FOUND syscall.Errno = 1815
+	ERROR_NOT_ENOUGH_QUOTA syscall.Errno = 1816
+	RPC_S_NO_INTERFACES syscall.Errno = 1817
+	RPC_S_CALL_CANCELLED syscall.Errno = 1818
+	RPC_S_BINDING_INCOMPLETE syscall.Errno = 1819
+	RPC_S_COMM_FAILURE syscall.Errno = 1820
+	RPC_S_UNSUPPORTED_AUTHN_LEVEL syscall.Errno = 1821
+	RPC_S_NO_PRINC_NAME syscall.Errno = 1822
+	RPC_S_NOT_RPC_ERROR syscall.Errno = 1823
+	RPC_S_UUID_LOCAL_ONLY syscall.Errno = 1824
+	RPC_S_SEC_PKG_ERROR syscall.Errno = 1825
+	RPC_S_NOT_CANCELLED syscall.Errno = 1826
+	RPC_X_INVALID_ES_ACTION syscall.Errno = 1827
+	RPC_X_WRONG_ES_VERSION syscall.Errno = 1828
+	RPC_X_WRONG_STUB_VERSION syscall.Errno = 1829
+	RPC_X_INVALID_PIPE_OBJECT syscall.Errno = 1830
+	RPC_X_WRONG_PIPE_ORDER syscall.Errno = 1831
+	RPC_X_WRONG_PIPE_VERSION syscall.Errno = 1832
+	RPC_S_COOKIE_AUTH_FAILED syscall.Errno = 1833
+	RPC_S_DO_NOT_DISTURB syscall.Errno = 1834
+	RPC_S_SYSTEM_HANDLE_COUNT_EXCEEDED syscall.Errno = 1835
+	RPC_S_SYSTEM_HANDLE_TYPE_MISMATCH syscall.Errno = 1836
+	RPC_S_GROUP_MEMBER_NOT_FOUND syscall.Errno = 1898
+	EPT_S_CANT_CREATE syscall.Errno = 1899
+	RPC_S_INVALID_OBJECT syscall.Errno = 1900
+	ERROR_INVALID_TIME syscall.Errno = 1901
+	ERROR_INVALID_FORM_NAME syscall.Errno = 1902
+	ERROR_INVALID_FORM_SIZE syscall.Errno = 1903
+	ERROR_ALREADY_WAITING syscall.Errno = 1904
+	ERROR_PRINTER_DELETED syscall.Errno = 1905
+	ERROR_INVALID_PRINTER_STATE syscall.Errno = 1906
+	ERROR_PASSWORD_MUST_CHANGE syscall.Errno = 1907
+	ERROR_DOMAIN_CONTROLLER_NOT_FOUND syscall.Errno = 1908
+	ERROR_ACCOUNT_LOCKED_OUT syscall.Errno = 1909
+	OR_INVALID_OXID syscall.Errno = 1910
+	OR_INVALID_OID syscall.Errno = 1911
+	OR_INVALID_SET syscall.Errno = 1912
+	RPC_S_SEND_INCOMPLETE syscall.Errno = 1913
+	RPC_S_INVALID_ASYNC_HANDLE syscall.Errno = 1914
+	RPC_S_INVALID_ASYNC_CALL syscall.Errno = 1915
+	RPC_X_PIPE_CLOSED syscall.Errno = 1916
+	RPC_X_PIPE_DISCIPLINE_ERROR syscall.Errno = 1917
+	RPC_X_PIPE_EMPTY syscall.Errno = 1918
+	ERROR_NO_SITENAME syscall.Errno = 1919
+	ERROR_CANT_ACCESS_FILE syscall.Errno = 1920
+	ERROR_CANT_RESOLVE_FILENAME syscall.Errno = 1921
+	RPC_S_ENTRY_TYPE_MISMATCH syscall.Errno = 1922
+	RPC_S_NOT_ALL_OBJS_EXPORTED syscall.Errno = 1923
+	RPC_S_INTERFACE_NOT_EXPORTED syscall.Errno = 1924
+	RPC_S_PROFILE_NOT_ADDED syscall.Errno = 1925
+	RPC_S_PRF_ELT_NOT_ADDED syscall.Errno = 1926
+	RPC_S_PRF_ELT_NOT_REMOVED syscall.Errno = 1927
+	RPC_S_GRP_ELT_NOT_ADDED syscall.Errno = 1928
+	RPC_S_GRP_ELT_NOT_REMOVED syscall.Errno = 1929
+	ERROR_KM_DRIVER_BLOCKED syscall.Errno = 1930
+	ERROR_CONTEXT_EXPIRED syscall.Errno = 1931
+	ERROR_PER_USER_TRUST_QUOTA_EXCEEDED syscall.Errno = 1932
+	ERROR_ALL_USER_TRUST_QUOTA_EXCEEDED syscall.Errno = 1933
+	ERROR_USER_DELETE_TRUST_QUOTA_EXCEEDED syscall.Errno = 1934
+	ERROR_AUTHENTICATION_FIREWALL_FAILED syscall.Errno = 1935
+	ERROR_REMOTE_PRINT_CONNECTIONS_BLOCKED syscall.Errno = 1936
+	ERROR_NTLM_BLOCKED syscall.Errno = 1937
+	ERROR_PASSWORD_CHANGE_REQUIRED syscall.Errno = 1938
+	ERROR_LOST_MODE_LOGON_RESTRICTION syscall.Errno = 1939
+	ERROR_INVALID_PIXEL_FORMAT syscall.Errno = 2000
+	ERROR_BAD_DRIVER syscall.Errno = 2001
+	ERROR_INVALID_WINDOW_STYLE syscall.Errno = 2002
+	ERROR_METAFILE_NOT_SUPPORTED syscall.Errno = 2003
+	ERROR_TRANSFORM_NOT_SUPPORTED syscall.Errno = 2004
+	ERROR_CLIPPING_NOT_SUPPORTED syscall.Errno = 2005
+	ERROR_INVALID_CMM syscall.Errno = 2010
+	ERROR_INVALID_PROFILE syscall.Errno = 2011
+	ERROR_TAG_NOT_FOUND syscall.Errno = 2012
+	ERROR_TAG_NOT_PRESENT syscall.Errno = 2013
+	ERROR_DUPLICATE_TAG syscall.Errno = 2014
+	ERROR_PROFILE_NOT_ASSOCIATED_WITH_DEVICE syscall.Errno = 2015
+	ERROR_PROFILE_NOT_FOUND syscall.Errno = 2016
+	ERROR_INVALID_COLORSPACE syscall.Errno = 2017
+	ERROR_ICM_NOT_ENABLED syscall.Errno = 2018
+	ERROR_DELETING_ICM_XFORM syscall.Errno = 2019
+	ERROR_INVALID_TRANSFORM syscall.Errno = 2020
+	ERROR_COLORSPACE_MISMATCH syscall.Errno = 2021
+	ERROR_INVALID_COLORINDEX syscall.Errno = 2022
+	ERROR_PROFILE_DOES_NOT_MATCH_DEVICE syscall.Errno = 2023
+	ERROR_CONNECTED_OTHER_PASSWORD syscall.Errno = 2108
+	ERROR_CONNECTED_OTHER_PASSWORD_DEFAULT syscall.Errno = 2109
+	ERROR_BAD_USERNAME syscall.Errno = 2202
+	ERROR_NOT_CONNECTED syscall.Errno = 2250
+	ERROR_OPEN_FILES syscall.Errno = 2401
+	ERROR_ACTIVE_CONNECTIONS syscall.Errno = 2402
+	ERROR_DEVICE_IN_USE syscall.Errno = 2404
+	ERROR_UNKNOWN_PRINT_MONITOR syscall.Errno = 3000
+	ERROR_PRINTER_DRIVER_IN_USE syscall.Errno = 3001
+	ERROR_SPOOL_FILE_NOT_FOUND syscall.Errno = 3002
+	ERROR_SPL_NO_STARTDOC syscall.Errno = 3003
+	ERROR_SPL_NO_ADDJOB syscall.Errno = 3004
+	ERROR_PRINT_PROCESSOR_ALREADY_INSTALLED syscall.Errno = 3005
+	ERROR_PRINT_MONITOR_ALREADY_INSTALLED syscall.Errno = 3006
+	ERROR_INVALID_PRINT_MONITOR syscall.Errno = 3007
+	ERROR_PRINT_MONITOR_IN_USE syscall.Errno = 3008
+	ERROR_PRINTER_HAS_JOBS_QUEUED syscall.Errno = 3009
+	ERROR_SUCCESS_REBOOT_REQUIRED syscall.Errno = 3010
+	ERROR_SUCCESS_RESTART_REQUIRED syscall.Errno = 3011
+	ERROR_PRINTER_NOT_FOUND syscall.Errno = 3012
+	ERROR_PRINTER_DRIVER_WARNED syscall.Errno = 3013
+	ERROR_PRINTER_DRIVER_BLOCKED syscall.Errno = 3014
+	ERROR_PRINTER_DRIVER_PACKAGE_IN_USE syscall.Errno = 3015
+	ERROR_CORE_DRIVER_PACKAGE_NOT_FOUND syscall.Errno = 3016
+	ERROR_FAIL_REBOOT_REQUIRED syscall.Errno = 3017
+	ERROR_FAIL_REBOOT_INITIATED syscall.Errno = 3018
+	ERROR_PRINTER_DRIVER_DOWNLOAD_NEEDED syscall.Errno = 3019
+	ERROR_PRINT_JOB_RESTART_REQUIRED syscall.Errno = 3020
+	ERROR_INVALID_PRINTER_DRIVER_MANIFEST syscall.Errno = 3021
+	ERROR_PRINTER_NOT_SHAREABLE syscall.Errno = 3022
+	ERROR_REQUEST_PAUSED syscall.Errno = 3050
+	ERROR_APPEXEC_CONDITION_NOT_SATISFIED syscall.Errno = 3060
+	ERROR_APPEXEC_HANDLE_INVALIDATED syscall.Errno = 3061
+	ERROR_APPEXEC_INVALID_HOST_GENERATION syscall.Errno = 3062
+	ERROR_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION syscall.Errno = 3063
+	ERROR_APPEXEC_INVALID_HOST_STATE syscall.Errno = 3064
+	ERROR_APPEXEC_NO_DONOR syscall.Errno = 3065
+	ERROR_APPEXEC_HOST_ID_MISMATCH syscall.Errno = 3066
+	ERROR_APPEXEC_UNKNOWN_USER syscall.Errno = 3067
+	ERROR_IO_REISSUE_AS_CACHED syscall.Errno = 3950
+	ERROR_WINS_INTERNAL syscall.Errno = 4000
+	ERROR_CAN_NOT_DEL_LOCAL_WINS syscall.Errno = 4001
+	ERROR_STATIC_INIT syscall.Errno = 4002
+	ERROR_INC_BACKUP syscall.Errno = 4003
+	ERROR_FULL_BACKUP syscall.Errno = 4004
+	ERROR_REC_NON_EXISTENT syscall.Errno = 4005
+	ERROR_RPL_NOT_ALLOWED syscall.Errno = 4006
+	PEERDIST_ERROR_CONTENTINFO_VERSION_UNSUPPORTED syscall.Errno = 4050
+	PEERDIST_ERROR_CANNOT_PARSE_CONTENTINFO syscall.Errno = 4051
+	PEERDIST_ERROR_MISSING_DATA syscall.Errno = 4052
+	PEERDIST_ERROR_NO_MORE syscall.Errno = 4053
+	PEERDIST_ERROR_NOT_INITIALIZED syscall.Errno = 4054
+	PEERDIST_ERROR_ALREADY_INITIALIZED syscall.Errno = 4055
+	PEERDIST_ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 4056
+	PEERDIST_ERROR_INVALIDATED syscall.Errno = 4057
+	PEERDIST_ERROR_ALREADY_EXISTS syscall.Errno = 4058
+	PEERDIST_ERROR_OPERATION_NOTFOUND syscall.Errno = 4059
+	PEERDIST_ERROR_ALREADY_COMPLETED syscall.Errno = 4060
+	PEERDIST_ERROR_OUT_OF_BOUNDS syscall.Errno = 4061
+	PEERDIST_ERROR_VERSION_UNSUPPORTED syscall.Errno = 4062
+	PEERDIST_ERROR_INVALID_CONFIGURATION syscall.Errno = 4063
+	PEERDIST_ERROR_NOT_LICENSED syscall.Errno = 4064
+	PEERDIST_ERROR_SERVICE_UNAVAILABLE syscall.Errno = 4065
+	PEERDIST_ERROR_TRUST_FAILURE syscall.Errno = 4066
+	ERROR_DHCP_ADDRESS_CONFLICT syscall.Errno = 4100
+	ERROR_WMI_GUID_NOT_FOUND syscall.Errno = 4200
+	ERROR_WMI_INSTANCE_NOT_FOUND syscall.Errno = 4201
+	ERROR_WMI_ITEMID_NOT_FOUND syscall.Errno = 4202
+	ERROR_WMI_TRY_AGAIN syscall.Errno = 4203
+	ERROR_WMI_DP_NOT_FOUND syscall.Errno = 4204
+	ERROR_WMI_UNRESOLVED_INSTANCE_REF syscall.Errno = 4205
+	ERROR_WMI_ALREADY_ENABLED syscall.Errno = 4206
+	ERROR_WMI_GUID_DISCONNECTED syscall.Errno = 4207
+	ERROR_WMI_SERVER_UNAVAILABLE syscall.Errno = 4208
+	ERROR_WMI_DP_FAILED syscall.Errno = 4209
+	ERROR_WMI_INVALID_MOF syscall.Errno = 4210
+	ERROR_WMI_INVALID_REGINFO syscall.Errno = 4211
+	ERROR_WMI_ALREADY_DISABLED syscall.Errno = 4212
+	ERROR_WMI_READ_ONLY syscall.Errno = 4213
+	ERROR_WMI_SET_FAILURE syscall.Errno = 4214
+	ERROR_NOT_APPCONTAINER syscall.Errno = 4250
+	ERROR_APPCONTAINER_REQUIRED syscall.Errno = 4251
+	ERROR_NOT_SUPPORTED_IN_APPCONTAINER syscall.Errno = 4252
+	ERROR_INVALID_PACKAGE_SID_LENGTH syscall.Errno = 4253
+	ERROR_INVALID_MEDIA syscall.Errno = 4300
+	ERROR_INVALID_LIBRARY syscall.Errno = 4301
+	ERROR_INVALID_MEDIA_POOL syscall.Errno = 4302
+	ERROR_DRIVE_MEDIA_MISMATCH syscall.Errno = 4303
+	ERROR_MEDIA_OFFLINE syscall.Errno = 4304
+	ERROR_LIBRARY_OFFLINE syscall.Errno = 4305
+	ERROR_EMPTY syscall.Errno = 4306
+	ERROR_NOT_EMPTY syscall.Errno = 4307
+	ERROR_MEDIA_UNAVAILABLE syscall.Errno = 4308
+	ERROR_RESOURCE_DISABLED syscall.Errno = 4309
+	ERROR_INVALID_CLEANER syscall.Errno = 4310
+	ERROR_UNABLE_TO_CLEAN syscall.Errno = 4311
+	ERROR_OBJECT_NOT_FOUND syscall.Errno = 4312
+	ERROR_DATABASE_FAILURE syscall.Errno = 4313
+	ERROR_DATABASE_FULL syscall.Errno = 4314
+	ERROR_MEDIA_INCOMPATIBLE syscall.Errno = 4315
+	ERROR_RESOURCE_NOT_PRESENT syscall.Errno = 4316
+	ERROR_INVALID_OPERATION syscall.Errno = 4317
+	ERROR_MEDIA_NOT_AVAILABLE syscall.Errno = 4318
+	ERROR_DEVICE_NOT_AVAILABLE syscall.Errno = 4319
+	ERROR_REQUEST_REFUSED syscall.Errno = 4320
+	ERROR_INVALID_DRIVE_OBJECT syscall.Errno = 4321
+	ERROR_LIBRARY_FULL syscall.Errno = 4322
+	ERROR_MEDIUM_NOT_ACCESSIBLE syscall.Errno = 4323
+	ERROR_UNABLE_TO_LOAD_MEDIUM syscall.Errno = 4324
+	ERROR_UNABLE_TO_INVENTORY_DRIVE syscall.Errno = 4325
+	ERROR_UNABLE_TO_INVENTORY_SLOT syscall.Errno = 4326
+	ERROR_UNABLE_TO_INVENTORY_TRANSPORT syscall.Errno = 4327
+	ERROR_TRANSPORT_FULL syscall.Errno = 4328
+	ERROR_CONTROLLING_IEPORT syscall.Errno = 4329
+	ERROR_UNABLE_TO_EJECT_MOUNTED_MEDIA syscall.Errno = 4330
+	ERROR_CLEANER_SLOT_SET syscall.Errno = 4331
+	ERROR_CLEANER_SLOT_NOT_SET syscall.Errno = 4332
+	ERROR_CLEANER_CARTRIDGE_SPENT syscall.Errno = 4333
+	ERROR_UNEXPECTED_OMID syscall.Errno = 4334
+	ERROR_CANT_DELETE_LAST_ITEM syscall.Errno = 4335
+	ERROR_MESSAGE_EXCEEDS_MAX_SIZE syscall.Errno = 4336
+	ERROR_VOLUME_CONTAINS_SYS_FILES syscall.Errno = 4337
+	ERROR_INDIGENOUS_TYPE syscall.Errno = 4338
+	ERROR_NO_SUPPORTING_DRIVES syscall.Errno = 4339
+	ERROR_CLEANER_CARTRIDGE_INSTALLED syscall.Errno = 4340
+	ERROR_IEPORT_FULL syscall.Errno = 4341
+	ERROR_FILE_OFFLINE syscall.Errno = 4350
+	ERROR_REMOTE_STORAGE_NOT_ACTIVE syscall.Errno = 4351
+	ERROR_REMOTE_STORAGE_MEDIA_ERROR syscall.Errno = 4352
+	ERROR_NOT_A_REPARSE_POINT syscall.Errno = 4390
+	ERROR_REPARSE_ATTRIBUTE_CONFLICT syscall.Errno = 4391
+	ERROR_INVALID_REPARSE_DATA syscall.Errno = 4392
+	ERROR_REPARSE_TAG_INVALID syscall.Errno = 4393
+	ERROR_REPARSE_TAG_MISMATCH syscall.Errno = 4394
+	ERROR_REPARSE_POINT_ENCOUNTERED syscall.Errno = 4395
+	ERROR_APP_DATA_NOT_FOUND syscall.Errno = 4400
+	ERROR_APP_DATA_EXPIRED syscall.Errno = 4401
+	ERROR_APP_DATA_CORRUPT syscall.Errno = 4402
+	ERROR_APP_DATA_LIMIT_EXCEEDED syscall.Errno = 4403
+	ERROR_APP_DATA_REBOOT_REQUIRED syscall.Errno = 4404
+	ERROR_SECUREBOOT_ROLLBACK_DETECTED syscall.Errno = 4420
+	ERROR_SECUREBOOT_POLICY_VIOLATION syscall.Errno = 4421
+	ERROR_SECUREBOOT_INVALID_POLICY syscall.Errno = 4422
+	ERROR_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND syscall.Errno = 4423
+	ERROR_SECUREBOOT_POLICY_NOT_SIGNED syscall.Errno = 4424
+	ERROR_SECUREBOOT_NOT_ENABLED syscall.Errno = 4425
+	ERROR_SECUREBOOT_FILE_REPLACED syscall.Errno = 4426
+	ERROR_SECUREBOOT_POLICY_NOT_AUTHORIZED syscall.Errno = 4427
+	ERROR_SECUREBOOT_POLICY_UNKNOWN syscall.Errno = 4428
+	ERROR_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION syscall.Errno = 4429
+	ERROR_SECUREBOOT_PLATFORM_ID_MISMATCH syscall.Errno = 4430
+	ERROR_SECUREBOOT_POLICY_ROLLBACK_DETECTED syscall.Errno = 4431
+	ERROR_SECUREBOOT_POLICY_UPGRADE_MISMATCH syscall.Errno = 4432
+	ERROR_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING syscall.Errno = 4433
+	ERROR_SECUREBOOT_NOT_BASE_POLICY syscall.Errno = 4434
+	ERROR_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY syscall.Errno = 4435
+	ERROR_OFFLOAD_READ_FLT_NOT_SUPPORTED syscall.Errno = 4440
+	ERROR_OFFLOAD_WRITE_FLT_NOT_SUPPORTED syscall.Errno = 4441
+	ERROR_OFFLOAD_READ_FILE_NOT_SUPPORTED syscall.Errno = 4442
+	ERROR_OFFLOAD_WRITE_FILE_NOT_SUPPORTED syscall.Errno = 4443
+	ERROR_ALREADY_HAS_STREAM_ID syscall.Errno = 4444
+	ERROR_SMR_GARBAGE_COLLECTION_REQUIRED syscall.Errno = 4445
+	ERROR_WOF_WIM_HEADER_CORRUPT syscall.Errno = 4446
+	ERROR_WOF_WIM_RESOURCE_TABLE_CORRUPT syscall.Errno = 4447
+	ERROR_WOF_FILE_RESOURCE_TABLE_CORRUPT syscall.Errno = 4448
+	ERROR_VOLUME_NOT_SIS_ENABLED syscall.Errno = 4500
+	ERROR_SYSTEM_INTEGRITY_ROLLBACK_DETECTED syscall.Errno = 4550
+	ERROR_SYSTEM_INTEGRITY_POLICY_VIOLATION syscall.Errno = 4551
+	ERROR_SYSTEM_INTEGRITY_INVALID_POLICY syscall.Errno = 4552
+	ERROR_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED syscall.Errno = 4553
+	ERROR_VSM_NOT_INITIALIZED syscall.Errno = 4560
+	ERROR_VSM_DMA_PROTECTION_NOT_IN_USE syscall.Errno = 4561
+	ERROR_PLATFORM_MANIFEST_NOT_AUTHORIZED syscall.Errno = 4570
+	ERROR_PLATFORM_MANIFEST_INVALID syscall.Errno = 4571
+	ERROR_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED syscall.Errno = 4572
+	ERROR_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED syscall.Errno = 4573
+	ERROR_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND syscall.Errno = 4574
+	ERROR_PLATFORM_MANIFEST_NOT_ACTIVE syscall.Errno = 4575
+	ERROR_PLATFORM_MANIFEST_NOT_SIGNED syscall.Errno = 4576
+	ERROR_DEPENDENT_RESOURCE_EXISTS syscall.Errno = 5001
+	ERROR_DEPENDENCY_NOT_FOUND syscall.Errno = 5002
+	ERROR_DEPENDENCY_ALREADY_EXISTS syscall.Errno = 5003
+	ERROR_RESOURCE_NOT_ONLINE syscall.Errno = 5004
+	ERROR_HOST_NODE_NOT_AVAILABLE syscall.Errno = 5005
+	ERROR_RESOURCE_NOT_AVAILABLE syscall.Errno = 5006
+	ERROR_RESOURCE_NOT_FOUND syscall.Errno = 5007
+	ERROR_SHUTDOWN_CLUSTER syscall.Errno = 5008
+	ERROR_CANT_EVICT_ACTIVE_NODE syscall.Errno = 5009
+	ERROR_OBJECT_ALREADY_EXISTS syscall.Errno = 5010
+	ERROR_OBJECT_IN_LIST syscall.Errno = 5011
+	ERROR_GROUP_NOT_AVAILABLE syscall.Errno = 5012
+	ERROR_GROUP_NOT_FOUND syscall.Errno = 5013
+	ERROR_GROUP_NOT_ONLINE syscall.Errno = 5014
+	ERROR_HOST_NODE_NOT_RESOURCE_OWNER syscall.Errno = 5015
+	ERROR_HOST_NODE_NOT_GROUP_OWNER syscall.Errno = 5016
+	ERROR_RESMON_CREATE_FAILED syscall.Errno = 5017
+	ERROR_RESMON_ONLINE_FAILED syscall.Errno = 5018
+	ERROR_RESOURCE_ONLINE syscall.Errno = 5019
+	ERROR_QUORUM_RESOURCE syscall.Errno = 5020
+	ERROR_NOT_QUORUM_CAPABLE syscall.Errno = 5021
+	ERROR_CLUSTER_SHUTTING_DOWN syscall.Errno = 5022
+	ERROR_INVALID_STATE syscall.Errno = 5023
+	ERROR_RESOURCE_PROPERTIES_STORED syscall.Errno = 5024
+	ERROR_NOT_QUORUM_CLASS syscall.Errno = 5025
+	ERROR_CORE_RESOURCE syscall.Errno = 5026
+	ERROR_QUORUM_RESOURCE_ONLINE_FAILED syscall.Errno = 5027
+	ERROR_QUORUMLOG_OPEN_FAILED syscall.Errno = 5028
+	ERROR_CLUSTERLOG_CORRUPT syscall.Errno = 5029
+	ERROR_CLUSTERLOG_RECORD_EXCEEDS_MAXSIZE syscall.Errno = 5030
+	ERROR_CLUSTERLOG_EXCEEDS_MAXSIZE syscall.Errno = 5031
+	ERROR_CLUSTERLOG_CHKPOINT_NOT_FOUND syscall.Errno = 5032
+	ERROR_CLUSTERLOG_NOT_ENOUGH_SPACE syscall.Errno = 5033
+	ERROR_QUORUM_OWNER_ALIVE syscall.Errno = 5034
+	ERROR_NETWORK_NOT_AVAILABLE syscall.Errno = 5035
+	ERROR_NODE_NOT_AVAILABLE syscall.Errno = 5036
+	ERROR_ALL_NODES_NOT_AVAILABLE syscall.Errno = 5037
+	ERROR_RESOURCE_FAILED syscall.Errno = 5038
+	ERROR_CLUSTER_INVALID_NODE syscall.Errno = 5039
+	ERROR_CLUSTER_NODE_EXISTS syscall.Errno = 5040
+	ERROR_CLUSTER_JOIN_IN_PROGRESS syscall.Errno = 5041
+	ERROR_CLUSTER_NODE_NOT_FOUND syscall.Errno = 5042
+	ERROR_CLUSTER_LOCAL_NODE_NOT_FOUND syscall.Errno = 5043
+	ERROR_CLUSTER_NETWORK_EXISTS syscall.Errno = 5044
+	ERROR_CLUSTER_NETWORK_NOT_FOUND syscall.Errno = 5045
+	ERROR_CLUSTER_NETINTERFACE_EXISTS syscall.Errno = 5046
+	ERROR_CLUSTER_NETINTERFACE_NOT_FOUND syscall.Errno = 5047
+	ERROR_CLUSTER_INVALID_REQUEST syscall.Errno = 5048
+	ERROR_CLUSTER_INVALID_NETWORK_PROVIDER syscall.Errno = 5049
+	ERROR_CLUSTER_NODE_DOWN syscall.Errno = 5050
+	ERROR_CLUSTER_NODE_UNREACHABLE syscall.Errno = 5051
+	ERROR_CLUSTER_NODE_NOT_MEMBER syscall.Errno = 5052
+	ERROR_CLUSTER_JOIN_NOT_IN_PROGRESS syscall.Errno = 5053
+	ERROR_CLUSTER_INVALID_NETWORK syscall.Errno = 5054
+	ERROR_CLUSTER_NODE_UP syscall.Errno = 5056
+	ERROR_CLUSTER_IPADDR_IN_USE syscall.Errno = 5057
+	ERROR_CLUSTER_NODE_NOT_PAUSED syscall.Errno = 5058
+	ERROR_CLUSTER_NO_SECURITY_CONTEXT syscall.Errno = 5059
+	ERROR_CLUSTER_NETWORK_NOT_INTERNAL syscall.Errno = 5060
+	ERROR_CLUSTER_NODE_ALREADY_UP syscall.Errno = 5061
+	ERROR_CLUSTER_NODE_ALREADY_DOWN syscall.Errno = 5062
+	ERROR_CLUSTER_NETWORK_ALREADY_ONLINE syscall.Errno = 5063
+	ERROR_CLUSTER_NETWORK_ALREADY_OFFLINE syscall.Errno = 5064
+	ERROR_CLUSTER_NODE_ALREADY_MEMBER syscall.Errno = 5065
+	ERROR_CLUSTER_LAST_INTERNAL_NETWORK syscall.Errno = 5066
+	ERROR_CLUSTER_NETWORK_HAS_DEPENDENTS syscall.Errno = 5067
+	ERROR_INVALID_OPERATION_ON_QUORUM syscall.Errno = 5068
+	ERROR_DEPENDENCY_NOT_ALLOWED syscall.Errno = 5069
+	ERROR_CLUSTER_NODE_PAUSED syscall.Errno = 5070
+	ERROR_NODE_CANT_HOST_RESOURCE syscall.Errno = 5071
+	ERROR_CLUSTER_NODE_NOT_READY syscall.Errno = 5072
+	ERROR_CLUSTER_NODE_SHUTTING_DOWN syscall.Errno = 5073
+	ERROR_CLUSTER_JOIN_ABORTED syscall.Errno = 5074
+	ERROR_CLUSTER_INCOMPATIBLE_VERSIONS syscall.Errno = 5075
+	ERROR_CLUSTER_MAXNUM_OF_RESOURCES_EXCEEDED syscall.Errno = 5076
+	ERROR_CLUSTER_SYSTEM_CONFIG_CHANGED syscall.Errno = 5077
+	ERROR_CLUSTER_RESOURCE_TYPE_NOT_FOUND syscall.Errno = 5078
+	ERROR_CLUSTER_RESTYPE_NOT_SUPPORTED syscall.Errno = 5079
+	ERROR_CLUSTER_RESNAME_NOT_FOUND syscall.Errno = 5080
+	ERROR_CLUSTER_NO_RPC_PACKAGES_REGISTERED syscall.Errno = 5081
+	ERROR_CLUSTER_OWNER_NOT_IN_PREFLIST syscall.Errno = 5082
+	ERROR_CLUSTER_DATABASE_SEQMISMATCH syscall.Errno = 5083
+	ERROR_RESMON_INVALID_STATE syscall.Errno = 5084
+	ERROR_CLUSTER_GUM_NOT_LOCKER syscall.Errno = 5085
+	ERROR_QUORUM_DISK_NOT_FOUND syscall.Errno = 5086
+	ERROR_DATABASE_BACKUP_CORRUPT syscall.Errno = 5087
+	ERROR_CLUSTER_NODE_ALREADY_HAS_DFS_ROOT syscall.Errno = 5088
+	ERROR_RESOURCE_PROPERTY_UNCHANGEABLE syscall.Errno = 5089
+	ERROR_NO_ADMIN_ACCESS_POINT syscall.Errno = 5090
+	ERROR_CLUSTER_MEMBERSHIP_INVALID_STATE syscall.Errno = 5890
+	ERROR_CLUSTER_QUORUMLOG_NOT_FOUND syscall.Errno = 5891
+	ERROR_CLUSTER_MEMBERSHIP_HALT syscall.Errno = 5892
+	ERROR_CLUSTER_INSTANCE_ID_MISMATCH syscall.Errno = 5893
+	ERROR_CLUSTER_NETWORK_NOT_FOUND_FOR_IP syscall.Errno = 5894
+	ERROR_CLUSTER_PROPERTY_DATA_TYPE_MISMATCH syscall.Errno = 5895
+	ERROR_CLUSTER_EVICT_WITHOUT_CLEANUP syscall.Errno = 5896
+	ERROR_CLUSTER_PARAMETER_MISMATCH syscall.Errno = 5897
+	ERROR_NODE_CANNOT_BE_CLUSTERED syscall.Errno = 5898
+	ERROR_CLUSTER_WRONG_OS_VERSION syscall.Errno = 5899
+	ERROR_CLUSTER_CANT_CREATE_DUP_CLUSTER_NAME syscall.Errno = 5900
+	ERROR_CLUSCFG_ALREADY_COMMITTED syscall.Errno = 5901
+	ERROR_CLUSCFG_ROLLBACK_FAILED syscall.Errno = 5902
+	ERROR_CLUSCFG_SYSTEM_DISK_DRIVE_LETTER_CONFLICT syscall.Errno = 5903
+	ERROR_CLUSTER_OLD_VERSION syscall.Errno = 5904
+	ERROR_CLUSTER_MISMATCHED_COMPUTER_ACCT_NAME syscall.Errno = 5905
+	ERROR_CLUSTER_NO_NET_ADAPTERS syscall.Errno = 5906
+	ERROR_CLUSTER_POISONED syscall.Errno = 5907
+	ERROR_CLUSTER_GROUP_MOVING syscall.Errno = 5908
+	ERROR_CLUSTER_RESOURCE_TYPE_BUSY syscall.Errno = 5909
+	ERROR_RESOURCE_CALL_TIMED_OUT syscall.Errno = 5910
+	ERROR_INVALID_CLUSTER_IPV6_ADDRESS syscall.Errno = 5911
+	ERROR_CLUSTER_INTERNAL_INVALID_FUNCTION syscall.Errno = 5912
+	ERROR_CLUSTER_PARAMETER_OUT_OF_BOUNDS syscall.Errno = 5913
+	ERROR_CLUSTER_PARTIAL_SEND syscall.Errno = 5914
+	ERROR_CLUSTER_REGISTRY_INVALID_FUNCTION syscall.Errno = 5915
+	ERROR_CLUSTER_INVALID_STRING_TERMINATION syscall.Errno = 5916
+	ERROR_CLUSTER_INVALID_STRING_FORMAT syscall.Errno = 5917
+	ERROR_CLUSTER_DATABASE_TRANSACTION_IN_PROGRESS syscall.Errno = 5918
+	ERROR_CLUSTER_DATABASE_TRANSACTION_NOT_IN_PROGRESS syscall.Errno = 5919
+	ERROR_CLUSTER_NULL_DATA syscall.Errno = 5920
+	ERROR_CLUSTER_PARTIAL_READ syscall.Errno = 5921
+	ERROR_CLUSTER_PARTIAL_WRITE syscall.Errno = 5922
+	ERROR_CLUSTER_CANT_DESERIALIZE_DATA syscall.Errno = 5923
+	ERROR_DEPENDENT_RESOURCE_PROPERTY_CONFLICT syscall.Errno = 5924
+	ERROR_CLUSTER_NO_QUORUM syscall.Errno = 5925
+	ERROR_CLUSTER_INVALID_IPV6_NETWORK syscall.Errno = 5926
+	ERROR_CLUSTER_INVALID_IPV6_TUNNEL_NETWORK syscall.Errno = 5927
+	ERROR_QUORUM_NOT_ALLOWED_IN_THIS_GROUP syscall.Errno = 5928
+	ERROR_DEPENDENCY_TREE_TOO_COMPLEX syscall.Errno = 5929
+	ERROR_EXCEPTION_IN_RESOURCE_CALL syscall.Errno = 5930
+	ERROR_CLUSTER_RHS_FAILED_INITIALIZATION syscall.Errno = 5931
+	ERROR_CLUSTER_NOT_INSTALLED syscall.Errno = 5932
+	ERROR_CLUSTER_RESOURCES_MUST_BE_ONLINE_ON_THE_SAME_NODE syscall.Errno = 5933
+	ERROR_CLUSTER_MAX_NODES_IN_CLUSTER syscall.Errno = 5934
+	ERROR_CLUSTER_TOO_MANY_NODES syscall.Errno = 5935
+	ERROR_CLUSTER_OBJECT_ALREADY_USED syscall.Errno = 5936
+	ERROR_NONCORE_GROUPS_FOUND syscall.Errno = 5937
+	ERROR_FILE_SHARE_RESOURCE_CONFLICT syscall.Errno = 5938
+	ERROR_CLUSTER_EVICT_INVALID_REQUEST syscall.Errno = 5939
+	ERROR_CLUSTER_SINGLETON_RESOURCE syscall.Errno = 5940
+	ERROR_CLUSTER_GROUP_SINGLETON_RESOURCE syscall.Errno = 5941
+	ERROR_CLUSTER_RESOURCE_PROVIDER_FAILED syscall.Errno = 5942
+	ERROR_CLUSTER_RESOURCE_CONFIGURATION_ERROR syscall.Errno = 5943
+	ERROR_CLUSTER_GROUP_BUSY syscall.Errno = 5944
+	ERROR_CLUSTER_NOT_SHARED_VOLUME syscall.Errno = 5945
+	ERROR_CLUSTER_INVALID_SECURITY_DESCRIPTOR syscall.Errno = 5946
+	ERROR_CLUSTER_SHARED_VOLUMES_IN_USE syscall.Errno = 5947
+	ERROR_CLUSTER_USE_SHARED_VOLUMES_API syscall.Errno = 5948
+	ERROR_CLUSTER_BACKUP_IN_PROGRESS syscall.Errno = 5949
+	ERROR_NON_CSV_PATH syscall.Errno = 5950
+	ERROR_CSV_VOLUME_NOT_LOCAL syscall.Errno = 5951
+	ERROR_CLUSTER_WATCHDOG_TERMINATING syscall.Errno = 5952
+	ERROR_CLUSTER_RESOURCE_VETOED_MOVE_INCOMPATIBLE_NODES syscall.Errno = 5953
+	ERROR_CLUSTER_INVALID_NODE_WEIGHT syscall.Errno = 5954
+	ERROR_CLUSTER_RESOURCE_VETOED_CALL syscall.Errno = 5955
+	ERROR_RESMON_SYSTEM_RESOURCES_LACKING syscall.Errno = 5956
+	ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_DESTINATION syscall.Errno = 5957
+	ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_SOURCE syscall.Errno = 5958
+	ERROR_CLUSTER_GROUP_QUEUED syscall.Errno = 5959
+	ERROR_CLUSTER_RESOURCE_LOCKED_STATUS syscall.Errno = 5960
+	ERROR_CLUSTER_SHARED_VOLUME_FAILOVER_NOT_ALLOWED syscall.Errno = 5961
+	ERROR_CLUSTER_NODE_DRAIN_IN_PROGRESS syscall.Errno = 5962
+	ERROR_CLUSTER_DISK_NOT_CONNECTED syscall.Errno = 5963
+	ERROR_DISK_NOT_CSV_CAPABLE syscall.Errno = 5964
+	ERROR_RESOURCE_NOT_IN_AVAILABLE_STORAGE syscall.Errno = 5965
+	ERROR_CLUSTER_SHARED_VOLUME_REDIRECTED syscall.Errno = 5966
+	ERROR_CLUSTER_SHARED_VOLUME_NOT_REDIRECTED syscall.Errno = 5967
+	ERROR_CLUSTER_CANNOT_RETURN_PROPERTIES syscall.Errno = 5968
+	ERROR_CLUSTER_RESOURCE_CONTAINS_UNSUPPORTED_DIFF_AREA_FOR_SHARED_VOLUMES syscall.Errno = 5969
+	ERROR_CLUSTER_RESOURCE_IS_IN_MAINTENANCE_MODE syscall.Errno = 5970
+	ERROR_CLUSTER_AFFINITY_CONFLICT syscall.Errno = 5971
+	ERROR_CLUSTER_RESOURCE_IS_REPLICA_VIRTUAL_MACHINE syscall.Errno = 5972
+	ERROR_CLUSTER_UPGRADE_INCOMPATIBLE_VERSIONS syscall.Errno = 5973
+	ERROR_CLUSTER_UPGRADE_FIX_QUORUM_NOT_SUPPORTED syscall.Errno = 5974
+	ERROR_CLUSTER_UPGRADE_RESTART_REQUIRED syscall.Errno = 5975
+	ERROR_CLUSTER_UPGRADE_IN_PROGRESS syscall.Errno = 5976
+	ERROR_CLUSTER_UPGRADE_INCOMPLETE syscall.Errno = 5977
+	ERROR_CLUSTER_NODE_IN_GRACE_PERIOD syscall.Errno = 5978
+	ERROR_CLUSTER_CSV_IO_PAUSE_TIMEOUT syscall.Errno = 5979
+	ERROR_NODE_NOT_ACTIVE_CLUSTER_MEMBER syscall.Errno = 5980
+	ERROR_CLUSTER_RESOURCE_NOT_MONITORED syscall.Errno = 5981
+	ERROR_CLUSTER_RESOURCE_DOES_NOT_SUPPORT_UNMONITORED syscall.Errno = 5982
+	ERROR_CLUSTER_RESOURCE_IS_REPLICATED syscall.Errno = 5983
+	ERROR_CLUSTER_NODE_ISOLATED syscall.Errno = 5984
+	ERROR_CLUSTER_NODE_QUARANTINED syscall.Errno = 5985
+	ERROR_CLUSTER_DATABASE_UPDATE_CONDITION_FAILED syscall.Errno = 5986
+	ERROR_CLUSTER_SPACE_DEGRADED syscall.Errno = 5987
+	ERROR_CLUSTER_TOKEN_DELEGATION_NOT_SUPPORTED syscall.Errno = 5988
+	ERROR_CLUSTER_CSV_INVALID_HANDLE syscall.Errno = 5989
+	ERROR_CLUSTER_CSV_SUPPORTED_ONLY_ON_COORDINATOR syscall.Errno = 5990
+	ERROR_GROUPSET_NOT_AVAILABLE syscall.Errno = 5991
+	ERROR_GROUPSET_NOT_FOUND syscall.Errno = 5992
+	ERROR_GROUPSET_CANT_PROVIDE syscall.Errno = 5993
+	ERROR_CLUSTER_FAULT_DOMAIN_PARENT_NOT_FOUND syscall.Errno = 5994
+	ERROR_CLUSTER_FAULT_DOMAIN_INVALID_HIERARCHY syscall.Errno = 5995
+	ERROR_CLUSTER_FAULT_DOMAIN_FAILED_S2D_VALIDATION syscall.Errno = 5996
+	ERROR_CLUSTER_FAULT_DOMAIN_S2D_CONNECTIVITY_LOSS syscall.Errno = 5997
+	ERROR_CLUSTER_INVALID_INFRASTRUCTURE_FILESERVER_NAME syscall.Errno = 5998
+	ERROR_CLUSTERSET_MANAGEMENT_CLUSTER_UNREACHABLE syscall.Errno = 5999
+	ERROR_ENCRYPTION_FAILED syscall.Errno = 6000
+	ERROR_DECRYPTION_FAILED syscall.Errno = 6001
+	ERROR_FILE_ENCRYPTED syscall.Errno = 6002
+	ERROR_NO_RECOVERY_POLICY syscall.Errno = 6003
+	ERROR_NO_EFS syscall.Errno = 6004
+	ERROR_WRONG_EFS syscall.Errno = 6005
+	ERROR_NO_USER_KEYS syscall.Errno = 6006
+	ERROR_FILE_NOT_ENCRYPTED syscall.Errno = 6007
+	ERROR_NOT_EXPORT_FORMAT syscall.Errno = 6008
+	ERROR_FILE_READ_ONLY syscall.Errno = 6009
+	ERROR_DIR_EFS_DISALLOWED syscall.Errno = 6010
+	ERROR_EFS_SERVER_NOT_TRUSTED syscall.Errno = 6011
+	ERROR_BAD_RECOVERY_POLICY syscall.Errno = 6012
+	ERROR_EFS_ALG_BLOB_TOO_BIG syscall.Errno = 6013
+	ERROR_VOLUME_NOT_SUPPORT_EFS syscall.Errno = 6014
+	ERROR_EFS_DISABLED syscall.Errno = 6015
+	ERROR_EFS_VERSION_NOT_SUPPORT syscall.Errno = 6016
+	ERROR_CS_ENCRYPTION_INVALID_SERVER_RESPONSE syscall.Errno = 6017
+	ERROR_CS_ENCRYPTION_UNSUPPORTED_SERVER syscall.Errno = 6018
+	ERROR_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE syscall.Errno = 6019
+	ERROR_CS_ENCRYPTION_NEW_ENCRYPTED_FILE syscall.Errno = 6020
+	ERROR_CS_ENCRYPTION_FILE_NOT_CSE syscall.Errno = 6021
+	ERROR_ENCRYPTION_POLICY_DENIES_OPERATION syscall.Errno = 6022
+	ERROR_NO_BROWSER_SERVERS_FOUND syscall.Errno = 6118
+	SCHED_E_SERVICE_NOT_LOCALSYSTEM syscall.Errno = 6200
+	ERROR_LOG_SECTOR_INVALID syscall.Errno = 6600
+	ERROR_LOG_SECTOR_PARITY_INVALID syscall.Errno = 6601
+	ERROR_LOG_SECTOR_REMAPPED syscall.Errno = 6602
+	ERROR_LOG_BLOCK_INCOMPLETE syscall.Errno = 6603
+	ERROR_LOG_INVALID_RANGE syscall.Errno = 6604
+	ERROR_LOG_BLOCKS_EXHAUSTED syscall.Errno = 6605
+	ERROR_LOG_READ_CONTEXT_INVALID syscall.Errno = 6606
+	ERROR_LOG_RESTART_INVALID syscall.Errno = 6607
+	ERROR_LOG_BLOCK_VERSION syscall.Errno = 6608
+	ERROR_LOG_BLOCK_INVALID syscall.Errno = 6609
+	ERROR_LOG_READ_MODE_INVALID syscall.Errno = 6610
+	ERROR_LOG_NO_RESTART syscall.Errno = 6611
+	ERROR_LOG_METADATA_CORRUPT syscall.Errno = 6612
+	ERROR_LOG_METADATA_INVALID syscall.Errno = 6613
+	ERROR_LOG_METADATA_INCONSISTENT syscall.Errno = 6614
+	ERROR_LOG_RESERVATION_INVALID syscall.Errno = 6615
+	ERROR_LOG_CANT_DELETE syscall.Errno = 6616
+	ERROR_LOG_CONTAINER_LIMIT_EXCEEDED syscall.Errno = 6617
+	ERROR_LOG_START_OF_LOG syscall.Errno = 6618
+	ERROR_LOG_POLICY_ALREADY_INSTALLED syscall.Errno = 6619
+	ERROR_LOG_POLICY_NOT_INSTALLED syscall.Errno = 6620
+	ERROR_LOG_POLICY_INVALID syscall.Errno = 6621
+	ERROR_LOG_POLICY_CONFLICT syscall.Errno = 6622
+	ERROR_LOG_PINNED_ARCHIVE_TAIL syscall.Errno = 6623
+	ERROR_LOG_RECORD_NONEXISTENT syscall.Errno = 6624
+	ERROR_LOG_RECORDS_RESERVED_INVALID syscall.Errno = 6625
+	ERROR_LOG_SPACE_RESERVED_INVALID syscall.Errno = 6626
+	ERROR_LOG_TAIL_INVALID syscall.Errno = 6627
+	ERROR_LOG_FULL syscall.Errno = 6628
+	ERROR_COULD_NOT_RESIZE_LOG syscall.Errno = 6629
+	ERROR_LOG_MULTIPLEXED syscall.Errno = 6630
+	ERROR_LOG_DEDICATED syscall.Errno = 6631
+	ERROR_LOG_ARCHIVE_NOT_IN_PROGRESS syscall.Errno = 6632
+	ERROR_LOG_ARCHIVE_IN_PROGRESS syscall.Errno = 6633
+	ERROR_LOG_EPHEMERAL syscall.Errno = 6634
+	ERROR_LOG_NOT_ENOUGH_CONTAINERS syscall.Errno = 6635
+	ERROR_LOG_CLIENT_ALREADY_REGISTERED syscall.Errno = 6636
+	ERROR_LOG_CLIENT_NOT_REGISTERED syscall.Errno = 6637
+	ERROR_LOG_FULL_HANDLER_IN_PROGRESS syscall.Errno = 6638
+	ERROR_LOG_CONTAINER_READ_FAILED syscall.Errno = 6639
+	ERROR_LOG_CONTAINER_WRITE_FAILED syscall.Errno = 6640
+	ERROR_LOG_CONTAINER_OPEN_FAILED syscall.Errno = 6641
+	ERROR_LOG_CONTAINER_STATE_INVALID syscall.Errno = 6642
+	ERROR_LOG_STATE_INVALID syscall.Errno = 6643
+	ERROR_LOG_PINNED syscall.Errno = 6644
+	ERROR_LOG_METADATA_FLUSH_FAILED syscall.Errno = 6645
+	ERROR_LOG_INCONSISTENT_SECURITY syscall.Errno = 6646
+	ERROR_LOG_APPENDED_FLUSH_FAILED syscall.Errno = 6647
+	ERROR_LOG_PINNED_RESERVATION syscall.Errno = 6648
+	ERROR_INVALID_TRANSACTION syscall.Errno = 6700
+	ERROR_TRANSACTION_NOT_ACTIVE syscall.Errno = 6701
+	ERROR_TRANSACTION_REQUEST_NOT_VALID syscall.Errno = 6702
+	ERROR_TRANSACTION_NOT_REQUESTED syscall.Errno = 6703
+	ERROR_TRANSACTION_ALREADY_ABORTED syscall.Errno = 6704
+	ERROR_TRANSACTION_ALREADY_COMMITTED syscall.Errno = 6705
+	ERROR_TM_INITIALIZATION_FAILED syscall.Errno = 6706
+	ERROR_RESOURCEMANAGER_READ_ONLY syscall.Errno = 6707
+	ERROR_TRANSACTION_NOT_JOINED syscall.Errno = 6708
+	ERROR_TRANSACTION_SUPERIOR_EXISTS syscall.Errno = 6709
+	ERROR_CRM_PROTOCOL_ALREADY_EXISTS syscall.Errno = 6710
+	ERROR_TRANSACTION_PROPAGATION_FAILED syscall.Errno = 6711
+	ERROR_CRM_PROTOCOL_NOT_FOUND syscall.Errno = 6712
+	ERROR_TRANSACTION_INVALID_MARSHALL_BUFFER syscall.Errno = 6713
+	ERROR_CURRENT_TRANSACTION_NOT_VALID syscall.Errno = 6714
+	ERROR_TRANSACTION_NOT_FOUND syscall.Errno = 6715
+	ERROR_RESOURCEMANAGER_NOT_FOUND syscall.Errno = 6716
+	ERROR_ENLISTMENT_NOT_FOUND syscall.Errno = 6717
+	ERROR_TRANSACTIONMANAGER_NOT_FOUND syscall.Errno = 6718
+	ERROR_TRANSACTIONMANAGER_NOT_ONLINE syscall.Errno = 6719
+	ERROR_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION syscall.Errno = 6720
+	ERROR_TRANSACTION_NOT_ROOT syscall.Errno = 6721
+	ERROR_TRANSACTION_OBJECT_EXPIRED syscall.Errno = 6722
+	ERROR_TRANSACTION_RESPONSE_NOT_ENLISTED syscall.Errno = 6723
+	ERROR_TRANSACTION_RECORD_TOO_LONG syscall.Errno = 6724
+	ERROR_IMPLICIT_TRANSACTION_NOT_SUPPORTED syscall.Errno = 6725
+	ERROR_TRANSACTION_INTEGRITY_VIOLATED syscall.Errno = 6726
+	ERROR_TRANSACTIONMANAGER_IDENTITY_MISMATCH syscall.Errno = 6727
+	ERROR_RM_CANNOT_BE_FROZEN_FOR_SNAPSHOT syscall.Errno = 6728
+	ERROR_TRANSACTION_MUST_WRITETHROUGH syscall.Errno = 6729
+	ERROR_TRANSACTION_NO_SUPERIOR syscall.Errno = 6730
+	ERROR_HEURISTIC_DAMAGE_POSSIBLE syscall.Errno = 6731
+	ERROR_TRANSACTIONAL_CONFLICT syscall.Errno = 6800
+	ERROR_RM_NOT_ACTIVE syscall.Errno = 6801
+	ERROR_RM_METADATA_CORRUPT syscall.Errno = 6802
+	ERROR_DIRECTORY_NOT_RM syscall.Errno = 6803
+	ERROR_TRANSACTIONS_UNSUPPORTED_REMOTE syscall.Errno = 6805
+	ERROR_LOG_RESIZE_INVALID_SIZE syscall.Errno = 6806
+	ERROR_OBJECT_NO_LONGER_EXISTS syscall.Errno = 6807
+	ERROR_STREAM_MINIVERSION_NOT_FOUND syscall.Errno = 6808
+	ERROR_STREAM_MINIVERSION_NOT_VALID syscall.Errno = 6809
+	ERROR_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION syscall.Errno = 6810
+	ERROR_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT syscall.Errno = 6811
+	ERROR_CANT_CREATE_MORE_STREAM_MINIVERSIONS syscall.Errno = 6812
+	ERROR_REMOTE_FILE_VERSION_MISMATCH syscall.Errno = 6814
+	ERROR_HANDLE_NO_LONGER_VALID syscall.Errno = 6815
+	ERROR_NO_TXF_METADATA syscall.Errno = 6816
+	ERROR_LOG_CORRUPTION_DETECTED syscall.Errno = 6817
+	ERROR_CANT_RECOVER_WITH_HANDLE_OPEN syscall.Errno = 6818
+	ERROR_RM_DISCONNECTED syscall.Errno = 6819
+	ERROR_ENLISTMENT_NOT_SUPERIOR syscall.Errno = 6820
+	ERROR_RECOVERY_NOT_NEEDED syscall.Errno = 6821
+	ERROR_RM_ALREADY_STARTED syscall.Errno = 6822
+	ERROR_FILE_IDENTITY_NOT_PERSISTENT syscall.Errno = 6823
+	ERROR_CANT_BREAK_TRANSACTIONAL_DEPENDENCY syscall.Errno = 6824
+	ERROR_CANT_CROSS_RM_BOUNDARY syscall.Errno = 6825
+	ERROR_TXF_DIR_NOT_EMPTY syscall.Errno = 6826
+	ERROR_INDOUBT_TRANSACTIONS_EXIST syscall.Errno = 6827
+	ERROR_TM_VOLATILE syscall.Errno = 6828
+	ERROR_ROLLBACK_TIMER_EXPIRED syscall.Errno = 6829
+	ERROR_TXF_ATTRIBUTE_CORRUPT syscall.Errno = 6830
+	ERROR_EFS_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6831
+	ERROR_TRANSACTIONAL_OPEN_NOT_ALLOWED syscall.Errno = 6832
+	ERROR_LOG_GROWTH_FAILED syscall.Errno = 6833
+	ERROR_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE syscall.Errno = 6834
+	ERROR_TXF_METADATA_ALREADY_PRESENT syscall.Errno = 6835
+	ERROR_TRANSACTION_SCOPE_CALLBACKS_NOT_SET syscall.Errno = 6836
+	ERROR_TRANSACTION_REQUIRED_PROMOTION syscall.Errno = 6837
+	ERROR_CANNOT_EXECUTE_FILE_IN_TRANSACTION syscall.Errno = 6838
+	ERROR_TRANSACTIONS_NOT_FROZEN syscall.Errno = 6839
+	ERROR_TRANSACTION_FREEZE_IN_PROGRESS syscall.Errno = 6840
+	ERROR_NOT_SNAPSHOT_VOLUME syscall.Errno = 6841
+	ERROR_NO_SAVEPOINT_WITH_OPEN_FILES syscall.Errno = 6842
+	ERROR_DATA_LOST_REPAIR syscall.Errno = 6843
+	ERROR_SPARSE_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6844
+	ERROR_TM_IDENTITY_MISMATCH syscall.Errno = 6845
+	ERROR_FLOATED_SECTION syscall.Errno = 6846
+	ERROR_CANNOT_ACCEPT_TRANSACTED_WORK syscall.Errno = 6847
+	ERROR_CANNOT_ABORT_TRANSACTIONS syscall.Errno = 6848
+	ERROR_BAD_CLUSTERS syscall.Errno = 6849
+	ERROR_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6850
+	ERROR_VOLUME_DIRTY syscall.Errno = 6851
+	ERROR_NO_LINK_TRACKING_IN_TRANSACTION syscall.Errno = 6852
+	ERROR_OPERATION_NOT_SUPPORTED_IN_TRANSACTION syscall.Errno = 6853
+	ERROR_EXPIRED_HANDLE syscall.Errno = 6854
+	ERROR_TRANSACTION_NOT_ENLISTED syscall.Errno = 6855
+	ERROR_CTX_WINSTATION_NAME_INVALID syscall.Errno = 7001
+	ERROR_CTX_INVALID_PD syscall.Errno = 7002
+	ERROR_CTX_PD_NOT_FOUND syscall.Errno = 7003
+	ERROR_CTX_WD_NOT_FOUND syscall.Errno = 7004
+	ERROR_CTX_CANNOT_MAKE_EVENTLOG_ENTRY syscall.Errno = 7005
+	ERROR_CTX_SERVICE_NAME_COLLISION syscall.Errno = 7006
+	ERROR_CTX_CLOSE_PENDING syscall.Errno = 7007
+	ERROR_CTX_NO_OUTBUF syscall.Errno = 7008
+	ERROR_CTX_MODEM_INF_NOT_FOUND syscall.Errno = 7009
+	ERROR_CTX_INVALID_MODEMNAME syscall.Errno = 7010
+	ERROR_CTX_MODEM_RESPONSE_ERROR syscall.Errno = 7011
+	ERROR_CTX_MODEM_RESPONSE_TIMEOUT syscall.Errno = 7012
+	ERROR_CTX_MODEM_RESPONSE_NO_CARRIER syscall.Errno = 7013
+	ERROR_CTX_MODEM_RESPONSE_NO_DIALTONE syscall.Errno = 7014
+	ERROR_CTX_MODEM_RESPONSE_BUSY syscall.Errno = 7015
+	ERROR_CTX_MODEM_RESPONSE_VOICE syscall.Errno = 7016
+	ERROR_CTX_TD_ERROR syscall.Errno = 7017
+	ERROR_CTX_WINSTATION_NOT_FOUND syscall.Errno = 7022
+	ERROR_CTX_WINSTATION_ALREADY_EXISTS syscall.Errno = 7023
+	ERROR_CTX_WINSTATION_BUSY syscall.Errno = 7024
+	ERROR_CTX_BAD_VIDEO_MODE syscall.Errno = 7025
+	ERROR_CTX_GRAPHICS_INVALID syscall.Errno = 7035
+	ERROR_CTX_LOGON_DISABLED syscall.Errno = 7037
+	ERROR_CTX_NOT_CONSOLE syscall.Errno = 7038
+	ERROR_CTX_CLIENT_QUERY_TIMEOUT syscall.Errno = 7040
+	ERROR_CTX_CONSOLE_DISCONNECT syscall.Errno = 7041
+	ERROR_CTX_CONSOLE_CONNECT syscall.Errno = 7042
+	ERROR_CTX_SHADOW_DENIED syscall.Errno = 7044
+	ERROR_CTX_WINSTATION_ACCESS_DENIED syscall.Errno = 7045
+	ERROR_CTX_INVALID_WD syscall.Errno = 7049
+	ERROR_CTX_SHADOW_INVALID syscall.Errno = 7050
+	ERROR_CTX_SHADOW_DISABLED syscall.Errno = 7051
+	ERROR_CTX_CLIENT_LICENSE_IN_USE syscall.Errno = 7052
+	ERROR_CTX_CLIENT_LICENSE_NOT_SET syscall.Errno = 7053
+	ERROR_CTX_LICENSE_NOT_AVAILABLE syscall.Errno = 7054
+	ERROR_CTX_LICENSE_CLIENT_INVALID syscall.Errno = 7055
+	ERROR_CTX_LICENSE_EXPIRED syscall.Errno = 7056
+	ERROR_CTX_SHADOW_NOT_RUNNING syscall.Errno = 7057
+	ERROR_CTX_SHADOW_ENDED_BY_MODE_CHANGE syscall.Errno = 7058
+	ERROR_ACTIVATION_COUNT_EXCEEDED syscall.Errno = 7059
+	ERROR_CTX_WINSTATIONS_DISABLED syscall.Errno = 7060
+	ERROR_CTX_ENCRYPTION_LEVEL_REQUIRED syscall.Errno = 7061
+	ERROR_CTX_SESSION_IN_USE syscall.Errno = 7062
+	ERROR_CTX_NO_FORCE_LOGOFF syscall.Errno = 7063
+	ERROR_CTX_ACCOUNT_RESTRICTION syscall.Errno = 7064
+	ERROR_RDP_PROTOCOL_ERROR syscall.Errno = 7065
+	ERROR_CTX_CDM_CONNECT syscall.Errno = 7066
+	ERROR_CTX_CDM_DISCONNECT syscall.Errno = 7067
+	ERROR_CTX_SECURITY_LAYER_ERROR syscall.Errno = 7068
+	ERROR_TS_INCOMPATIBLE_SESSIONS syscall.Errno = 7069
+	ERROR_TS_VIDEO_SUBSYSTEM_ERROR syscall.Errno = 7070
+	FRS_ERR_INVALID_API_SEQUENCE syscall.Errno = 8001
+	FRS_ERR_STARTING_SERVICE syscall.Errno = 8002
+	FRS_ERR_STOPPING_SERVICE syscall.Errno = 8003
+	FRS_ERR_INTERNAL_API syscall.Errno = 8004
+	FRS_ERR_INTERNAL syscall.Errno = 8005
+	FRS_ERR_SERVICE_COMM syscall.Errno = 8006
+	FRS_ERR_INSUFFICIENT_PRIV syscall.Errno = 8007
+	FRS_ERR_AUTHENTICATION syscall.Errno = 8008
+	FRS_ERR_PARENT_INSUFFICIENT_PRIV syscall.Errno = 8009
+	FRS_ERR_PARENT_AUTHENTICATION syscall.Errno = 8010
+	FRS_ERR_CHILD_TO_PARENT_COMM syscall.Errno = 8011
+	FRS_ERR_PARENT_TO_CHILD_COMM syscall.Errno = 8012
+	FRS_ERR_SYSVOL_POPULATE syscall.Errno = 8013
+	FRS_ERR_SYSVOL_POPULATE_TIMEOUT syscall.Errno = 8014
+	FRS_ERR_SYSVOL_IS_BUSY syscall.Errno = 8015
+	FRS_ERR_SYSVOL_DEMOTE syscall.Errno = 8016
+	FRS_ERR_INVALID_SERVICE_PARAMETER syscall.Errno = 8017
+	DS_S_SUCCESS = ERROR_SUCCESS
+	ERROR_DS_NOT_INSTALLED syscall.Errno = 8200
+	ERROR_DS_MEMBERSHIP_EVALUATED_LOCALLY syscall.Errno = 8201
+	ERROR_DS_NO_ATTRIBUTE_OR_VALUE syscall.Errno = 8202
+	ERROR_DS_INVALID_ATTRIBUTE_SYNTAX syscall.Errno = 8203
+	ERROR_DS_ATTRIBUTE_TYPE_UNDEFINED syscall.Errno = 8204
+	ERROR_DS_ATTRIBUTE_OR_VALUE_EXISTS syscall.Errno = 8205
+	ERROR_DS_BUSY syscall.Errno = 8206
+	ERROR_DS_UNAVAILABLE syscall.Errno = 8207
+	ERROR_DS_NO_RIDS_ALLOCATED syscall.Errno = 8208
+	ERROR_DS_NO_MORE_RIDS syscall.Errno = 8209
+	ERROR_DS_INCORRECT_ROLE_OWNER syscall.Errno = 8210
+	ERROR_DS_RIDMGR_INIT_ERROR syscall.Errno = 8211
+	ERROR_DS_OBJ_CLASS_VIOLATION syscall.Errno = 8212
+	ERROR_DS_CANT_ON_NON_LEAF syscall.Errno = 8213
+	ERROR_DS_CANT_ON_RDN syscall.Errno = 8214
+	ERROR_DS_CANT_MOD_OBJ_CLASS syscall.Errno = 8215
+	ERROR_DS_CROSS_DOM_MOVE_ERROR syscall.Errno = 8216
+	ERROR_DS_GC_NOT_AVAILABLE syscall.Errno = 8217
+	ERROR_SHARED_POLICY syscall.Errno = 8218
+	ERROR_POLICY_OBJECT_NOT_FOUND
syscall.Errno = 8219 + ERROR_POLICY_ONLY_IN_DS syscall.Errno = 8220 + ERROR_PROMOTION_ACTIVE syscall.Errno = 8221 + ERROR_NO_PROMOTION_ACTIVE syscall.Errno = 8222 + ERROR_DS_OPERATIONS_ERROR syscall.Errno = 8224 + ERROR_DS_PROTOCOL_ERROR syscall.Errno = 8225 + ERROR_DS_TIMELIMIT_EXCEEDED syscall.Errno = 8226 + ERROR_DS_SIZELIMIT_EXCEEDED syscall.Errno = 8227 + ERROR_DS_ADMIN_LIMIT_EXCEEDED syscall.Errno = 8228 + ERROR_DS_COMPARE_FALSE syscall.Errno = 8229 + ERROR_DS_COMPARE_TRUE syscall.Errno = 8230 + ERROR_DS_AUTH_METHOD_NOT_SUPPORTED syscall.Errno = 8231 + ERROR_DS_STRONG_AUTH_REQUIRED syscall.Errno = 8232 + ERROR_DS_INAPPROPRIATE_AUTH syscall.Errno = 8233 + ERROR_DS_AUTH_UNKNOWN syscall.Errno = 8234 + ERROR_DS_REFERRAL syscall.Errno = 8235 + ERROR_DS_UNAVAILABLE_CRIT_EXTENSION syscall.Errno = 8236 + ERROR_DS_CONFIDENTIALITY_REQUIRED syscall.Errno = 8237 + ERROR_DS_INAPPROPRIATE_MATCHING syscall.Errno = 8238 + ERROR_DS_CONSTRAINT_VIOLATION syscall.Errno = 8239 + ERROR_DS_NO_SUCH_OBJECT syscall.Errno = 8240 + ERROR_DS_ALIAS_PROBLEM syscall.Errno = 8241 + ERROR_DS_INVALID_DN_SYNTAX syscall.Errno = 8242 + ERROR_DS_IS_LEAF syscall.Errno = 8243 + ERROR_DS_ALIAS_DEREF_PROBLEM syscall.Errno = 8244 + ERROR_DS_UNWILLING_TO_PERFORM syscall.Errno = 8245 + ERROR_DS_LOOP_DETECT syscall.Errno = 8246 + ERROR_DS_NAMING_VIOLATION syscall.Errno = 8247 + ERROR_DS_OBJECT_RESULTS_TOO_LARGE syscall.Errno = 8248 + ERROR_DS_AFFECTS_MULTIPLE_DSAS syscall.Errno = 8249 + ERROR_DS_SERVER_DOWN syscall.Errno = 8250 + ERROR_DS_LOCAL_ERROR syscall.Errno = 8251 + ERROR_DS_ENCODING_ERROR syscall.Errno = 8252 + ERROR_DS_DECODING_ERROR syscall.Errno = 8253 + ERROR_DS_FILTER_UNKNOWN syscall.Errno = 8254 + ERROR_DS_PARAM_ERROR syscall.Errno = 8255 + ERROR_DS_NOT_SUPPORTED syscall.Errno = 8256 + ERROR_DS_NO_RESULTS_RETURNED syscall.Errno = 8257 + ERROR_DS_CONTROL_NOT_FOUND syscall.Errno = 8258 + ERROR_DS_CLIENT_LOOP syscall.Errno = 8259 + ERROR_DS_REFERRAL_LIMIT_EXCEEDED syscall.Errno = 8260 + ERROR_DS_SORT_CONTROL_MISSING syscall.Errno = 8261 + ERROR_DS_OFFSET_RANGE_ERROR syscall.Errno = 8262 + ERROR_DS_RIDMGR_DISABLED syscall.Errno = 8263 + ERROR_DS_ROOT_MUST_BE_NC syscall.Errno = 8301 + ERROR_DS_ADD_REPLICA_INHIBITED syscall.Errno = 8302 + ERROR_DS_ATT_NOT_DEF_IN_SCHEMA syscall.Errno = 8303 + ERROR_DS_MAX_OBJ_SIZE_EXCEEDED syscall.Errno = 8304 + ERROR_DS_OBJ_STRING_NAME_EXISTS syscall.Errno = 8305 + ERROR_DS_NO_RDN_DEFINED_IN_SCHEMA syscall.Errno = 8306 + ERROR_DS_RDN_DOESNT_MATCH_SCHEMA syscall.Errno = 8307 + ERROR_DS_NO_REQUESTED_ATTS_FOUND syscall.Errno = 8308 + ERROR_DS_USER_BUFFER_TO_SMALL syscall.Errno = 8309 + ERROR_DS_ATT_IS_NOT_ON_OBJ syscall.Errno = 8310 + ERROR_DS_ILLEGAL_MOD_OPERATION syscall.Errno = 8311 + ERROR_DS_OBJ_TOO_LARGE syscall.Errno = 8312 + ERROR_DS_BAD_INSTANCE_TYPE syscall.Errno = 8313 + ERROR_DS_MASTERDSA_REQUIRED syscall.Errno = 8314 + ERROR_DS_OBJECT_CLASS_REQUIRED syscall.Errno = 8315 + ERROR_DS_MISSING_REQUIRED_ATT syscall.Errno = 8316 + ERROR_DS_ATT_NOT_DEF_FOR_CLASS syscall.Errno = 8317 + ERROR_DS_ATT_ALREADY_EXISTS syscall.Errno = 8318 + ERROR_DS_CANT_ADD_ATT_VALUES syscall.Errno = 8320 + ERROR_DS_SINGLE_VALUE_CONSTRAINT syscall.Errno = 8321 + ERROR_DS_RANGE_CONSTRAINT syscall.Errno = 8322 + ERROR_DS_ATT_VAL_ALREADY_EXISTS syscall.Errno = 8323 + ERROR_DS_CANT_REM_MISSING_ATT syscall.Errno = 8324 + ERROR_DS_CANT_REM_MISSING_ATT_VAL syscall.Errno = 8325 + ERROR_DS_ROOT_CANT_BE_SUBREF syscall.Errno = 8326 + ERROR_DS_NO_CHAINING syscall.Errno = 8327 + ERROR_DS_NO_CHAINED_EVAL syscall.Errno = 
8328 + ERROR_DS_NO_PARENT_OBJECT syscall.Errno = 8329 + ERROR_DS_PARENT_IS_AN_ALIAS syscall.Errno = 8330 + ERROR_DS_CANT_MIX_MASTER_AND_REPS syscall.Errno = 8331 + ERROR_DS_CHILDREN_EXIST syscall.Errno = 8332 + ERROR_DS_OBJ_NOT_FOUND syscall.Errno = 8333 + ERROR_DS_ALIASED_OBJ_MISSING syscall.Errno = 8334 + ERROR_DS_BAD_NAME_SYNTAX syscall.Errno = 8335 + ERROR_DS_ALIAS_POINTS_TO_ALIAS syscall.Errno = 8336 + ERROR_DS_CANT_DEREF_ALIAS syscall.Errno = 8337 + ERROR_DS_OUT_OF_SCOPE syscall.Errno = 8338 + ERROR_DS_OBJECT_BEING_REMOVED syscall.Errno = 8339 + ERROR_DS_CANT_DELETE_DSA_OBJ syscall.Errno = 8340 + ERROR_DS_GENERIC_ERROR syscall.Errno = 8341 + ERROR_DS_DSA_MUST_BE_INT_MASTER syscall.Errno = 8342 + ERROR_DS_CLASS_NOT_DSA syscall.Errno = 8343 + ERROR_DS_INSUFF_ACCESS_RIGHTS syscall.Errno = 8344 + ERROR_DS_ILLEGAL_SUPERIOR syscall.Errno = 8345 + ERROR_DS_ATTRIBUTE_OWNED_BY_SAM syscall.Errno = 8346 + ERROR_DS_NAME_TOO_MANY_PARTS syscall.Errno = 8347 + ERROR_DS_NAME_TOO_LONG syscall.Errno = 8348 + ERROR_DS_NAME_VALUE_TOO_LONG syscall.Errno = 8349 + ERROR_DS_NAME_UNPARSEABLE syscall.Errno = 8350 + ERROR_DS_NAME_TYPE_UNKNOWN syscall.Errno = 8351 + ERROR_DS_NOT_AN_OBJECT syscall.Errno = 8352 + ERROR_DS_SEC_DESC_TOO_SHORT syscall.Errno = 8353 + ERROR_DS_SEC_DESC_INVALID syscall.Errno = 8354 + ERROR_DS_NO_DELETED_NAME syscall.Errno = 8355 + ERROR_DS_SUBREF_MUST_HAVE_PARENT syscall.Errno = 8356 + ERROR_DS_NCNAME_MUST_BE_NC syscall.Errno = 8357 + ERROR_DS_CANT_ADD_SYSTEM_ONLY syscall.Errno = 8358 + ERROR_DS_CLASS_MUST_BE_CONCRETE syscall.Errno = 8359 + ERROR_DS_INVALID_DMD syscall.Errno = 8360 + ERROR_DS_OBJ_GUID_EXISTS syscall.Errno = 8361 + ERROR_DS_NOT_ON_BACKLINK syscall.Errno = 8362 + ERROR_DS_NO_CROSSREF_FOR_NC syscall.Errno = 8363 + ERROR_DS_SHUTTING_DOWN syscall.Errno = 8364 + ERROR_DS_UNKNOWN_OPERATION syscall.Errno = 8365 + ERROR_DS_INVALID_ROLE_OWNER syscall.Errno = 8366 + ERROR_DS_COULDNT_CONTACT_FSMO syscall.Errno = 8367 + ERROR_DS_CROSS_NC_DN_RENAME syscall.Errno = 8368 + ERROR_DS_CANT_MOD_SYSTEM_ONLY syscall.Errno = 8369 + ERROR_DS_REPLICATOR_ONLY syscall.Errno = 8370 + ERROR_DS_OBJ_CLASS_NOT_DEFINED syscall.Errno = 8371 + ERROR_DS_OBJ_CLASS_NOT_SUBCLASS syscall.Errno = 8372 + ERROR_DS_NAME_REFERENCE_INVALID syscall.Errno = 8373 + ERROR_DS_CROSS_REF_EXISTS syscall.Errno = 8374 + ERROR_DS_CANT_DEL_MASTER_CROSSREF syscall.Errno = 8375 + ERROR_DS_SUBTREE_NOTIFY_NOT_NC_HEAD syscall.Errno = 8376 + ERROR_DS_NOTIFY_FILTER_TOO_COMPLEX syscall.Errno = 8377 + ERROR_DS_DUP_RDN syscall.Errno = 8378 + ERROR_DS_DUP_OID syscall.Errno = 8379 + ERROR_DS_DUP_MAPI_ID syscall.Errno = 8380 + ERROR_DS_DUP_SCHEMA_ID_GUID syscall.Errno = 8381 + ERROR_DS_DUP_LDAP_DISPLAY_NAME syscall.Errno = 8382 + ERROR_DS_SEMANTIC_ATT_TEST syscall.Errno = 8383 + ERROR_DS_SYNTAX_MISMATCH syscall.Errno = 8384 + ERROR_DS_EXISTS_IN_MUST_HAVE syscall.Errno = 8385 + ERROR_DS_EXISTS_IN_MAY_HAVE syscall.Errno = 8386 + ERROR_DS_NONEXISTENT_MAY_HAVE syscall.Errno = 8387 + ERROR_DS_NONEXISTENT_MUST_HAVE syscall.Errno = 8388 + ERROR_DS_AUX_CLS_TEST_FAIL syscall.Errno = 8389 + ERROR_DS_NONEXISTENT_POSS_SUP syscall.Errno = 8390 + ERROR_DS_SUB_CLS_TEST_FAIL syscall.Errno = 8391 + ERROR_DS_BAD_RDN_ATT_ID_SYNTAX syscall.Errno = 8392 + ERROR_DS_EXISTS_IN_AUX_CLS syscall.Errno = 8393 + ERROR_DS_EXISTS_IN_SUB_CLS syscall.Errno = 8394 + ERROR_DS_EXISTS_IN_POSS_SUP syscall.Errno = 8395 + ERROR_DS_RECALCSCHEMA_FAILED syscall.Errno = 8396 + ERROR_DS_TREE_DELETE_NOT_FINISHED syscall.Errno = 8397 + ERROR_DS_CANT_DELETE syscall.Errno = 8398 + 
ERROR_DS_ATT_SCHEMA_REQ_ID syscall.Errno = 8399 + ERROR_DS_BAD_ATT_SCHEMA_SYNTAX syscall.Errno = 8400 + ERROR_DS_CANT_CACHE_ATT syscall.Errno = 8401 + ERROR_DS_CANT_CACHE_CLASS syscall.Errno = 8402 + ERROR_DS_CANT_REMOVE_ATT_CACHE syscall.Errno = 8403 + ERROR_DS_CANT_REMOVE_CLASS_CACHE syscall.Errno = 8404 + ERROR_DS_CANT_RETRIEVE_DN syscall.Errno = 8405 + ERROR_DS_MISSING_SUPREF syscall.Errno = 8406 + ERROR_DS_CANT_RETRIEVE_INSTANCE syscall.Errno = 8407 + ERROR_DS_CODE_INCONSISTENCY syscall.Errno = 8408 + ERROR_DS_DATABASE_ERROR syscall.Errno = 8409 + ERROR_DS_GOVERNSID_MISSING syscall.Errno = 8410 + ERROR_DS_MISSING_EXPECTED_ATT syscall.Errno = 8411 + ERROR_DS_NCNAME_MISSING_CR_REF syscall.Errno = 8412 + ERROR_DS_SECURITY_CHECKING_ERROR syscall.Errno = 8413 + ERROR_DS_SCHEMA_NOT_LOADED syscall.Errno = 8414 + ERROR_DS_SCHEMA_ALLOC_FAILED syscall.Errno = 8415 + ERROR_DS_ATT_SCHEMA_REQ_SYNTAX syscall.Errno = 8416 + ERROR_DS_GCVERIFY_ERROR syscall.Errno = 8417 + ERROR_DS_DRA_SCHEMA_MISMATCH syscall.Errno = 8418 + ERROR_DS_CANT_FIND_DSA_OBJ syscall.Errno = 8419 + ERROR_DS_CANT_FIND_EXPECTED_NC syscall.Errno = 8420 + ERROR_DS_CANT_FIND_NC_IN_CACHE syscall.Errno = 8421 + ERROR_DS_CANT_RETRIEVE_CHILD syscall.Errno = 8422 + ERROR_DS_SECURITY_ILLEGAL_MODIFY syscall.Errno = 8423 + ERROR_DS_CANT_REPLACE_HIDDEN_REC syscall.Errno = 8424 + ERROR_DS_BAD_HIERARCHY_FILE syscall.Errno = 8425 + ERROR_DS_BUILD_HIERARCHY_TABLE_FAILED syscall.Errno = 8426 + ERROR_DS_CONFIG_PARAM_MISSING syscall.Errno = 8427 + ERROR_DS_COUNTING_AB_INDICES_FAILED syscall.Errno = 8428 + ERROR_DS_HIERARCHY_TABLE_MALLOC_FAILED syscall.Errno = 8429 + ERROR_DS_INTERNAL_FAILURE syscall.Errno = 8430 + ERROR_DS_UNKNOWN_ERROR syscall.Errno = 8431 + ERROR_DS_ROOT_REQUIRES_CLASS_TOP syscall.Errno = 8432 + ERROR_DS_REFUSING_FSMO_ROLES syscall.Errno = 8433 + ERROR_DS_MISSING_FSMO_SETTINGS syscall.Errno = 8434 + ERROR_DS_UNABLE_TO_SURRENDER_ROLES syscall.Errno = 8435 + ERROR_DS_DRA_GENERIC syscall.Errno = 8436 + ERROR_DS_DRA_INVALID_PARAMETER syscall.Errno = 8437 + ERROR_DS_DRA_BUSY syscall.Errno = 8438 + ERROR_DS_DRA_BAD_DN syscall.Errno = 8439 + ERROR_DS_DRA_BAD_NC syscall.Errno = 8440 + ERROR_DS_DRA_DN_EXISTS syscall.Errno = 8441 + ERROR_DS_DRA_INTERNAL_ERROR syscall.Errno = 8442 + ERROR_DS_DRA_INCONSISTENT_DIT syscall.Errno = 8443 + ERROR_DS_DRA_CONNECTION_FAILED syscall.Errno = 8444 + ERROR_DS_DRA_BAD_INSTANCE_TYPE syscall.Errno = 8445 + ERROR_DS_DRA_OUT_OF_MEM syscall.Errno = 8446 + ERROR_DS_DRA_MAIL_PROBLEM syscall.Errno = 8447 + ERROR_DS_DRA_REF_ALREADY_EXISTS syscall.Errno = 8448 + ERROR_DS_DRA_REF_NOT_FOUND syscall.Errno = 8449 + ERROR_DS_DRA_OBJ_IS_REP_SOURCE syscall.Errno = 8450 + ERROR_DS_DRA_DB_ERROR syscall.Errno = 8451 + ERROR_DS_DRA_NO_REPLICA syscall.Errno = 8452 + ERROR_DS_DRA_ACCESS_DENIED syscall.Errno = 8453 + ERROR_DS_DRA_NOT_SUPPORTED syscall.Errno = 8454 + ERROR_DS_DRA_RPC_CANCELLED syscall.Errno = 8455 + ERROR_DS_DRA_SOURCE_DISABLED syscall.Errno = 8456 + ERROR_DS_DRA_SINK_DISABLED syscall.Errno = 8457 + ERROR_DS_DRA_NAME_COLLISION syscall.Errno = 8458 + ERROR_DS_DRA_SOURCE_REINSTALLED syscall.Errno = 8459 + ERROR_DS_DRA_MISSING_PARENT syscall.Errno = 8460 + ERROR_DS_DRA_PREEMPTED syscall.Errno = 8461 + ERROR_DS_DRA_ABANDON_SYNC syscall.Errno = 8462 + ERROR_DS_DRA_SHUTDOWN syscall.Errno = 8463 + ERROR_DS_DRA_INCOMPATIBLE_PARTIAL_SET syscall.Errno = 8464 + ERROR_DS_DRA_SOURCE_IS_PARTIAL_REPLICA syscall.Errno = 8465 + ERROR_DS_DRA_EXTN_CONNECTION_FAILED syscall.Errno = 8466 + ERROR_DS_INSTALL_SCHEMA_MISMATCH 
syscall.Errno = 8467 + ERROR_DS_DUP_LINK_ID syscall.Errno = 8468 + ERROR_DS_NAME_ERROR_RESOLVING syscall.Errno = 8469 + ERROR_DS_NAME_ERROR_NOT_FOUND syscall.Errno = 8470 + ERROR_DS_NAME_ERROR_NOT_UNIQUE syscall.Errno = 8471 + ERROR_DS_NAME_ERROR_NO_MAPPING syscall.Errno = 8472 + ERROR_DS_NAME_ERROR_DOMAIN_ONLY syscall.Errno = 8473 + ERROR_DS_NAME_ERROR_NO_SYNTACTICAL_MAPPING syscall.Errno = 8474 + ERROR_DS_CONSTRUCTED_ATT_MOD syscall.Errno = 8475 + ERROR_DS_WRONG_OM_OBJ_CLASS syscall.Errno = 8476 + ERROR_DS_DRA_REPL_PENDING syscall.Errno = 8477 + ERROR_DS_DS_REQUIRED syscall.Errno = 8478 + ERROR_DS_INVALID_LDAP_DISPLAY_NAME syscall.Errno = 8479 + ERROR_DS_NON_BASE_SEARCH syscall.Errno = 8480 + ERROR_DS_CANT_RETRIEVE_ATTS syscall.Errno = 8481 + ERROR_DS_BACKLINK_WITHOUT_LINK syscall.Errno = 8482 + ERROR_DS_EPOCH_MISMATCH syscall.Errno = 8483 + ERROR_DS_SRC_NAME_MISMATCH syscall.Errno = 8484 + ERROR_DS_SRC_AND_DST_NC_IDENTICAL syscall.Errno = 8485 + ERROR_DS_DST_NC_MISMATCH syscall.Errno = 8486 + ERROR_DS_NOT_AUTHORITIVE_FOR_DST_NC syscall.Errno = 8487 + ERROR_DS_SRC_GUID_MISMATCH syscall.Errno = 8488 + ERROR_DS_CANT_MOVE_DELETED_OBJECT syscall.Errno = 8489 + ERROR_DS_PDC_OPERATION_IN_PROGRESS syscall.Errno = 8490 + ERROR_DS_CROSS_DOMAIN_CLEANUP_REQD syscall.Errno = 8491 + ERROR_DS_ILLEGAL_XDOM_MOVE_OPERATION syscall.Errno = 8492 + ERROR_DS_CANT_WITH_ACCT_GROUP_MEMBERSHPS syscall.Errno = 8493 + ERROR_DS_NC_MUST_HAVE_NC_PARENT syscall.Errno = 8494 + ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE syscall.Errno = 8495 + ERROR_DS_DST_DOMAIN_NOT_NATIVE syscall.Errno = 8496 + ERROR_DS_MISSING_INFRASTRUCTURE_CONTAINER syscall.Errno = 8497 + ERROR_DS_CANT_MOVE_ACCOUNT_GROUP syscall.Errno = 8498 + ERROR_DS_CANT_MOVE_RESOURCE_GROUP syscall.Errno = 8499 + ERROR_DS_INVALID_SEARCH_FLAG syscall.Errno = 8500 + ERROR_DS_NO_TREE_DELETE_ABOVE_NC syscall.Errno = 8501 + ERROR_DS_COULDNT_LOCK_TREE_FOR_DELETE syscall.Errno = 8502 + ERROR_DS_COULDNT_IDENTIFY_OBJECTS_FOR_TREE_DELETE syscall.Errno = 8503 + ERROR_DS_SAM_INIT_FAILURE syscall.Errno = 8504 + ERROR_DS_SENSITIVE_GROUP_VIOLATION syscall.Errno = 8505 + ERROR_DS_CANT_MOD_PRIMARYGROUPID syscall.Errno = 8506 + ERROR_DS_ILLEGAL_BASE_SCHEMA_MOD syscall.Errno = 8507 + ERROR_DS_NONSAFE_SCHEMA_CHANGE syscall.Errno = 8508 + ERROR_DS_SCHEMA_UPDATE_DISALLOWED syscall.Errno = 8509 + ERROR_DS_CANT_CREATE_UNDER_SCHEMA syscall.Errno = 8510 + ERROR_DS_INSTALL_NO_SRC_SCH_VERSION syscall.Errno = 8511 + ERROR_DS_INSTALL_NO_SCH_VERSION_IN_INIFILE syscall.Errno = 8512 + ERROR_DS_INVALID_GROUP_TYPE syscall.Errno = 8513 + ERROR_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN syscall.Errno = 8514 + ERROR_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN syscall.Errno = 8515 + ERROR_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER syscall.Errno = 8516 + ERROR_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER syscall.Errno = 8517 + ERROR_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER syscall.Errno = 8518 + ERROR_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER syscall.Errno = 8519 + ERROR_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER syscall.Errno = 8520 + ERROR_DS_HAVE_PRIMARY_MEMBERS syscall.Errno = 8521 + ERROR_DS_STRING_SD_CONVERSION_FAILED syscall.Errno = 8522 + ERROR_DS_NAMING_MASTER_GC syscall.Errno = 8523 + ERROR_DS_DNS_LOOKUP_FAILURE syscall.Errno = 8524 + ERROR_DS_COULDNT_UPDATE_SPNS syscall.Errno = 8525 + ERROR_DS_CANT_RETRIEVE_SD syscall.Errno = 8526 + ERROR_DS_KEY_NOT_UNIQUE syscall.Errno = 8527 + ERROR_DS_WRONG_LINKED_ATT_SYNTAX syscall.Errno = 8528 + ERROR_DS_SAM_NEED_BOOTKEY_PASSWORD syscall.Errno = 8529 + ERROR_DS_SAM_NEED_BOOTKEY_FLOPPY 
syscall.Errno = 8530 + ERROR_DS_CANT_START syscall.Errno = 8531 + ERROR_DS_INIT_FAILURE syscall.Errno = 8532 + ERROR_DS_NO_PKT_PRIVACY_ON_CONNECTION syscall.Errno = 8533 + ERROR_DS_SOURCE_DOMAIN_IN_FOREST syscall.Errno = 8534 + ERROR_DS_DESTINATION_DOMAIN_NOT_IN_FOREST syscall.Errno = 8535 + ERROR_DS_DESTINATION_AUDITING_NOT_ENABLED syscall.Errno = 8536 + ERROR_DS_CANT_FIND_DC_FOR_SRC_DOMAIN syscall.Errno = 8537 + ERROR_DS_SRC_OBJ_NOT_GROUP_OR_USER syscall.Errno = 8538 + ERROR_DS_SRC_SID_EXISTS_IN_FOREST syscall.Errno = 8539 + ERROR_DS_SRC_AND_DST_OBJECT_CLASS_MISMATCH syscall.Errno = 8540 + ERROR_SAM_INIT_FAILURE syscall.Errno = 8541 + ERROR_DS_DRA_SCHEMA_INFO_SHIP syscall.Errno = 8542 + ERROR_DS_DRA_SCHEMA_CONFLICT syscall.Errno = 8543 + ERROR_DS_DRA_EARLIER_SCHEMA_CONFLICT syscall.Errno = 8544 + ERROR_DS_DRA_OBJ_NC_MISMATCH syscall.Errno = 8545 + ERROR_DS_NC_STILL_HAS_DSAS syscall.Errno = 8546 + ERROR_DS_GC_REQUIRED syscall.Errno = 8547 + ERROR_DS_LOCAL_MEMBER_OF_LOCAL_ONLY syscall.Errno = 8548 + ERROR_DS_NO_FPO_IN_UNIVERSAL_GROUPS syscall.Errno = 8549 + ERROR_DS_CANT_ADD_TO_GC syscall.Errno = 8550 + ERROR_DS_NO_CHECKPOINT_WITH_PDC syscall.Errno = 8551 + ERROR_DS_SOURCE_AUDITING_NOT_ENABLED syscall.Errno = 8552 + ERROR_DS_CANT_CREATE_IN_NONDOMAIN_NC syscall.Errno = 8553 + ERROR_DS_INVALID_NAME_FOR_SPN syscall.Errno = 8554 + ERROR_DS_FILTER_USES_CONTRUCTED_ATTRS syscall.Errno = 8555 + ERROR_DS_UNICODEPWD_NOT_IN_QUOTES syscall.Errno = 8556 + ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED syscall.Errno = 8557 + ERROR_DS_MUST_BE_RUN_ON_DST_DC syscall.Errno = 8558 + ERROR_DS_SRC_DC_MUST_BE_SP4_OR_GREATER syscall.Errno = 8559 + ERROR_DS_CANT_TREE_DELETE_CRITICAL_OBJ syscall.Errno = 8560 + ERROR_DS_INIT_FAILURE_CONSOLE syscall.Errno = 8561 + ERROR_DS_SAM_INIT_FAILURE_CONSOLE syscall.Errno = 8562 + ERROR_DS_FOREST_VERSION_TOO_HIGH syscall.Errno = 8563 + ERROR_DS_DOMAIN_VERSION_TOO_HIGH syscall.Errno = 8564 + ERROR_DS_FOREST_VERSION_TOO_LOW syscall.Errno = 8565 + ERROR_DS_DOMAIN_VERSION_TOO_LOW syscall.Errno = 8566 + ERROR_DS_INCOMPATIBLE_VERSION syscall.Errno = 8567 + ERROR_DS_LOW_DSA_VERSION syscall.Errno = 8568 + ERROR_DS_NO_BEHAVIOR_VERSION_IN_MIXEDDOMAIN syscall.Errno = 8569 + ERROR_DS_NOT_SUPPORTED_SORT_ORDER syscall.Errno = 8570 + ERROR_DS_NAME_NOT_UNIQUE syscall.Errno = 8571 + ERROR_DS_MACHINE_ACCOUNT_CREATED_PRENT4 syscall.Errno = 8572 + ERROR_DS_OUT_OF_VERSION_STORE syscall.Errno = 8573 + ERROR_DS_INCOMPATIBLE_CONTROLS_USED syscall.Errno = 8574 + ERROR_DS_NO_REF_DOMAIN syscall.Errno = 8575 + ERROR_DS_RESERVED_LINK_ID syscall.Errno = 8576 + ERROR_DS_LINK_ID_NOT_AVAILABLE syscall.Errno = 8577 + ERROR_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER syscall.Errno = 8578 + ERROR_DS_MODIFYDN_DISALLOWED_BY_INSTANCE_TYPE syscall.Errno = 8579 + ERROR_DS_NO_OBJECT_MOVE_IN_SCHEMA_NC syscall.Errno = 8580 + ERROR_DS_MODIFYDN_DISALLOWED_BY_FLAG syscall.Errno = 8581 + ERROR_DS_MODIFYDN_WRONG_GRANDPARENT syscall.Errno = 8582 + ERROR_DS_NAME_ERROR_TRUST_REFERRAL syscall.Errno = 8583 + ERROR_NOT_SUPPORTED_ON_STANDARD_SERVER syscall.Errno = 8584 + ERROR_DS_CANT_ACCESS_REMOTE_PART_OF_AD syscall.Errno = 8585 + ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE_V2 syscall.Errno = 8586 + ERROR_DS_THREAD_LIMIT_EXCEEDED syscall.Errno = 8587 + ERROR_DS_NOT_CLOSEST syscall.Errno = 8588 + ERROR_DS_CANT_DERIVE_SPN_WITHOUT_SERVER_REF syscall.Errno = 8589 + ERROR_DS_SINGLE_USER_MODE_FAILED syscall.Errno = 8590 + ERROR_DS_NTDSCRIPT_SYNTAX_ERROR syscall.Errno = 8591 + ERROR_DS_NTDSCRIPT_PROCESS_ERROR syscall.Errno = 8592 + ERROR_DS_DIFFERENT_REPL_EPOCHS 
syscall.Errno = 8593 + ERROR_DS_DRS_EXTENSIONS_CHANGED syscall.Errno = 8594 + ERROR_DS_REPLICA_SET_CHANGE_NOT_ALLOWED_ON_DISABLED_CR syscall.Errno = 8595 + ERROR_DS_NO_MSDS_INTID syscall.Errno = 8596 + ERROR_DS_DUP_MSDS_INTID syscall.Errno = 8597 + ERROR_DS_EXISTS_IN_RDNATTID syscall.Errno = 8598 + ERROR_DS_AUTHORIZATION_FAILED syscall.Errno = 8599 + ERROR_DS_INVALID_SCRIPT syscall.Errno = 8600 + ERROR_DS_REMOTE_CROSSREF_OP_FAILED syscall.Errno = 8601 + ERROR_DS_CROSS_REF_BUSY syscall.Errno = 8602 + ERROR_DS_CANT_DERIVE_SPN_FOR_DELETED_DOMAIN syscall.Errno = 8603 + ERROR_DS_CANT_DEMOTE_WITH_WRITEABLE_NC syscall.Errno = 8604 + ERROR_DS_DUPLICATE_ID_FOUND syscall.Errno = 8605 + ERROR_DS_INSUFFICIENT_ATTR_TO_CREATE_OBJECT syscall.Errno = 8606 + ERROR_DS_GROUP_CONVERSION_ERROR syscall.Errno = 8607 + ERROR_DS_CANT_MOVE_APP_BASIC_GROUP syscall.Errno = 8608 + ERROR_DS_CANT_MOVE_APP_QUERY_GROUP syscall.Errno = 8609 + ERROR_DS_ROLE_NOT_VERIFIED syscall.Errno = 8610 + ERROR_DS_WKO_CONTAINER_CANNOT_BE_SPECIAL syscall.Errno = 8611 + ERROR_DS_DOMAIN_RENAME_IN_PROGRESS syscall.Errno = 8612 + ERROR_DS_EXISTING_AD_CHILD_NC syscall.Errno = 8613 + ERROR_DS_REPL_LIFETIME_EXCEEDED syscall.Errno = 8614 + ERROR_DS_DISALLOWED_IN_SYSTEM_CONTAINER syscall.Errno = 8615 + ERROR_DS_LDAP_SEND_QUEUE_FULL syscall.Errno = 8616 + ERROR_DS_DRA_OUT_SCHEDULE_WINDOW syscall.Errno = 8617 + ERROR_DS_POLICY_NOT_KNOWN syscall.Errno = 8618 + ERROR_NO_SITE_SETTINGS_OBJECT syscall.Errno = 8619 + ERROR_NO_SECRETS syscall.Errno = 8620 + ERROR_NO_WRITABLE_DC_FOUND syscall.Errno = 8621 + ERROR_DS_NO_SERVER_OBJECT syscall.Errno = 8622 + ERROR_DS_NO_NTDSA_OBJECT syscall.Errno = 8623 + ERROR_DS_NON_ASQ_SEARCH syscall.Errno = 8624 + ERROR_DS_AUDIT_FAILURE syscall.Errno = 8625 + ERROR_DS_INVALID_SEARCH_FLAG_SUBTREE syscall.Errno = 8626 + ERROR_DS_INVALID_SEARCH_FLAG_TUPLE syscall.Errno = 8627 + ERROR_DS_HIERARCHY_TABLE_TOO_DEEP syscall.Errno = 8628 + ERROR_DS_DRA_CORRUPT_UTD_VECTOR syscall.Errno = 8629 + ERROR_DS_DRA_SECRETS_DENIED syscall.Errno = 8630 + ERROR_DS_RESERVED_MAPI_ID syscall.Errno = 8631 + ERROR_DS_MAPI_ID_NOT_AVAILABLE syscall.Errno = 8632 + ERROR_DS_DRA_MISSING_KRBTGT_SECRET syscall.Errno = 8633 + ERROR_DS_DOMAIN_NAME_EXISTS_IN_FOREST syscall.Errno = 8634 + ERROR_DS_FLAT_NAME_EXISTS_IN_FOREST syscall.Errno = 8635 + ERROR_INVALID_USER_PRINCIPAL_NAME syscall.Errno = 8636 + ERROR_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS syscall.Errno = 8637 + ERROR_DS_OID_NOT_FOUND syscall.Errno = 8638 + ERROR_DS_DRA_RECYCLED_TARGET syscall.Errno = 8639 + ERROR_DS_DISALLOWED_NC_REDIRECT syscall.Errno = 8640 + ERROR_DS_HIGH_ADLDS_FFL syscall.Errno = 8641 + ERROR_DS_HIGH_DSA_VERSION syscall.Errno = 8642 + ERROR_DS_LOW_ADLDS_FFL syscall.Errno = 8643 + ERROR_DOMAIN_SID_SAME_AS_LOCAL_WORKSTATION syscall.Errno = 8644 + ERROR_DS_UNDELETE_SAM_VALIDATION_FAILED syscall.Errno = 8645 + ERROR_INCORRECT_ACCOUNT_TYPE syscall.Errno = 8646 + ERROR_DS_SPN_VALUE_NOT_UNIQUE_IN_FOREST syscall.Errno = 8647 + ERROR_DS_UPN_VALUE_NOT_UNIQUE_IN_FOREST syscall.Errno = 8648 + ERROR_DS_MISSING_FOREST_TRUST syscall.Errno = 8649 + ERROR_DS_VALUE_KEY_NOT_UNIQUE syscall.Errno = 8650 + DNS_ERROR_RESPONSE_CODES_BASE syscall.Errno = 9000 + DNS_ERROR_RCODE_NO_ERROR = ERROR_SUCCESS + DNS_ERROR_MASK syscall.Errno = 0x00002328 + DNS_ERROR_RCODE_FORMAT_ERROR syscall.Errno = 9001 + DNS_ERROR_RCODE_SERVER_FAILURE syscall.Errno = 9002 + DNS_ERROR_RCODE_NAME_ERROR syscall.Errno = 9003 + DNS_ERROR_RCODE_NOT_IMPLEMENTED syscall.Errno = 9004 + DNS_ERROR_RCODE_REFUSED syscall.Errno = 9005 + 
DNS_ERROR_RCODE_YXDOMAIN syscall.Errno = 9006 + DNS_ERROR_RCODE_YXRRSET syscall.Errno = 9007 + DNS_ERROR_RCODE_NXRRSET syscall.Errno = 9008 + DNS_ERROR_RCODE_NOTAUTH syscall.Errno = 9009 + DNS_ERROR_RCODE_NOTZONE syscall.Errno = 9010 + DNS_ERROR_RCODE_BADSIG syscall.Errno = 9016 + DNS_ERROR_RCODE_BADKEY syscall.Errno = 9017 + DNS_ERROR_RCODE_BADTIME syscall.Errno = 9018 + DNS_ERROR_RCODE_LAST = DNS_ERROR_RCODE_BADTIME + DNS_ERROR_DNSSEC_BASE syscall.Errno = 9100 + DNS_ERROR_KEYMASTER_REQUIRED syscall.Errno = 9101 + DNS_ERROR_NOT_ALLOWED_ON_SIGNED_ZONE syscall.Errno = 9102 + DNS_ERROR_NSEC3_INCOMPATIBLE_WITH_RSA_SHA1 syscall.Errno = 9103 + DNS_ERROR_NOT_ENOUGH_SIGNING_KEY_DESCRIPTORS syscall.Errno = 9104 + DNS_ERROR_UNSUPPORTED_ALGORITHM syscall.Errno = 9105 + DNS_ERROR_INVALID_KEY_SIZE syscall.Errno = 9106 + DNS_ERROR_SIGNING_KEY_NOT_ACCESSIBLE syscall.Errno = 9107 + DNS_ERROR_KSP_DOES_NOT_SUPPORT_PROTECTION syscall.Errno = 9108 + DNS_ERROR_UNEXPECTED_DATA_PROTECTION_ERROR syscall.Errno = 9109 + DNS_ERROR_UNEXPECTED_CNG_ERROR syscall.Errno = 9110 + DNS_ERROR_UNKNOWN_SIGNING_PARAMETER_VERSION syscall.Errno = 9111 + DNS_ERROR_KSP_NOT_ACCESSIBLE syscall.Errno = 9112 + DNS_ERROR_TOO_MANY_SKDS syscall.Errno = 9113 + DNS_ERROR_INVALID_ROLLOVER_PERIOD syscall.Errno = 9114 + DNS_ERROR_INVALID_INITIAL_ROLLOVER_OFFSET syscall.Errno = 9115 + DNS_ERROR_ROLLOVER_IN_PROGRESS syscall.Errno = 9116 + DNS_ERROR_STANDBY_KEY_NOT_PRESENT syscall.Errno = 9117 + DNS_ERROR_NOT_ALLOWED_ON_ZSK syscall.Errno = 9118 + DNS_ERROR_NOT_ALLOWED_ON_ACTIVE_SKD syscall.Errno = 9119 + DNS_ERROR_ROLLOVER_ALREADY_QUEUED syscall.Errno = 9120 + DNS_ERROR_NOT_ALLOWED_ON_UNSIGNED_ZONE syscall.Errno = 9121 + DNS_ERROR_BAD_KEYMASTER syscall.Errno = 9122 + DNS_ERROR_INVALID_SIGNATURE_VALIDITY_PERIOD syscall.Errno = 9123 + DNS_ERROR_INVALID_NSEC3_ITERATION_COUNT syscall.Errno = 9124 + DNS_ERROR_DNSSEC_IS_DISABLED syscall.Errno = 9125 + DNS_ERROR_INVALID_XML syscall.Errno = 9126 + DNS_ERROR_NO_VALID_TRUST_ANCHORS syscall.Errno = 9127 + DNS_ERROR_ROLLOVER_NOT_POKEABLE syscall.Errno = 9128 + DNS_ERROR_NSEC3_NAME_COLLISION syscall.Errno = 9129 + DNS_ERROR_NSEC_INCOMPATIBLE_WITH_NSEC3_RSA_SHA1 syscall.Errno = 9130 + DNS_ERROR_PACKET_FMT_BASE syscall.Errno = 9500 + DNS_INFO_NO_RECORDS syscall.Errno = 9501 + DNS_ERROR_BAD_PACKET syscall.Errno = 9502 + DNS_ERROR_NO_PACKET syscall.Errno = 9503 + DNS_ERROR_RCODE syscall.Errno = 9504 + DNS_ERROR_UNSECURE_PACKET syscall.Errno = 9505 + DNS_STATUS_PACKET_UNSECURE = DNS_ERROR_UNSECURE_PACKET + DNS_REQUEST_PENDING syscall.Errno = 9506 + DNS_ERROR_NO_MEMORY = ERROR_OUTOFMEMORY + DNS_ERROR_INVALID_NAME = ERROR_INVALID_NAME + DNS_ERROR_INVALID_DATA = ERROR_INVALID_DATA + DNS_ERROR_GENERAL_API_BASE syscall.Errno = 9550 + DNS_ERROR_INVALID_TYPE syscall.Errno = 9551 + DNS_ERROR_INVALID_IP_ADDRESS syscall.Errno = 9552 + DNS_ERROR_INVALID_PROPERTY syscall.Errno = 9553 + DNS_ERROR_TRY_AGAIN_LATER syscall.Errno = 9554 + DNS_ERROR_NOT_UNIQUE syscall.Errno = 9555 + DNS_ERROR_NON_RFC_NAME syscall.Errno = 9556 + DNS_STATUS_FQDN syscall.Errno = 9557 + DNS_STATUS_DOTTED_NAME syscall.Errno = 9558 + DNS_STATUS_SINGLE_PART_NAME syscall.Errno = 9559 + DNS_ERROR_INVALID_NAME_CHAR syscall.Errno = 9560 + DNS_ERROR_NUMERIC_NAME syscall.Errno = 9561 + DNS_ERROR_NOT_ALLOWED_ON_ROOT_SERVER syscall.Errno = 9562 + DNS_ERROR_NOT_ALLOWED_UNDER_DELEGATION syscall.Errno = 9563 + DNS_ERROR_CANNOT_FIND_ROOT_HINTS syscall.Errno = 9564 + DNS_ERROR_INCONSISTENT_ROOT_HINTS syscall.Errno = 9565 + DNS_ERROR_DWORD_VALUE_TOO_SMALL 
syscall.Errno = 9566 + DNS_ERROR_DWORD_VALUE_TOO_LARGE syscall.Errno = 9567 + DNS_ERROR_BACKGROUND_LOADING syscall.Errno = 9568 + DNS_ERROR_NOT_ALLOWED_ON_RODC syscall.Errno = 9569 + DNS_ERROR_NOT_ALLOWED_UNDER_DNAME syscall.Errno = 9570 + DNS_ERROR_DELEGATION_REQUIRED syscall.Errno = 9571 + DNS_ERROR_INVALID_POLICY_TABLE syscall.Errno = 9572 + DNS_ERROR_ADDRESS_REQUIRED syscall.Errno = 9573 + DNS_ERROR_ZONE_BASE syscall.Errno = 9600 + DNS_ERROR_ZONE_DOES_NOT_EXIST syscall.Errno = 9601 + DNS_ERROR_NO_ZONE_INFO syscall.Errno = 9602 + DNS_ERROR_INVALID_ZONE_OPERATION syscall.Errno = 9603 + DNS_ERROR_ZONE_CONFIGURATION_ERROR syscall.Errno = 9604 + DNS_ERROR_ZONE_HAS_NO_SOA_RECORD syscall.Errno = 9605 + DNS_ERROR_ZONE_HAS_NO_NS_RECORDS syscall.Errno = 9606 + DNS_ERROR_ZONE_LOCKED syscall.Errno = 9607 + DNS_ERROR_ZONE_CREATION_FAILED syscall.Errno = 9608 + DNS_ERROR_ZONE_ALREADY_EXISTS syscall.Errno = 9609 + DNS_ERROR_AUTOZONE_ALREADY_EXISTS syscall.Errno = 9610 + DNS_ERROR_INVALID_ZONE_TYPE syscall.Errno = 9611 + DNS_ERROR_SECONDARY_REQUIRES_MASTER_IP syscall.Errno = 9612 + DNS_ERROR_ZONE_NOT_SECONDARY syscall.Errno = 9613 + DNS_ERROR_NEED_SECONDARY_ADDRESSES syscall.Errno = 9614 + DNS_ERROR_WINS_INIT_FAILED syscall.Errno = 9615 + DNS_ERROR_NEED_WINS_SERVERS syscall.Errno = 9616 + DNS_ERROR_NBSTAT_INIT_FAILED syscall.Errno = 9617 + DNS_ERROR_SOA_DELETE_INVALID syscall.Errno = 9618 + DNS_ERROR_FORWARDER_ALREADY_EXISTS syscall.Errno = 9619 + DNS_ERROR_ZONE_REQUIRES_MASTER_IP syscall.Errno = 9620 + DNS_ERROR_ZONE_IS_SHUTDOWN syscall.Errno = 9621 + DNS_ERROR_ZONE_LOCKED_FOR_SIGNING syscall.Errno = 9622 + DNS_ERROR_DATAFILE_BASE syscall.Errno = 9650 + DNS_ERROR_PRIMARY_REQUIRES_DATAFILE syscall.Errno = 9651 + DNS_ERROR_INVALID_DATAFILE_NAME syscall.Errno = 9652 + DNS_ERROR_DATAFILE_OPEN_FAILURE syscall.Errno = 9653 + DNS_ERROR_FILE_WRITEBACK_FAILED syscall.Errno = 9654 + DNS_ERROR_DATAFILE_PARSING syscall.Errno = 9655 + DNS_ERROR_DATABASE_BASE syscall.Errno = 9700 + DNS_ERROR_RECORD_DOES_NOT_EXIST syscall.Errno = 9701 + DNS_ERROR_RECORD_FORMAT syscall.Errno = 9702 + DNS_ERROR_NODE_CREATION_FAILED syscall.Errno = 9703 + DNS_ERROR_UNKNOWN_RECORD_TYPE syscall.Errno = 9704 + DNS_ERROR_RECORD_TIMED_OUT syscall.Errno = 9705 + DNS_ERROR_NAME_NOT_IN_ZONE syscall.Errno = 9706 + DNS_ERROR_CNAME_LOOP syscall.Errno = 9707 + DNS_ERROR_NODE_IS_CNAME syscall.Errno = 9708 + DNS_ERROR_CNAME_COLLISION syscall.Errno = 9709 + DNS_ERROR_RECORD_ONLY_AT_ZONE_ROOT syscall.Errno = 9710 + DNS_ERROR_RECORD_ALREADY_EXISTS syscall.Errno = 9711 + DNS_ERROR_SECONDARY_DATA syscall.Errno = 9712 + DNS_ERROR_NO_CREATE_CACHE_DATA syscall.Errno = 9713 + DNS_ERROR_NAME_DOES_NOT_EXIST syscall.Errno = 9714 + DNS_WARNING_PTR_CREATE_FAILED syscall.Errno = 9715 + DNS_WARNING_DOMAIN_UNDELETED syscall.Errno = 9716 + DNS_ERROR_DS_UNAVAILABLE syscall.Errno = 9717 + DNS_ERROR_DS_ZONE_ALREADY_EXISTS syscall.Errno = 9718 + DNS_ERROR_NO_BOOTFILE_IF_DS_ZONE syscall.Errno = 9719 + DNS_ERROR_NODE_IS_DNAME syscall.Errno = 9720 + DNS_ERROR_DNAME_COLLISION syscall.Errno = 9721 + DNS_ERROR_ALIAS_LOOP syscall.Errno = 9722 + DNS_ERROR_OPERATION_BASE syscall.Errno = 9750 + DNS_INFO_AXFR_COMPLETE syscall.Errno = 9751 + DNS_ERROR_AXFR syscall.Errno = 9752 + DNS_INFO_ADDED_LOCAL_WINS syscall.Errno = 9753 + DNS_ERROR_SECURE_BASE syscall.Errno = 9800 + DNS_STATUS_CONTINUE_NEEDED syscall.Errno = 9801 + DNS_ERROR_SETUP_BASE syscall.Errno = 9850 + DNS_ERROR_NO_TCPIP syscall.Errno = 9851 + DNS_ERROR_NO_DNS_SERVERS syscall.Errno = 9852 + DNS_ERROR_DP_BASE 
syscall.Errno = 9900 + DNS_ERROR_DP_DOES_NOT_EXIST syscall.Errno = 9901 + DNS_ERROR_DP_ALREADY_EXISTS syscall.Errno = 9902 + DNS_ERROR_DP_NOT_ENLISTED syscall.Errno = 9903 + DNS_ERROR_DP_ALREADY_ENLISTED syscall.Errno = 9904 + DNS_ERROR_DP_NOT_AVAILABLE syscall.Errno = 9905 + DNS_ERROR_DP_FSMO_ERROR syscall.Errno = 9906 + DNS_ERROR_RRL_NOT_ENABLED syscall.Errno = 9911 + DNS_ERROR_RRL_INVALID_WINDOW_SIZE syscall.Errno = 9912 + DNS_ERROR_RRL_INVALID_IPV4_PREFIX syscall.Errno = 9913 + DNS_ERROR_RRL_INVALID_IPV6_PREFIX syscall.Errno = 9914 + DNS_ERROR_RRL_INVALID_TC_RATE syscall.Errno = 9915 + DNS_ERROR_RRL_INVALID_LEAK_RATE syscall.Errno = 9916 + DNS_ERROR_RRL_LEAK_RATE_LESSTHAN_TC_RATE syscall.Errno = 9917 + DNS_ERROR_VIRTUALIZATION_INSTANCE_ALREADY_EXISTS syscall.Errno = 9921 + DNS_ERROR_VIRTUALIZATION_INSTANCE_DOES_NOT_EXIST syscall.Errno = 9922 + DNS_ERROR_VIRTUALIZATION_TREE_LOCKED syscall.Errno = 9923 + DNS_ERROR_INVAILD_VIRTUALIZATION_INSTANCE_NAME syscall.Errno = 9924 + DNS_ERROR_DEFAULT_VIRTUALIZATION_INSTANCE syscall.Errno = 9925 + DNS_ERROR_ZONESCOPE_ALREADY_EXISTS syscall.Errno = 9951 + DNS_ERROR_ZONESCOPE_DOES_NOT_EXIST syscall.Errno = 9952 + DNS_ERROR_DEFAULT_ZONESCOPE syscall.Errno = 9953 + DNS_ERROR_INVALID_ZONESCOPE_NAME syscall.Errno = 9954 + DNS_ERROR_NOT_ALLOWED_WITH_ZONESCOPES syscall.Errno = 9955 + DNS_ERROR_LOAD_ZONESCOPE_FAILED syscall.Errno = 9956 + DNS_ERROR_ZONESCOPE_FILE_WRITEBACK_FAILED syscall.Errno = 9957 + DNS_ERROR_INVALID_SCOPE_NAME syscall.Errno = 9958 + DNS_ERROR_SCOPE_DOES_NOT_EXIST syscall.Errno = 9959 + DNS_ERROR_DEFAULT_SCOPE syscall.Errno = 9960 + DNS_ERROR_INVALID_SCOPE_OPERATION syscall.Errno = 9961 + DNS_ERROR_SCOPE_LOCKED syscall.Errno = 9962 + DNS_ERROR_SCOPE_ALREADY_EXISTS syscall.Errno = 9963 + DNS_ERROR_POLICY_ALREADY_EXISTS syscall.Errno = 9971 + DNS_ERROR_POLICY_DOES_NOT_EXIST syscall.Errno = 9972 + DNS_ERROR_POLICY_INVALID_CRITERIA syscall.Errno = 9973 + DNS_ERROR_POLICY_INVALID_SETTINGS syscall.Errno = 9974 + DNS_ERROR_CLIENT_SUBNET_IS_ACCESSED syscall.Errno = 9975 + DNS_ERROR_CLIENT_SUBNET_DOES_NOT_EXIST syscall.Errno = 9976 + DNS_ERROR_CLIENT_SUBNET_ALREADY_EXISTS syscall.Errno = 9977 + DNS_ERROR_SUBNET_DOES_NOT_EXIST syscall.Errno = 9978 + DNS_ERROR_SUBNET_ALREADY_EXISTS syscall.Errno = 9979 + DNS_ERROR_POLICY_LOCKED syscall.Errno = 9980 + DNS_ERROR_POLICY_INVALID_WEIGHT syscall.Errno = 9981 + DNS_ERROR_POLICY_INVALID_NAME syscall.Errno = 9982 + DNS_ERROR_POLICY_MISSING_CRITERIA syscall.Errno = 9983 + DNS_ERROR_INVALID_CLIENT_SUBNET_NAME syscall.Errno = 9984 + DNS_ERROR_POLICY_PROCESSING_ORDER_INVALID syscall.Errno = 9985 + DNS_ERROR_POLICY_SCOPE_MISSING syscall.Errno = 9986 + DNS_ERROR_POLICY_SCOPE_NOT_ALLOWED syscall.Errno = 9987 + DNS_ERROR_SERVERSCOPE_IS_REFERENCED syscall.Errno = 9988 + DNS_ERROR_ZONESCOPE_IS_REFERENCED syscall.Errno = 9989 + DNS_ERROR_POLICY_INVALID_CRITERIA_CLIENT_SUBNET syscall.Errno = 9990 + DNS_ERROR_POLICY_INVALID_CRITERIA_TRANSPORT_PROTOCOL syscall.Errno = 9991 + DNS_ERROR_POLICY_INVALID_CRITERIA_NETWORK_PROTOCOL syscall.Errno = 9992 + DNS_ERROR_POLICY_INVALID_CRITERIA_INTERFACE syscall.Errno = 9993 + DNS_ERROR_POLICY_INVALID_CRITERIA_FQDN syscall.Errno = 9994 + DNS_ERROR_POLICY_INVALID_CRITERIA_QUERY_TYPE syscall.Errno = 9995 + DNS_ERROR_POLICY_INVALID_CRITERIA_TIME_OF_DAY syscall.Errno = 9996 + WSABASEERR syscall.Errno = 10000 + WSAEINTR syscall.Errno = 10004 + WSAEBADF syscall.Errno = 10009 + WSAEACCES syscall.Errno = 10013 + WSAEFAULT syscall.Errno = 10014 + WSAEINVAL syscall.Errno = 10022 + WSAEMFILE 
syscall.Errno = 10024 + WSAEWOULDBLOCK syscall.Errno = 10035 + WSAEINPROGRESS syscall.Errno = 10036 + WSAEALREADY syscall.Errno = 10037 + WSAENOTSOCK syscall.Errno = 10038 + WSAEDESTADDRREQ syscall.Errno = 10039 + WSAEMSGSIZE syscall.Errno = 10040 + WSAEPROTOTYPE syscall.Errno = 10041 + WSAENOPROTOOPT syscall.Errno = 10042 + WSAEPROTONOSUPPORT syscall.Errno = 10043 + WSAESOCKTNOSUPPORT syscall.Errno = 10044 + WSAEOPNOTSUPP syscall.Errno = 10045 + WSAEPFNOSUPPORT syscall.Errno = 10046 + WSAEAFNOSUPPORT syscall.Errno = 10047 + WSAEADDRINUSE syscall.Errno = 10048 + WSAEADDRNOTAVAIL syscall.Errno = 10049 + WSAENETDOWN syscall.Errno = 10050 + WSAENETUNREACH syscall.Errno = 10051 + WSAENETRESET syscall.Errno = 10052 + WSAECONNABORTED syscall.Errno = 10053 + WSAECONNRESET syscall.Errno = 10054 + WSAENOBUFS syscall.Errno = 10055 + WSAEISCONN syscall.Errno = 10056 + WSAENOTCONN syscall.Errno = 10057 + WSAESHUTDOWN syscall.Errno = 10058 + WSAETOOMANYREFS syscall.Errno = 10059 + WSAETIMEDOUT syscall.Errno = 10060 + WSAECONNREFUSED syscall.Errno = 10061 + WSAELOOP syscall.Errno = 10062 + WSAENAMETOOLONG syscall.Errno = 10063 + WSAEHOSTDOWN syscall.Errno = 10064 + WSAEHOSTUNREACH syscall.Errno = 10065 + WSAENOTEMPTY syscall.Errno = 10066 + WSAEPROCLIM syscall.Errno = 10067 + WSAEUSERS syscall.Errno = 10068 + WSAEDQUOT syscall.Errno = 10069 + WSAESTALE syscall.Errno = 10070 + WSAEREMOTE syscall.Errno = 10071 + WSASYSNOTREADY syscall.Errno = 10091 + WSAVERNOTSUPPORTED syscall.Errno = 10092 + WSANOTINITIALISED syscall.Errno = 10093 + WSAEDISCON syscall.Errno = 10101 + WSAENOMORE syscall.Errno = 10102 + WSAECANCELLED syscall.Errno = 10103 + WSAEINVALIDPROCTABLE syscall.Errno = 10104 + WSAEINVALIDPROVIDER syscall.Errno = 10105 + WSAEPROVIDERFAILEDINIT syscall.Errno = 10106 + WSASYSCALLFAILURE syscall.Errno = 10107 + WSASERVICE_NOT_FOUND syscall.Errno = 10108 + WSATYPE_NOT_FOUND syscall.Errno = 10109 + WSA_E_NO_MORE syscall.Errno = 10110 + WSA_E_CANCELLED syscall.Errno = 10111 + WSAEREFUSED syscall.Errno = 10112 + WSAHOST_NOT_FOUND syscall.Errno = 11001 + WSATRY_AGAIN syscall.Errno = 11002 + WSANO_RECOVERY syscall.Errno = 11003 + WSANO_DATA syscall.Errno = 11004 + WSA_QOS_RECEIVERS syscall.Errno = 11005 + WSA_QOS_SENDERS syscall.Errno = 11006 + WSA_QOS_NO_SENDERS syscall.Errno = 11007 + WSA_QOS_NO_RECEIVERS syscall.Errno = 11008 + WSA_QOS_REQUEST_CONFIRMED syscall.Errno = 11009 + WSA_QOS_ADMISSION_FAILURE syscall.Errno = 11010 + WSA_QOS_POLICY_FAILURE syscall.Errno = 11011 + WSA_QOS_BAD_STYLE syscall.Errno = 11012 + WSA_QOS_BAD_OBJECT syscall.Errno = 11013 + WSA_QOS_TRAFFIC_CTRL_ERROR syscall.Errno = 11014 + WSA_QOS_GENERIC_ERROR syscall.Errno = 11015 + WSA_QOS_ESERVICETYPE syscall.Errno = 11016 + WSA_QOS_EFLOWSPEC syscall.Errno = 11017 + WSA_QOS_EPROVSPECBUF syscall.Errno = 11018 + WSA_QOS_EFILTERSTYLE syscall.Errno = 11019 + WSA_QOS_EFILTERTYPE syscall.Errno = 11020 + WSA_QOS_EFILTERCOUNT syscall.Errno = 11021 + WSA_QOS_EOBJLENGTH syscall.Errno = 11022 + WSA_QOS_EFLOWCOUNT syscall.Errno = 11023 + WSA_QOS_EUNKOWNPSOBJ syscall.Errno = 11024 + WSA_QOS_EPOLICYOBJ syscall.Errno = 11025 + WSA_QOS_EFLOWDESC syscall.Errno = 11026 + WSA_QOS_EPSFLOWSPEC syscall.Errno = 11027 + WSA_QOS_EPSFILTERSPEC syscall.Errno = 11028 + WSA_QOS_ESDMODEOBJ syscall.Errno = 11029 + WSA_QOS_ESHAPERATEOBJ syscall.Errno = 11030 + WSA_QOS_RESERVED_PETYPE syscall.Errno = 11031 + WSA_SECURE_HOST_NOT_FOUND syscall.Errno = 11032 + WSA_IPSEC_NAME_POLICY_ERROR syscall.Errno = 11033 + ERROR_IPSEC_QM_POLICY_EXISTS syscall.Errno = 13000 + 
ERROR_IPSEC_QM_POLICY_NOT_FOUND syscall.Errno = 13001 + ERROR_IPSEC_QM_POLICY_IN_USE syscall.Errno = 13002 + ERROR_IPSEC_MM_POLICY_EXISTS syscall.Errno = 13003 + ERROR_IPSEC_MM_POLICY_NOT_FOUND syscall.Errno = 13004 + ERROR_IPSEC_MM_POLICY_IN_USE syscall.Errno = 13005 + ERROR_IPSEC_MM_FILTER_EXISTS syscall.Errno = 13006 + ERROR_IPSEC_MM_FILTER_NOT_FOUND syscall.Errno = 13007 + ERROR_IPSEC_TRANSPORT_FILTER_EXISTS syscall.Errno = 13008 + ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND syscall.Errno = 13009 + ERROR_IPSEC_MM_AUTH_EXISTS syscall.Errno = 13010 + ERROR_IPSEC_MM_AUTH_NOT_FOUND syscall.Errno = 13011 + ERROR_IPSEC_MM_AUTH_IN_USE syscall.Errno = 13012 + ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND syscall.Errno = 13013 + ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND syscall.Errno = 13014 + ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND syscall.Errno = 13015 + ERROR_IPSEC_TUNNEL_FILTER_EXISTS syscall.Errno = 13016 + ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND syscall.Errno = 13017 + ERROR_IPSEC_MM_FILTER_PENDING_DELETION syscall.Errno = 13018 + ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION syscall.Errno = 13019 + ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION syscall.Errno = 13020 + ERROR_IPSEC_MM_POLICY_PENDING_DELETION syscall.Errno = 13021 + ERROR_IPSEC_MM_AUTH_PENDING_DELETION syscall.Errno = 13022 + ERROR_IPSEC_QM_POLICY_PENDING_DELETION syscall.Errno = 13023 + WARNING_IPSEC_MM_POLICY_PRUNED syscall.Errno = 13024 + WARNING_IPSEC_QM_POLICY_PRUNED syscall.Errno = 13025 + ERROR_IPSEC_IKE_NEG_STATUS_BEGIN syscall.Errno = 13800 + ERROR_IPSEC_IKE_AUTH_FAIL syscall.Errno = 13801 + ERROR_IPSEC_IKE_ATTRIB_FAIL syscall.Errno = 13802 + ERROR_IPSEC_IKE_NEGOTIATION_PENDING syscall.Errno = 13803 + ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR syscall.Errno = 13804 + ERROR_IPSEC_IKE_TIMED_OUT syscall.Errno = 13805 + ERROR_IPSEC_IKE_NO_CERT syscall.Errno = 13806 + ERROR_IPSEC_IKE_SA_DELETED syscall.Errno = 13807 + ERROR_IPSEC_IKE_SA_REAPED syscall.Errno = 13808 + ERROR_IPSEC_IKE_MM_ACQUIRE_DROP syscall.Errno = 13809 + ERROR_IPSEC_IKE_QM_ACQUIRE_DROP syscall.Errno = 13810 + ERROR_IPSEC_IKE_QUEUE_DROP_MM syscall.Errno = 13811 + ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM syscall.Errno = 13812 + ERROR_IPSEC_IKE_DROP_NO_RESPONSE syscall.Errno = 13813 + ERROR_IPSEC_IKE_MM_DELAY_DROP syscall.Errno = 13814 + ERROR_IPSEC_IKE_QM_DELAY_DROP syscall.Errno = 13815 + ERROR_IPSEC_IKE_ERROR syscall.Errno = 13816 + ERROR_IPSEC_IKE_CRL_FAILED syscall.Errno = 13817 + ERROR_IPSEC_IKE_INVALID_KEY_USAGE syscall.Errno = 13818 + ERROR_IPSEC_IKE_INVALID_CERT_TYPE syscall.Errno = 13819 + ERROR_IPSEC_IKE_NO_PRIVATE_KEY syscall.Errno = 13820 + ERROR_IPSEC_IKE_SIMULTANEOUS_REKEY syscall.Errno = 13821 + ERROR_IPSEC_IKE_DH_FAIL syscall.Errno = 13822 + ERROR_IPSEC_IKE_CRITICAL_PAYLOAD_NOT_RECOGNIZED syscall.Errno = 13823 + ERROR_IPSEC_IKE_INVALID_HEADER syscall.Errno = 13824 + ERROR_IPSEC_IKE_NO_POLICY syscall.Errno = 13825 + ERROR_IPSEC_IKE_INVALID_SIGNATURE syscall.Errno = 13826 + ERROR_IPSEC_IKE_KERBEROS_ERROR syscall.Errno = 13827 + ERROR_IPSEC_IKE_NO_PUBLIC_KEY syscall.Errno = 13828 + ERROR_IPSEC_IKE_PROCESS_ERR syscall.Errno = 13829 + ERROR_IPSEC_IKE_PROCESS_ERR_SA syscall.Errno = 13830 + ERROR_IPSEC_IKE_PROCESS_ERR_PROP syscall.Errno = 13831 + ERROR_IPSEC_IKE_PROCESS_ERR_TRANS syscall.Errno = 13832 + ERROR_IPSEC_IKE_PROCESS_ERR_KE syscall.Errno = 13833 + ERROR_IPSEC_IKE_PROCESS_ERR_ID syscall.Errno = 13834 + ERROR_IPSEC_IKE_PROCESS_ERR_CERT syscall.Errno = 13835 + ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ syscall.Errno = 13836 + ERROR_IPSEC_IKE_PROCESS_ERR_HASH syscall.Errno = 
13837 + ERROR_IPSEC_IKE_PROCESS_ERR_SIG syscall.Errno = 13838 + ERROR_IPSEC_IKE_PROCESS_ERR_NONCE syscall.Errno = 13839 + ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY syscall.Errno = 13840 + ERROR_IPSEC_IKE_PROCESS_ERR_DELETE syscall.Errno = 13841 + ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR syscall.Errno = 13842 + ERROR_IPSEC_IKE_INVALID_PAYLOAD syscall.Errno = 13843 + ERROR_IPSEC_IKE_LOAD_SOFT_SA syscall.Errno = 13844 + ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN syscall.Errno = 13845 + ERROR_IPSEC_IKE_INVALID_COOKIE syscall.Errno = 13846 + ERROR_IPSEC_IKE_NO_PEER_CERT syscall.Errno = 13847 + ERROR_IPSEC_IKE_PEER_CRL_FAILED syscall.Errno = 13848 + ERROR_IPSEC_IKE_POLICY_CHANGE syscall.Errno = 13849 + ERROR_IPSEC_IKE_NO_MM_POLICY syscall.Errno = 13850 + ERROR_IPSEC_IKE_NOTCBPRIV syscall.Errno = 13851 + ERROR_IPSEC_IKE_SECLOADFAIL syscall.Errno = 13852 + ERROR_IPSEC_IKE_FAILSSPINIT syscall.Errno = 13853 + ERROR_IPSEC_IKE_FAILQUERYSSP syscall.Errno = 13854 + ERROR_IPSEC_IKE_SRVACQFAIL syscall.Errno = 13855 + ERROR_IPSEC_IKE_SRVQUERYCRED syscall.Errno = 13856 + ERROR_IPSEC_IKE_GETSPIFAIL syscall.Errno = 13857 + ERROR_IPSEC_IKE_INVALID_FILTER syscall.Errno = 13858 + ERROR_IPSEC_IKE_OUT_OF_MEMORY syscall.Errno = 13859 + ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED syscall.Errno = 13860 + ERROR_IPSEC_IKE_INVALID_POLICY syscall.Errno = 13861 + ERROR_IPSEC_IKE_UNKNOWN_DOI syscall.Errno = 13862 + ERROR_IPSEC_IKE_INVALID_SITUATION syscall.Errno = 13863 + ERROR_IPSEC_IKE_DH_FAILURE syscall.Errno = 13864 + ERROR_IPSEC_IKE_INVALID_GROUP syscall.Errno = 13865 + ERROR_IPSEC_IKE_ENCRYPT syscall.Errno = 13866 + ERROR_IPSEC_IKE_DECRYPT syscall.Errno = 13867 + ERROR_IPSEC_IKE_POLICY_MATCH syscall.Errno = 13868 + ERROR_IPSEC_IKE_UNSUPPORTED_ID syscall.Errno = 13869 + ERROR_IPSEC_IKE_INVALID_HASH syscall.Errno = 13870 + ERROR_IPSEC_IKE_INVALID_HASH_ALG syscall.Errno = 13871 + ERROR_IPSEC_IKE_INVALID_HASH_SIZE syscall.Errno = 13872 + ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG syscall.Errno = 13873 + ERROR_IPSEC_IKE_INVALID_AUTH_ALG syscall.Errno = 13874 + ERROR_IPSEC_IKE_INVALID_SIG syscall.Errno = 13875 + ERROR_IPSEC_IKE_LOAD_FAILED syscall.Errno = 13876 + ERROR_IPSEC_IKE_RPC_DELETE syscall.Errno = 13877 + ERROR_IPSEC_IKE_BENIGN_REINIT syscall.Errno = 13878 + ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY syscall.Errno = 13879 + ERROR_IPSEC_IKE_INVALID_MAJOR_VERSION syscall.Errno = 13880 + ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN syscall.Errno = 13881 + ERROR_IPSEC_IKE_MM_LIMIT syscall.Errno = 13882 + ERROR_IPSEC_IKE_NEGOTIATION_DISABLED syscall.Errno = 13883 + ERROR_IPSEC_IKE_QM_LIMIT syscall.Errno = 13884 + ERROR_IPSEC_IKE_MM_EXPIRED syscall.Errno = 13885 + ERROR_IPSEC_IKE_PEER_MM_ASSUMED_INVALID syscall.Errno = 13886 + ERROR_IPSEC_IKE_CERT_CHAIN_POLICY_MISMATCH syscall.Errno = 13887 + ERROR_IPSEC_IKE_UNEXPECTED_MESSAGE_ID syscall.Errno = 13888 + ERROR_IPSEC_IKE_INVALID_AUTH_PAYLOAD syscall.Errno = 13889 + ERROR_IPSEC_IKE_DOS_COOKIE_SENT syscall.Errno = 13890 + ERROR_IPSEC_IKE_SHUTTING_DOWN syscall.Errno = 13891 + ERROR_IPSEC_IKE_CGA_AUTH_FAILED syscall.Errno = 13892 + ERROR_IPSEC_IKE_PROCESS_ERR_NATOA syscall.Errno = 13893 + ERROR_IPSEC_IKE_INVALID_MM_FOR_QM syscall.Errno = 13894 + ERROR_IPSEC_IKE_QM_EXPIRED syscall.Errno = 13895 + ERROR_IPSEC_IKE_TOO_MANY_FILTERS syscall.Errno = 13896 + ERROR_IPSEC_IKE_NEG_STATUS_END syscall.Errno = 13897 + ERROR_IPSEC_IKE_KILL_DUMMY_NAP_TUNNEL syscall.Errno = 13898 + ERROR_IPSEC_IKE_INNER_IP_ASSIGNMENT_FAILURE syscall.Errno = 13899 + ERROR_IPSEC_IKE_REQUIRE_CP_PAYLOAD_MISSING syscall.Errno = 13900 + 
ERROR_IPSEC_KEY_MODULE_IMPERSONATION_NEGOTIATION_PENDING syscall.Errno = 13901 + ERROR_IPSEC_IKE_COEXISTENCE_SUPPRESS syscall.Errno = 13902 + ERROR_IPSEC_IKE_RATELIMIT_DROP syscall.Errno = 13903 + ERROR_IPSEC_IKE_PEER_DOESNT_SUPPORT_MOBIKE syscall.Errno = 13904 + ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE syscall.Errno = 13905 + ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_FAILURE syscall.Errno = 13906 + ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE_WITH_OPTIONAL_RETRY syscall.Errno = 13907 + ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_AND_CERTMAP_FAILURE syscall.Errno = 13908 + ERROR_IPSEC_IKE_NEG_STATUS_EXTENDED_END syscall.Errno = 13909 + ERROR_IPSEC_BAD_SPI syscall.Errno = 13910 + ERROR_IPSEC_SA_LIFETIME_EXPIRED syscall.Errno = 13911 + ERROR_IPSEC_WRONG_SA syscall.Errno = 13912 + ERROR_IPSEC_REPLAY_CHECK_FAILED syscall.Errno = 13913 + ERROR_IPSEC_INVALID_PACKET syscall.Errno = 13914 + ERROR_IPSEC_INTEGRITY_CHECK_FAILED syscall.Errno = 13915 + ERROR_IPSEC_CLEAR_TEXT_DROP syscall.Errno = 13916 + ERROR_IPSEC_AUTH_FIREWALL_DROP syscall.Errno = 13917 + ERROR_IPSEC_THROTTLE_DROP syscall.Errno = 13918 + ERROR_IPSEC_DOSP_BLOCK syscall.Errno = 13925 + ERROR_IPSEC_DOSP_RECEIVED_MULTICAST syscall.Errno = 13926 + ERROR_IPSEC_DOSP_INVALID_PACKET syscall.Errno = 13927 + ERROR_IPSEC_DOSP_STATE_LOOKUP_FAILED syscall.Errno = 13928 + ERROR_IPSEC_DOSP_MAX_ENTRIES syscall.Errno = 13929 + ERROR_IPSEC_DOSP_KEYMOD_NOT_ALLOWED syscall.Errno = 13930 + ERROR_IPSEC_DOSP_NOT_INSTALLED syscall.Errno = 13931 + ERROR_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES syscall.Errno = 13932 + ERROR_SXS_SECTION_NOT_FOUND syscall.Errno = 14000 + ERROR_SXS_CANT_GEN_ACTCTX syscall.Errno = 14001 + ERROR_SXS_INVALID_ACTCTXDATA_FORMAT syscall.Errno = 14002 + ERROR_SXS_ASSEMBLY_NOT_FOUND syscall.Errno = 14003 + ERROR_SXS_MANIFEST_FORMAT_ERROR syscall.Errno = 14004 + ERROR_SXS_MANIFEST_PARSE_ERROR syscall.Errno = 14005 + ERROR_SXS_ACTIVATION_CONTEXT_DISABLED syscall.Errno = 14006 + ERROR_SXS_KEY_NOT_FOUND syscall.Errno = 14007 + ERROR_SXS_VERSION_CONFLICT syscall.Errno = 14008 + ERROR_SXS_WRONG_SECTION_TYPE syscall.Errno = 14009 + ERROR_SXS_THREAD_QUERIES_DISABLED syscall.Errno = 14010 + ERROR_SXS_PROCESS_DEFAULT_ALREADY_SET syscall.Errno = 14011 + ERROR_SXS_UNKNOWN_ENCODING_GROUP syscall.Errno = 14012 + ERROR_SXS_UNKNOWN_ENCODING syscall.Errno = 14013 + ERROR_SXS_INVALID_XML_NAMESPACE_URI syscall.Errno = 14014 + ERROR_SXS_ROOT_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14015 + ERROR_SXS_LEAF_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14016 + ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14017 + ERROR_SXS_MANIFEST_MISSING_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14018 + ERROR_SXS_MANIFEST_INVALID_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14019 + ERROR_SXS_PRIVATE_MANIFEST_CROSS_PATH_WITH_REPARSE_POINT syscall.Errno = 14020 + ERROR_SXS_DUPLICATE_DLL_NAME syscall.Errno = 14021 + ERROR_SXS_DUPLICATE_WINDOWCLASS_NAME syscall.Errno = 14022 + ERROR_SXS_DUPLICATE_CLSID syscall.Errno = 14023 + ERROR_SXS_DUPLICATE_IID syscall.Errno = 14024 + ERROR_SXS_DUPLICATE_TLBID syscall.Errno = 14025 + ERROR_SXS_DUPLICATE_PROGID syscall.Errno = 14026 + ERROR_SXS_DUPLICATE_ASSEMBLY_NAME syscall.Errno = 14027 + ERROR_SXS_FILE_HASH_MISMATCH syscall.Errno = 14028 + ERROR_SXS_POLICY_PARSE_ERROR syscall.Errno = 14029 + ERROR_SXS_XML_E_MISSINGQUOTE syscall.Errno = 14030 + ERROR_SXS_XML_E_COMMENTSYNTAX syscall.Errno = 14031 + ERROR_SXS_XML_E_BADSTARTNAMECHAR syscall.Errno = 14032 + ERROR_SXS_XML_E_BADNAMECHAR syscall.Errno = 14033 + 
ERROR_SXS_XML_E_BADCHARINSTRING syscall.Errno = 14034 + ERROR_SXS_XML_E_XMLDECLSYNTAX syscall.Errno = 14035 + ERROR_SXS_XML_E_BADCHARDATA syscall.Errno = 14036 + ERROR_SXS_XML_E_MISSINGWHITESPACE syscall.Errno = 14037 + ERROR_SXS_XML_E_EXPECTINGTAGEND syscall.Errno = 14038 + ERROR_SXS_XML_E_MISSINGSEMICOLON syscall.Errno = 14039 + ERROR_SXS_XML_E_UNBALANCEDPAREN syscall.Errno = 14040 + ERROR_SXS_XML_E_INTERNALERROR syscall.Errno = 14041 + ERROR_SXS_XML_E_UNEXPECTED_WHITESPACE syscall.Errno = 14042 + ERROR_SXS_XML_E_INCOMPLETE_ENCODING syscall.Errno = 14043 + ERROR_SXS_XML_E_MISSING_PAREN syscall.Errno = 14044 + ERROR_SXS_XML_E_EXPECTINGCLOSEQUOTE syscall.Errno = 14045 + ERROR_SXS_XML_E_MULTIPLE_COLONS syscall.Errno = 14046 + ERROR_SXS_XML_E_INVALID_DECIMAL syscall.Errno = 14047 + ERROR_SXS_XML_E_INVALID_HEXIDECIMAL syscall.Errno = 14048 + ERROR_SXS_XML_E_INVALID_UNICODE syscall.Errno = 14049 + ERROR_SXS_XML_E_WHITESPACEORQUESTIONMARK syscall.Errno = 14050 + ERROR_SXS_XML_E_UNEXPECTEDENDTAG syscall.Errno = 14051 + ERROR_SXS_XML_E_UNCLOSEDTAG syscall.Errno = 14052 + ERROR_SXS_XML_E_DUPLICATEATTRIBUTE syscall.Errno = 14053 + ERROR_SXS_XML_E_MULTIPLEROOTS syscall.Errno = 14054 + ERROR_SXS_XML_E_INVALIDATROOTLEVEL syscall.Errno = 14055 + ERROR_SXS_XML_E_BADXMLDECL syscall.Errno = 14056 + ERROR_SXS_XML_E_MISSINGROOT syscall.Errno = 14057 + ERROR_SXS_XML_E_UNEXPECTEDEOF syscall.Errno = 14058 + ERROR_SXS_XML_E_BADPEREFINSUBSET syscall.Errno = 14059 + ERROR_SXS_XML_E_UNCLOSEDSTARTTAG syscall.Errno = 14060 + ERROR_SXS_XML_E_UNCLOSEDENDTAG syscall.Errno = 14061 + ERROR_SXS_XML_E_UNCLOSEDSTRING syscall.Errno = 14062 + ERROR_SXS_XML_E_UNCLOSEDCOMMENT syscall.Errno = 14063 + ERROR_SXS_XML_E_UNCLOSEDDECL syscall.Errno = 14064 + ERROR_SXS_XML_E_UNCLOSEDCDATA syscall.Errno = 14065 + ERROR_SXS_XML_E_RESERVEDNAMESPACE syscall.Errno = 14066 + ERROR_SXS_XML_E_INVALIDENCODING syscall.Errno = 14067 + ERROR_SXS_XML_E_INVALIDSWITCH syscall.Errno = 14068 + ERROR_SXS_XML_E_BADXMLCASE syscall.Errno = 14069 + ERROR_SXS_XML_E_INVALID_STANDALONE syscall.Errno = 14070 + ERROR_SXS_XML_E_UNEXPECTED_STANDALONE syscall.Errno = 14071 + ERROR_SXS_XML_E_INVALID_VERSION syscall.Errno = 14072 + ERROR_SXS_XML_E_MISSINGEQUALS syscall.Errno = 14073 + ERROR_SXS_PROTECTION_RECOVERY_FAILED syscall.Errno = 14074 + ERROR_SXS_PROTECTION_PUBLIC_KEY_TOO_SHORT syscall.Errno = 14075 + ERROR_SXS_PROTECTION_CATALOG_NOT_VALID syscall.Errno = 14076 + ERROR_SXS_UNTRANSLATABLE_HRESULT syscall.Errno = 14077 + ERROR_SXS_PROTECTION_CATALOG_FILE_MISSING syscall.Errno = 14078 + ERROR_SXS_MISSING_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14079 + ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14080 + ERROR_SXS_ASSEMBLY_MISSING syscall.Errno = 14081 + ERROR_SXS_CORRUPT_ACTIVATION_STACK syscall.Errno = 14082 + ERROR_SXS_CORRUPTION syscall.Errno = 14083 + ERROR_SXS_EARLY_DEACTIVATION syscall.Errno = 14084 + ERROR_SXS_INVALID_DEACTIVATION syscall.Errno = 14085 + ERROR_SXS_MULTIPLE_DEACTIVATION syscall.Errno = 14086 + ERROR_SXS_PROCESS_TERMINATION_REQUESTED syscall.Errno = 14087 + ERROR_SXS_RELEASE_ACTIVATION_CONTEXT syscall.Errno = 14088 + ERROR_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY syscall.Errno = 14089 + ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE syscall.Errno = 14090 + ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14091 + ERROR_SXS_IDENTITY_DUPLICATE_ATTRIBUTE syscall.Errno = 14092 + ERROR_SXS_IDENTITY_PARSE_ERROR syscall.Errno = 14093 + ERROR_MALFORMED_SUBSTITUTION_STRING syscall.Errno = 14094 + 
+	[generated Windows error-code tables, apparently vendor/golang.org/x/sys/windows/zerrors_windows.go:
+	syscall.Errno constants from ERROR_SXS_INCORRECT_PUBLIC_KEY_TOKEN (14095)
+	through the ERROR_EVT_*, ERROR_MUI_*, ERROR_MRM_*, ERROR_MCA_*, ERROR_GPIO_*,
+	ERROR_INSTALL_*, APPMODEL_ERROR_*, ERROR_STATE_*, and STORE_ERROR_* ranges,
+	followed by HRESULT Handle constants (S_OK/S_FALSE, E_*, RO_E_*, CO_E_*,
+	OLE_E_*, DRAGDROP_*, REGDB_E_*, MK_E_*, EVENT_E_*, XACT_E_*, SCHED_*,
+	APPX_E_*, DISP_E_*, TYPE_E_*, STG_E_*, RPC_E_*, NTE_*, SEC_E_*, CRYPT_E_*,
+	OSS_*, CERTSRV_E_*, TRUST_E_*, MSSIPOTF_E_*, CERT_E_*, SPAPI_E_*, SCARD_*),
+	with the COMADMIN_E_* block continuing past COMADMIN_E_START_APP_NEEDS_COMPONENTS]
0x80110448 + COMADMIN_E_REQUIRES_DIFFERENT_PLATFORM Handle = 0x80110449 + COMADMIN_E_CAN_NOT_EXPORT_APP_PROXY Handle = 0x8011044A + COMADMIN_E_CAN_NOT_START_APP Handle = 0x8011044B + COMADMIN_E_CAN_NOT_EXPORT_SYS_APP Handle = 0x8011044C + COMADMIN_E_CANT_SUBSCRIBE_TO_COMPONENT Handle = 0x8011044D + COMADMIN_E_EVENTCLASS_CANT_BE_SUBSCRIBER Handle = 0x8011044E + COMADMIN_E_LIB_APP_PROXY_INCOMPATIBLE Handle = 0x8011044F + COMADMIN_E_BASE_PARTITION_ONLY Handle = 0x80110450 + COMADMIN_E_START_APP_DISABLED Handle = 0x80110451 + COMADMIN_E_CAT_DUPLICATE_PARTITION_NAME Handle = 0x80110457 + COMADMIN_E_CAT_INVALID_PARTITION_NAME Handle = 0x80110458 + COMADMIN_E_CAT_PARTITION_IN_USE Handle = 0x80110459 + COMADMIN_E_FILE_PARTITION_DUPLICATE_FILES Handle = 0x8011045A + COMADMIN_E_CAT_IMPORTED_COMPONENTS_NOT_ALLOWED Handle = 0x8011045B + COMADMIN_E_AMBIGUOUS_APPLICATION_NAME Handle = 0x8011045C + COMADMIN_E_AMBIGUOUS_PARTITION_NAME Handle = 0x8011045D + COMADMIN_E_REGDB_NOTINITIALIZED Handle = 0x80110472 + COMADMIN_E_REGDB_NOTOPEN Handle = 0x80110473 + COMADMIN_E_REGDB_SYSTEMERR Handle = 0x80110474 + COMADMIN_E_REGDB_ALREADYRUNNING Handle = 0x80110475 + COMADMIN_E_MIG_VERSIONNOTSUPPORTED Handle = 0x80110480 + COMADMIN_E_MIG_SCHEMANOTFOUND Handle = 0x80110481 + COMADMIN_E_CAT_BITNESSMISMATCH Handle = 0x80110482 + COMADMIN_E_CAT_UNACCEPTABLEBITNESS Handle = 0x80110483 + COMADMIN_E_CAT_WRONGAPPBITNESS Handle = 0x80110484 + COMADMIN_E_CAT_PAUSE_RESUME_NOT_SUPPORTED Handle = 0x80110485 + COMADMIN_E_CAT_SERVERFAULT Handle = 0x80110486 + COMQC_E_APPLICATION_NOT_QUEUED Handle = 0x80110600 + COMQC_E_NO_QUEUEABLE_INTERFACES Handle = 0x80110601 + COMQC_E_QUEUING_SERVICE_NOT_AVAILABLE Handle = 0x80110602 + COMQC_E_NO_IPERSISTSTREAM Handle = 0x80110603 + COMQC_E_BAD_MESSAGE Handle = 0x80110604 + COMQC_E_UNAUTHENTICATED Handle = 0x80110605 + COMQC_E_UNTRUSTED_ENQUEUER Handle = 0x80110606 + MSDTC_E_DUPLICATE_RESOURCE Handle = 0x80110701 + COMADMIN_E_OBJECT_PARENT_MISSING Handle = 0x80110808 + COMADMIN_E_OBJECT_DOES_NOT_EXIST Handle = 0x80110809 + COMADMIN_E_APP_NOT_RUNNING Handle = 0x8011080A + COMADMIN_E_INVALID_PARTITION Handle = 0x8011080B + COMADMIN_E_SVCAPP_NOT_POOLABLE_OR_RECYCLABLE Handle = 0x8011080D + COMADMIN_E_USER_IN_SET Handle = 0x8011080E + COMADMIN_E_CANTRECYCLELIBRARYAPPS Handle = 0x8011080F + COMADMIN_E_CANTRECYCLESERVICEAPPS Handle = 0x80110811 + COMADMIN_E_PROCESSALREADYRECYCLED Handle = 0x80110812 + COMADMIN_E_PAUSEDPROCESSMAYNOTBERECYCLED Handle = 0x80110813 + COMADMIN_E_CANTMAKEINPROCSERVICE Handle = 0x80110814 + COMADMIN_E_PROGIDINUSEBYCLSID Handle = 0x80110815 + COMADMIN_E_DEFAULT_PARTITION_NOT_IN_SET Handle = 0x80110816 + COMADMIN_E_RECYCLEDPROCESSMAYNOTBEPAUSED Handle = 0x80110817 + COMADMIN_E_PARTITION_ACCESSDENIED Handle = 0x80110818 + COMADMIN_E_PARTITION_MSI_ONLY Handle = 0x80110819 + COMADMIN_E_LEGACYCOMPS_NOT_ALLOWED_IN_1_0_FORMAT Handle = 0x8011081A + COMADMIN_E_LEGACYCOMPS_NOT_ALLOWED_IN_NONBASE_PARTITIONS Handle = 0x8011081B + COMADMIN_E_COMP_MOVE_SOURCE Handle = 0x8011081C + COMADMIN_E_COMP_MOVE_DEST Handle = 0x8011081D + COMADMIN_E_COMP_MOVE_PRIVATE Handle = 0x8011081E + COMADMIN_E_BASEPARTITION_REQUIRED_IN_SET Handle = 0x8011081F + COMADMIN_E_CANNOT_ALIAS_EVENTCLASS Handle = 0x80110820 + COMADMIN_E_PRIVATE_ACCESSDENIED Handle = 0x80110821 + COMADMIN_E_SAFERINVALID Handle = 0x80110822 + COMADMIN_E_REGISTRY_ACCESSDENIED Handle = 0x80110823 + COMADMIN_E_PARTITIONS_DISABLED Handle = 0x80110824 + WER_S_REPORT_DEBUG Handle = 0x001B0000 + WER_S_REPORT_UPLOADED Handle = 0x001B0001 + 
WER_S_REPORT_QUEUED Handle = 0x001B0002 + WER_S_DISABLED Handle = 0x001B0003 + WER_S_SUSPENDED_UPLOAD Handle = 0x001B0004 + WER_S_DISABLED_QUEUE Handle = 0x001B0005 + WER_S_DISABLED_ARCHIVE Handle = 0x001B0006 + WER_S_REPORT_ASYNC Handle = 0x001B0007 + WER_S_IGNORE_ASSERT_INSTANCE Handle = 0x001B0008 + WER_S_IGNORE_ALL_ASSERTS Handle = 0x001B0009 + WER_S_ASSERT_CONTINUE Handle = 0x001B000A + WER_S_THROTTLED Handle = 0x001B000B + WER_S_REPORT_UPLOADED_CAB Handle = 0x001B000C + WER_E_CRASH_FAILURE Handle = 0x801B8000 + WER_E_CANCELED Handle = 0x801B8001 + WER_E_NETWORK_FAILURE Handle = 0x801B8002 + WER_E_NOT_INITIALIZED Handle = 0x801B8003 + WER_E_ALREADY_REPORTING Handle = 0x801B8004 + WER_E_DUMP_THROTTLED Handle = 0x801B8005 + WER_E_INSUFFICIENT_CONSENT Handle = 0x801B8006 + WER_E_TOO_HEAVY Handle = 0x801B8007 + ERROR_FLT_IO_COMPLETE Handle = 0x001F0001 + ERROR_FLT_NO_HANDLER_DEFINED Handle = 0x801F0001 + ERROR_FLT_CONTEXT_ALREADY_DEFINED Handle = 0x801F0002 + ERROR_FLT_INVALID_ASYNCHRONOUS_REQUEST Handle = 0x801F0003 + ERROR_FLT_DISALLOW_FAST_IO Handle = 0x801F0004 + ERROR_FLT_INVALID_NAME_REQUEST Handle = 0x801F0005 + ERROR_FLT_NOT_SAFE_TO_POST_OPERATION Handle = 0x801F0006 + ERROR_FLT_NOT_INITIALIZED Handle = 0x801F0007 + ERROR_FLT_FILTER_NOT_READY Handle = 0x801F0008 + ERROR_FLT_POST_OPERATION_CLEANUP Handle = 0x801F0009 + ERROR_FLT_INTERNAL_ERROR Handle = 0x801F000A + ERROR_FLT_DELETING_OBJECT Handle = 0x801F000B + ERROR_FLT_MUST_BE_NONPAGED_POOL Handle = 0x801F000C + ERROR_FLT_DUPLICATE_ENTRY Handle = 0x801F000D + ERROR_FLT_CBDQ_DISABLED Handle = 0x801F000E + ERROR_FLT_DO_NOT_ATTACH Handle = 0x801F000F + ERROR_FLT_DO_NOT_DETACH Handle = 0x801F0010 + ERROR_FLT_INSTANCE_ALTITUDE_COLLISION Handle = 0x801F0011 + ERROR_FLT_INSTANCE_NAME_COLLISION Handle = 0x801F0012 + ERROR_FLT_FILTER_NOT_FOUND Handle = 0x801F0013 + ERROR_FLT_VOLUME_NOT_FOUND Handle = 0x801F0014 + ERROR_FLT_INSTANCE_NOT_FOUND Handle = 0x801F0015 + ERROR_FLT_CONTEXT_ALLOCATION_NOT_FOUND Handle = 0x801F0016 + ERROR_FLT_INVALID_CONTEXT_REGISTRATION Handle = 0x801F0017 + ERROR_FLT_NAME_CACHE_MISS Handle = 0x801F0018 + ERROR_FLT_NO_DEVICE_OBJECT Handle = 0x801F0019 + ERROR_FLT_VOLUME_ALREADY_MOUNTED Handle = 0x801F001A + ERROR_FLT_ALREADY_ENLISTED Handle = 0x801F001B + ERROR_FLT_CONTEXT_ALREADY_LINKED Handle = 0x801F001C + ERROR_FLT_NO_WAITER_FOR_REPLY Handle = 0x801F0020 + ERROR_FLT_REGISTRATION_BUSY Handle = 0x801F0023 + ERROR_HUNG_DISPLAY_DRIVER_THREAD Handle = 0x80260001 + DWM_E_COMPOSITIONDISABLED Handle = 0x80263001 + DWM_E_REMOTING_NOT_SUPPORTED Handle = 0x80263002 + DWM_E_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x80263003 + DWM_E_NOT_QUEUING_PRESENTS Handle = 0x80263004 + DWM_E_ADAPTER_NOT_FOUND Handle = 0x80263005 + DWM_S_GDI_REDIRECTION_SURFACE Handle = 0x00263005 + DWM_E_TEXTURE_TOO_LARGE Handle = 0x80263007 + DWM_S_GDI_REDIRECTION_SURFACE_BLT_VIA_GDI Handle = 0x00263008 + ERROR_MONITOR_NO_DESCRIPTOR Handle = 0x00261001 + ERROR_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT Handle = 0x00261002 + ERROR_MONITOR_INVALID_DESCRIPTOR_CHECKSUM Handle = 0xC0261003 + ERROR_MONITOR_INVALID_STANDARD_TIMING_BLOCK Handle = 0xC0261004 + ERROR_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED Handle = 0xC0261005 + ERROR_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK Handle = 0xC0261006 + ERROR_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK Handle = 0xC0261007 + ERROR_MONITOR_NO_MORE_DESCRIPTOR_DATA Handle = 0xC0261008 + ERROR_MONITOR_INVALID_DETAILED_TIMING_BLOCK Handle = 0xC0261009 + ERROR_MONITOR_INVALID_MANUFACTURE_DATE Handle = 0xC026100A + 
ERROR_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER Handle = 0xC0262000 + ERROR_GRAPHICS_INSUFFICIENT_DMA_BUFFER Handle = 0xC0262001 + ERROR_GRAPHICS_INVALID_DISPLAY_ADAPTER Handle = 0xC0262002 + ERROR_GRAPHICS_ADAPTER_WAS_RESET Handle = 0xC0262003 + ERROR_GRAPHICS_INVALID_DRIVER_MODEL Handle = 0xC0262004 + ERROR_GRAPHICS_PRESENT_MODE_CHANGED Handle = 0xC0262005 + ERROR_GRAPHICS_PRESENT_OCCLUDED Handle = 0xC0262006 + ERROR_GRAPHICS_PRESENT_DENIED Handle = 0xC0262007 + ERROR_GRAPHICS_CANNOTCOLORCONVERT Handle = 0xC0262008 + ERROR_GRAPHICS_DRIVER_MISMATCH Handle = 0xC0262009 + ERROR_GRAPHICS_PARTIAL_DATA_POPULATED Handle = 0x4026200A + ERROR_GRAPHICS_PRESENT_REDIRECTION_DISABLED Handle = 0xC026200B + ERROR_GRAPHICS_PRESENT_UNOCCLUDED Handle = 0xC026200C + ERROR_GRAPHICS_WINDOWDC_NOT_AVAILABLE Handle = 0xC026200D + ERROR_GRAPHICS_WINDOWLESS_PRESENT_DISABLED Handle = 0xC026200E + ERROR_GRAPHICS_PRESENT_INVALID_WINDOW Handle = 0xC026200F + ERROR_GRAPHICS_PRESENT_BUFFER_NOT_BOUND Handle = 0xC0262010 + ERROR_GRAPHICS_VAIL_STATE_CHANGED Handle = 0xC0262011 + ERROR_GRAPHICS_NO_VIDEO_MEMORY Handle = 0xC0262100 + ERROR_GRAPHICS_CANT_LOCK_MEMORY Handle = 0xC0262101 + ERROR_GRAPHICS_ALLOCATION_BUSY Handle = 0xC0262102 + ERROR_GRAPHICS_TOO_MANY_REFERENCES Handle = 0xC0262103 + ERROR_GRAPHICS_TRY_AGAIN_LATER Handle = 0xC0262104 + ERROR_GRAPHICS_TRY_AGAIN_NOW Handle = 0xC0262105 + ERROR_GRAPHICS_ALLOCATION_INVALID Handle = 0xC0262106 + ERROR_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE Handle = 0xC0262107 + ERROR_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED Handle = 0xC0262108 + ERROR_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION Handle = 0xC0262109 + ERROR_GRAPHICS_INVALID_ALLOCATION_USAGE Handle = 0xC0262110 + ERROR_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION Handle = 0xC0262111 + ERROR_GRAPHICS_ALLOCATION_CLOSED Handle = 0xC0262112 + ERROR_GRAPHICS_INVALID_ALLOCATION_INSTANCE Handle = 0xC0262113 + ERROR_GRAPHICS_INVALID_ALLOCATION_HANDLE Handle = 0xC0262114 + ERROR_GRAPHICS_WRONG_ALLOCATION_DEVICE Handle = 0xC0262115 + ERROR_GRAPHICS_ALLOCATION_CONTENT_LOST Handle = 0xC0262116 + ERROR_GRAPHICS_GPU_EXCEPTION_ON_DEVICE Handle = 0xC0262200 + ERROR_GRAPHICS_SKIP_ALLOCATION_PREPARATION Handle = 0x40262201 + ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY Handle = 0xC0262300 + ERROR_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED Handle = 0xC0262301 + ERROR_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED Handle = 0xC0262302 + ERROR_GRAPHICS_INVALID_VIDPN Handle = 0xC0262303 + ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE Handle = 0xC0262304 + ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET Handle = 0xC0262305 + ERROR_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED Handle = 0xC0262306 + ERROR_GRAPHICS_MODE_NOT_PINNED Handle = 0x00262307 + ERROR_GRAPHICS_INVALID_VIDPN_SOURCEMODESET Handle = 0xC0262308 + ERROR_GRAPHICS_INVALID_VIDPN_TARGETMODESET Handle = 0xC0262309 + ERROR_GRAPHICS_INVALID_FREQUENCY Handle = 0xC026230A + ERROR_GRAPHICS_INVALID_ACTIVE_REGION Handle = 0xC026230B + ERROR_GRAPHICS_INVALID_TOTAL_REGION Handle = 0xC026230C + ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE Handle = 0xC0262310 + ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE Handle = 0xC0262311 + ERROR_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET Handle = 0xC0262312 + ERROR_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY Handle = 0xC0262313 + ERROR_GRAPHICS_MODE_ALREADY_IN_MODESET Handle = 0xC0262314 + ERROR_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET Handle = 0xC0262315 + ERROR_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET Handle = 0xC0262316 + ERROR_GRAPHICS_SOURCE_ALREADY_IN_SET Handle = 0xC0262317 + 
ERROR_GRAPHICS_TARGET_ALREADY_IN_SET Handle = 0xC0262318 + ERROR_GRAPHICS_INVALID_VIDPN_PRESENT_PATH Handle = 0xC0262319 + ERROR_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY Handle = 0xC026231A + ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET Handle = 0xC026231B + ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE Handle = 0xC026231C + ERROR_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET Handle = 0xC026231D + ERROR_GRAPHICS_NO_PREFERRED_MODE Handle = 0x0026231E + ERROR_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET Handle = 0xC026231F + ERROR_GRAPHICS_STALE_MODESET Handle = 0xC0262320 + ERROR_GRAPHICS_INVALID_MONITOR_SOURCEMODESET Handle = 0xC0262321 + ERROR_GRAPHICS_INVALID_MONITOR_SOURCE_MODE Handle = 0xC0262322 + ERROR_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN Handle = 0xC0262323 + ERROR_GRAPHICS_MODE_ID_MUST_BE_UNIQUE Handle = 0xC0262324 + ERROR_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION Handle = 0xC0262325 + ERROR_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES Handle = 0xC0262326 + ERROR_GRAPHICS_PATH_NOT_IN_TOPOLOGY Handle = 0xC0262327 + ERROR_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE Handle = 0xC0262328 + ERROR_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET Handle = 0xC0262329 + ERROR_GRAPHICS_INVALID_MONITORDESCRIPTORSET Handle = 0xC026232A + ERROR_GRAPHICS_INVALID_MONITORDESCRIPTOR Handle = 0xC026232B + ERROR_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET Handle = 0xC026232C + ERROR_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET Handle = 0xC026232D + ERROR_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE Handle = 0xC026232E + ERROR_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE Handle = 0xC026232F + ERROR_GRAPHICS_RESOURCES_NOT_RELATED Handle = 0xC0262330 + ERROR_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE Handle = 0xC0262331 + ERROR_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE Handle = 0xC0262332 + ERROR_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET Handle = 0xC0262333 + ERROR_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER Handle = 0xC0262334 + ERROR_GRAPHICS_NO_VIDPNMGR Handle = 0xC0262335 + ERROR_GRAPHICS_NO_ACTIVE_VIDPN Handle = 0xC0262336 + ERROR_GRAPHICS_STALE_VIDPN_TOPOLOGY Handle = 0xC0262337 + ERROR_GRAPHICS_MONITOR_NOT_CONNECTED Handle = 0xC0262338 + ERROR_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY Handle = 0xC0262339 + ERROR_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE Handle = 0xC026233A + ERROR_GRAPHICS_INVALID_VISIBLEREGION_SIZE Handle = 0xC026233B + ERROR_GRAPHICS_INVALID_STRIDE Handle = 0xC026233C + ERROR_GRAPHICS_INVALID_PIXELFORMAT Handle = 0xC026233D + ERROR_GRAPHICS_INVALID_COLORBASIS Handle = 0xC026233E + ERROR_GRAPHICS_INVALID_PIXELVALUEACCESSMODE Handle = 0xC026233F + ERROR_GRAPHICS_TARGET_NOT_IN_TOPOLOGY Handle = 0xC0262340 + ERROR_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT Handle = 0xC0262341 + ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0xC0262342 + ERROR_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN Handle = 0xC0262343 + ERROR_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL Handle = 0xC0262344 + ERROR_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION Handle = 0xC0262345 + ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED Handle = 0xC0262346 + ERROR_GRAPHICS_INVALID_GAMMA_RAMP Handle = 0xC0262347 + ERROR_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED Handle = 0xC0262348 + ERROR_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED Handle = 0xC0262349 + ERROR_GRAPHICS_MODE_NOT_IN_MODESET Handle = 0xC026234A + ERROR_GRAPHICS_DATASET_IS_EMPTY Handle = 0x0026234B + ERROR_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET Handle = 0x0026234C + ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON Handle = 0xC026234D + ERROR_GRAPHICS_INVALID_PATH_CONTENT_TYPE Handle = 
0xC026234E + ERROR_GRAPHICS_INVALID_COPYPROTECTION_TYPE Handle = 0xC026234F + ERROR_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS Handle = 0xC0262350 + ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED Handle = 0x00262351 + ERROR_GRAPHICS_INVALID_SCANLINE_ORDERING Handle = 0xC0262352 + ERROR_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED Handle = 0xC0262353 + ERROR_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS Handle = 0xC0262354 + ERROR_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT Handle = 0xC0262355 + ERROR_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM Handle = 0xC0262356 + ERROR_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN Handle = 0xC0262357 + ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT Handle = 0xC0262358 + ERROR_GRAPHICS_MAX_NUM_PATHS_REACHED Handle = 0xC0262359 + ERROR_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION Handle = 0xC026235A + ERROR_GRAPHICS_INVALID_CLIENT_TYPE Handle = 0xC026235B + ERROR_GRAPHICS_CLIENTVIDPN_NOT_SET Handle = 0xC026235C + ERROR_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED Handle = 0xC0262400 + ERROR_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED Handle = 0xC0262401 + ERROR_GRAPHICS_UNKNOWN_CHILD_STATUS Handle = 0x4026242F + ERROR_GRAPHICS_NOT_A_LINKED_ADAPTER Handle = 0xC0262430 + ERROR_GRAPHICS_LEADLINK_NOT_ENUMERATED Handle = 0xC0262431 + ERROR_GRAPHICS_CHAINLINKS_NOT_ENUMERATED Handle = 0xC0262432 + ERROR_GRAPHICS_ADAPTER_CHAIN_NOT_READY Handle = 0xC0262433 + ERROR_GRAPHICS_CHAINLINKS_NOT_STARTED Handle = 0xC0262434 + ERROR_GRAPHICS_CHAINLINKS_NOT_POWERED_ON Handle = 0xC0262435 + ERROR_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE Handle = 0xC0262436 + ERROR_GRAPHICS_LEADLINK_START_DEFERRED Handle = 0x40262437 + ERROR_GRAPHICS_NOT_POST_DEVICE_DRIVER Handle = 0xC0262438 + ERROR_GRAPHICS_POLLING_TOO_FREQUENTLY Handle = 0x40262439 + ERROR_GRAPHICS_START_DEFERRED Handle = 0x4026243A + ERROR_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED Handle = 0xC026243B + ERROR_GRAPHICS_DEPENDABLE_CHILD_STATUS Handle = 0x4026243C + ERROR_GRAPHICS_OPM_NOT_SUPPORTED Handle = 0xC0262500 + ERROR_GRAPHICS_COPP_NOT_SUPPORTED Handle = 0xC0262501 + ERROR_GRAPHICS_UAB_NOT_SUPPORTED Handle = 0xC0262502 + ERROR_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS Handle = 0xC0262503 + ERROR_GRAPHICS_OPM_NO_VIDEO_OUTPUTS_EXIST Handle = 0xC0262505 + ERROR_GRAPHICS_OPM_INTERNAL_ERROR Handle = 0xC026250B + ERROR_GRAPHICS_OPM_INVALID_HANDLE Handle = 0xC026250C + ERROR_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH Handle = 0xC026250E + ERROR_GRAPHICS_OPM_SPANNING_MODE_ENABLED Handle = 0xC026250F + ERROR_GRAPHICS_OPM_THEATER_MODE_ENABLED Handle = 0xC0262510 + ERROR_GRAPHICS_PVP_HFS_FAILED Handle = 0xC0262511 + ERROR_GRAPHICS_OPM_INVALID_SRM Handle = 0xC0262512 + ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP Handle = 0xC0262513 + ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP Handle = 0xC0262514 + ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA Handle = 0xC0262515 + ERROR_GRAPHICS_OPM_HDCP_SRM_NEVER_SET Handle = 0xC0262516 + ERROR_GRAPHICS_OPM_RESOLUTION_TOO_HIGH Handle = 0xC0262517 + ERROR_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE Handle = 0xC0262518 + ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_NO_LONGER_EXISTS Handle = 0xC026251A + ERROR_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS Handle = 0xC026251B + ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS Handle = 0xC026251C + ERROR_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST Handle = 0xC026251D + ERROR_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR Handle = 0xC026251E + ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS Handle = 0xC026251F + ERROR_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED 
Handle = 0xC0262520 + ERROR_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST Handle = 0xC0262521 + ERROR_GRAPHICS_I2C_NOT_SUPPORTED Handle = 0xC0262580 + ERROR_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST Handle = 0xC0262581 + ERROR_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA Handle = 0xC0262582 + ERROR_GRAPHICS_I2C_ERROR_RECEIVING_DATA Handle = 0xC0262583 + ERROR_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED Handle = 0xC0262584 + ERROR_GRAPHICS_DDCCI_INVALID_DATA Handle = 0xC0262585 + ERROR_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE Handle = 0xC0262586 + ERROR_GRAPHICS_MCA_INVALID_CAPABILITIES_STRING Handle = 0xC0262587 + ERROR_GRAPHICS_MCA_INTERNAL_ERROR Handle = 0xC0262588 + ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND Handle = 0xC0262589 + ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH Handle = 0xC026258A + ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM Handle = 0xC026258B + ERROR_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE Handle = 0xC026258C + ERROR_GRAPHICS_MONITOR_NO_LONGER_EXISTS Handle = 0xC026258D + ERROR_GRAPHICS_DDCCI_CURRENT_CURRENT_VALUE_GREATER_THAN_MAXIMUM_VALUE Handle = 0xC02625D8 + ERROR_GRAPHICS_MCA_INVALID_VCP_VERSION Handle = 0xC02625D9 + ERROR_GRAPHICS_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION Handle = 0xC02625DA + ERROR_GRAPHICS_MCA_MCCS_VERSION_MISMATCH Handle = 0xC02625DB + ERROR_GRAPHICS_MCA_UNSUPPORTED_MCCS_VERSION Handle = 0xC02625DC + ERROR_GRAPHICS_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED Handle = 0xC02625DE + ERROR_GRAPHICS_MCA_UNSUPPORTED_COLOR_TEMPERATURE Handle = 0xC02625DF + ERROR_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED Handle = 0xC02625E0 + ERROR_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME Handle = 0xC02625E1 + ERROR_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP Handle = 0xC02625E2 + ERROR_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED Handle = 0xC02625E3 + ERROR_GRAPHICS_INVALID_POINTER Handle = 0xC02625E4 + ERROR_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE Handle = 0xC02625E5 + ERROR_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL Handle = 0xC02625E6 + ERROR_GRAPHICS_INTERNAL_ERROR Handle = 0xC02625E7 + ERROR_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS Handle = 0xC02605E8 + NAP_E_INVALID_PACKET Handle = 0x80270001 + NAP_E_MISSING_SOH Handle = 0x80270002 + NAP_E_CONFLICTING_ID Handle = 0x80270003 + NAP_E_NO_CACHED_SOH Handle = 0x80270004 + NAP_E_STILL_BOUND Handle = 0x80270005 + NAP_E_NOT_REGISTERED Handle = 0x80270006 + NAP_E_NOT_INITIALIZED Handle = 0x80270007 + NAP_E_MISMATCHED_ID Handle = 0x80270008 + NAP_E_NOT_PENDING Handle = 0x80270009 + NAP_E_ID_NOT_FOUND Handle = 0x8027000A + NAP_E_MAXSIZE_TOO_SMALL Handle = 0x8027000B + NAP_E_SERVICE_NOT_RUNNING Handle = 0x8027000C + NAP_S_CERT_ALREADY_PRESENT Handle = 0x0027000D + NAP_E_ENTITY_DISABLED Handle = 0x8027000E + NAP_E_NETSH_GROUPPOLICY_ERROR Handle = 0x8027000F + NAP_E_TOO_MANY_CALLS Handle = 0x80270010 + NAP_E_SHV_CONFIG_EXISTED Handle = 0x80270011 + NAP_E_SHV_CONFIG_NOT_FOUND Handle = 0x80270012 + NAP_E_SHV_TIMEOUT Handle = 0x80270013 + TPM_E_ERROR_MASK Handle = 0x80280000 + TPM_E_AUTHFAIL Handle = 0x80280001 + TPM_E_BADINDEX Handle = 0x80280002 + TPM_E_BAD_PARAMETER Handle = 0x80280003 + TPM_E_AUDITFAILURE Handle = 0x80280004 + TPM_E_CLEAR_DISABLED Handle = 0x80280005 + TPM_E_DEACTIVATED Handle = 0x80280006 + TPM_E_DISABLED Handle = 0x80280007 + TPM_E_DISABLED_CMD Handle = 0x80280008 + TPM_E_FAIL Handle = 0x80280009 + TPM_E_BAD_ORDINAL Handle = 0x8028000A + TPM_E_INSTALL_DISABLED Handle = 0x8028000B + TPM_E_INVALID_KEYHANDLE Handle = 0x8028000C + TPM_E_KEYNOTFOUND Handle = 0x8028000D + TPM_E_INAPPROPRIATE_ENC Handle = 0x8028000E + 
TPM_E_MIGRATEFAIL Handle = 0x8028000F + TPM_E_INVALID_PCR_INFO Handle = 0x80280010 + TPM_E_NOSPACE Handle = 0x80280011 + TPM_E_NOSRK Handle = 0x80280012 + TPM_E_NOTSEALED_BLOB Handle = 0x80280013 + TPM_E_OWNER_SET Handle = 0x80280014 + TPM_E_RESOURCES Handle = 0x80280015 + TPM_E_SHORTRANDOM Handle = 0x80280016 + TPM_E_SIZE Handle = 0x80280017 + TPM_E_WRONGPCRVAL Handle = 0x80280018 + TPM_E_BAD_PARAM_SIZE Handle = 0x80280019 + TPM_E_SHA_THREAD Handle = 0x8028001A + TPM_E_SHA_ERROR Handle = 0x8028001B + TPM_E_FAILEDSELFTEST Handle = 0x8028001C + TPM_E_AUTH2FAIL Handle = 0x8028001D + TPM_E_BADTAG Handle = 0x8028001E + TPM_E_IOERROR Handle = 0x8028001F + TPM_E_ENCRYPT_ERROR Handle = 0x80280020 + TPM_E_DECRYPT_ERROR Handle = 0x80280021 + TPM_E_INVALID_AUTHHANDLE Handle = 0x80280022 + TPM_E_NO_ENDORSEMENT Handle = 0x80280023 + TPM_E_INVALID_KEYUSAGE Handle = 0x80280024 + TPM_E_WRONG_ENTITYTYPE Handle = 0x80280025 + TPM_E_INVALID_POSTINIT Handle = 0x80280026 + TPM_E_INAPPROPRIATE_SIG Handle = 0x80280027 + TPM_E_BAD_KEY_PROPERTY Handle = 0x80280028 + TPM_E_BAD_MIGRATION Handle = 0x80280029 + TPM_E_BAD_SCHEME Handle = 0x8028002A + TPM_E_BAD_DATASIZE Handle = 0x8028002B + TPM_E_BAD_MODE Handle = 0x8028002C + TPM_E_BAD_PRESENCE Handle = 0x8028002D + TPM_E_BAD_VERSION Handle = 0x8028002E + TPM_E_NO_WRAP_TRANSPORT Handle = 0x8028002F + TPM_E_AUDITFAIL_UNSUCCESSFUL Handle = 0x80280030 + TPM_E_AUDITFAIL_SUCCESSFUL Handle = 0x80280031 + TPM_E_NOTRESETABLE Handle = 0x80280032 + TPM_E_NOTLOCAL Handle = 0x80280033 + TPM_E_BAD_TYPE Handle = 0x80280034 + TPM_E_INVALID_RESOURCE Handle = 0x80280035 + TPM_E_NOTFIPS Handle = 0x80280036 + TPM_E_INVALID_FAMILY Handle = 0x80280037 + TPM_E_NO_NV_PERMISSION Handle = 0x80280038 + TPM_E_REQUIRES_SIGN Handle = 0x80280039 + TPM_E_KEY_NOTSUPPORTED Handle = 0x8028003A + TPM_E_AUTH_CONFLICT Handle = 0x8028003B + TPM_E_AREA_LOCKED Handle = 0x8028003C + TPM_E_BAD_LOCALITY Handle = 0x8028003D + TPM_E_READ_ONLY Handle = 0x8028003E + TPM_E_PER_NOWRITE Handle = 0x8028003F + TPM_E_FAMILYCOUNT Handle = 0x80280040 + TPM_E_WRITE_LOCKED Handle = 0x80280041 + TPM_E_BAD_ATTRIBUTES Handle = 0x80280042 + TPM_E_INVALID_STRUCTURE Handle = 0x80280043 + TPM_E_KEY_OWNER_CONTROL Handle = 0x80280044 + TPM_E_BAD_COUNTER Handle = 0x80280045 + TPM_E_NOT_FULLWRITE Handle = 0x80280046 + TPM_E_CONTEXT_GAP Handle = 0x80280047 + TPM_E_MAXNVWRITES Handle = 0x80280048 + TPM_E_NOOPERATOR Handle = 0x80280049 + TPM_E_RESOURCEMISSING Handle = 0x8028004A + TPM_E_DELEGATE_LOCK Handle = 0x8028004B + TPM_E_DELEGATE_FAMILY Handle = 0x8028004C + TPM_E_DELEGATE_ADMIN Handle = 0x8028004D + TPM_E_TRANSPORT_NOTEXCLUSIVE Handle = 0x8028004E + TPM_E_OWNER_CONTROL Handle = 0x8028004F + TPM_E_DAA_RESOURCES Handle = 0x80280050 + TPM_E_DAA_INPUT_DATA0 Handle = 0x80280051 + TPM_E_DAA_INPUT_DATA1 Handle = 0x80280052 + TPM_E_DAA_ISSUER_SETTINGS Handle = 0x80280053 + TPM_E_DAA_TPM_SETTINGS Handle = 0x80280054 + TPM_E_DAA_STAGE Handle = 0x80280055 + TPM_E_DAA_ISSUER_VALIDITY Handle = 0x80280056 + TPM_E_DAA_WRONG_W Handle = 0x80280057 + TPM_E_BAD_HANDLE Handle = 0x80280058 + TPM_E_BAD_DELEGATE Handle = 0x80280059 + TPM_E_BADCONTEXT Handle = 0x8028005A + TPM_E_TOOMANYCONTEXTS Handle = 0x8028005B + TPM_E_MA_TICKET_SIGNATURE Handle = 0x8028005C + TPM_E_MA_DESTINATION Handle = 0x8028005D + TPM_E_MA_SOURCE Handle = 0x8028005E + TPM_E_MA_AUTHORITY Handle = 0x8028005F + TPM_E_PERMANENTEK Handle = 0x80280061 + TPM_E_BAD_SIGNATURE Handle = 0x80280062 + TPM_E_NOCONTEXTSPACE Handle = 0x80280063 + TPM_20_E_ASYMMETRIC Handle = 0x80280081 + 
TPM_20_E_ATTRIBUTES Handle = 0x80280082 + TPM_20_E_HASH Handle = 0x80280083 + TPM_20_E_VALUE Handle = 0x80280084 + TPM_20_E_HIERARCHY Handle = 0x80280085 + TPM_20_E_KEY_SIZE Handle = 0x80280087 + TPM_20_E_MGF Handle = 0x80280088 + TPM_20_E_MODE Handle = 0x80280089 + TPM_20_E_TYPE Handle = 0x8028008A + TPM_20_E_HANDLE Handle = 0x8028008B + TPM_20_E_KDF Handle = 0x8028008C + TPM_20_E_RANGE Handle = 0x8028008D + TPM_20_E_AUTH_FAIL Handle = 0x8028008E + TPM_20_E_NONCE Handle = 0x8028008F + TPM_20_E_PP Handle = 0x80280090 + TPM_20_E_SCHEME Handle = 0x80280092 + TPM_20_E_SIZE Handle = 0x80280095 + TPM_20_E_SYMMETRIC Handle = 0x80280096 + TPM_20_E_TAG Handle = 0x80280097 + TPM_20_E_SELECTOR Handle = 0x80280098 + TPM_20_E_INSUFFICIENT Handle = 0x8028009A + TPM_20_E_SIGNATURE Handle = 0x8028009B + TPM_20_E_KEY Handle = 0x8028009C + TPM_20_E_POLICY_FAIL Handle = 0x8028009D + TPM_20_E_INTEGRITY Handle = 0x8028009F + TPM_20_E_TICKET Handle = 0x802800A0 + TPM_20_E_RESERVED_BITS Handle = 0x802800A1 + TPM_20_E_BAD_AUTH Handle = 0x802800A2 + TPM_20_E_EXPIRED Handle = 0x802800A3 + TPM_20_E_POLICY_CC Handle = 0x802800A4 + TPM_20_E_BINDING Handle = 0x802800A5 + TPM_20_E_CURVE Handle = 0x802800A6 + TPM_20_E_ECC_POINT Handle = 0x802800A7 + TPM_20_E_INITIALIZE Handle = 0x80280100 + TPM_20_E_FAILURE Handle = 0x80280101 + TPM_20_E_SEQUENCE Handle = 0x80280103 + TPM_20_E_PRIVATE Handle = 0x8028010B + TPM_20_E_HMAC Handle = 0x80280119 + TPM_20_E_DISABLED Handle = 0x80280120 + TPM_20_E_EXCLUSIVE Handle = 0x80280121 + TPM_20_E_ECC_CURVE Handle = 0x80280123 + TPM_20_E_AUTH_TYPE Handle = 0x80280124 + TPM_20_E_AUTH_MISSING Handle = 0x80280125 + TPM_20_E_POLICY Handle = 0x80280126 + TPM_20_E_PCR Handle = 0x80280127 + TPM_20_E_PCR_CHANGED Handle = 0x80280128 + TPM_20_E_UPGRADE Handle = 0x8028012D + TPM_20_E_TOO_MANY_CONTEXTS Handle = 0x8028012E + TPM_20_E_AUTH_UNAVAILABLE Handle = 0x8028012F + TPM_20_E_REBOOT Handle = 0x80280130 + TPM_20_E_UNBALANCED Handle = 0x80280131 + TPM_20_E_COMMAND_SIZE Handle = 0x80280142 + TPM_20_E_COMMAND_CODE Handle = 0x80280143 + TPM_20_E_AUTHSIZE Handle = 0x80280144 + TPM_20_E_AUTH_CONTEXT Handle = 0x80280145 + TPM_20_E_NV_RANGE Handle = 0x80280146 + TPM_20_E_NV_SIZE Handle = 0x80280147 + TPM_20_E_NV_LOCKED Handle = 0x80280148 + TPM_20_E_NV_AUTHORIZATION Handle = 0x80280149 + TPM_20_E_NV_UNINITIALIZED Handle = 0x8028014A + TPM_20_E_NV_SPACE Handle = 0x8028014B + TPM_20_E_NV_DEFINED Handle = 0x8028014C + TPM_20_E_BAD_CONTEXT Handle = 0x80280150 + TPM_20_E_CPHASH Handle = 0x80280151 + TPM_20_E_PARENT Handle = 0x80280152 + TPM_20_E_NEEDS_TEST Handle = 0x80280153 + TPM_20_E_NO_RESULT Handle = 0x80280154 + TPM_20_E_SENSITIVE Handle = 0x80280155 + TPM_E_COMMAND_BLOCKED Handle = 0x80280400 + TPM_E_INVALID_HANDLE Handle = 0x80280401 + TPM_E_DUPLICATE_VHANDLE Handle = 0x80280402 + TPM_E_EMBEDDED_COMMAND_BLOCKED Handle = 0x80280403 + TPM_E_EMBEDDED_COMMAND_UNSUPPORTED Handle = 0x80280404 + TPM_E_RETRY Handle = 0x80280800 + TPM_E_NEEDS_SELFTEST Handle = 0x80280801 + TPM_E_DOING_SELFTEST Handle = 0x80280802 + TPM_E_DEFEND_LOCK_RUNNING Handle = 0x80280803 + TPM_20_E_CONTEXT_GAP Handle = 0x80280901 + TPM_20_E_OBJECT_MEMORY Handle = 0x80280902 + TPM_20_E_SESSION_MEMORY Handle = 0x80280903 + TPM_20_E_MEMORY Handle = 0x80280904 + TPM_20_E_SESSION_HANDLES Handle = 0x80280905 + TPM_20_E_OBJECT_HANDLES Handle = 0x80280906 + TPM_20_E_LOCALITY Handle = 0x80280907 + TPM_20_E_YIELDED Handle = 0x80280908 + TPM_20_E_CANCELED Handle = 0x80280909 + TPM_20_E_TESTING Handle = 0x8028090A + TPM_20_E_NV_RATE Handle = 
0x80280920 + TPM_20_E_LOCKOUT Handle = 0x80280921 + TPM_20_E_RETRY Handle = 0x80280922 + TPM_20_E_NV_UNAVAILABLE Handle = 0x80280923 + TBS_E_INTERNAL_ERROR Handle = 0x80284001 + TBS_E_BAD_PARAMETER Handle = 0x80284002 + TBS_E_INVALID_OUTPUT_POINTER Handle = 0x80284003 + TBS_E_INVALID_CONTEXT Handle = 0x80284004 + TBS_E_INSUFFICIENT_BUFFER Handle = 0x80284005 + TBS_E_IOERROR Handle = 0x80284006 + TBS_E_INVALID_CONTEXT_PARAM Handle = 0x80284007 + TBS_E_SERVICE_NOT_RUNNING Handle = 0x80284008 + TBS_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80284009 + TBS_E_TOO_MANY_RESOURCES Handle = 0x8028400A + TBS_E_SERVICE_START_PENDING Handle = 0x8028400B + TBS_E_PPI_NOT_SUPPORTED Handle = 0x8028400C + TBS_E_COMMAND_CANCELED Handle = 0x8028400D + TBS_E_BUFFER_TOO_LARGE Handle = 0x8028400E + TBS_E_TPM_NOT_FOUND Handle = 0x8028400F + TBS_E_SERVICE_DISABLED Handle = 0x80284010 + TBS_E_NO_EVENT_LOG Handle = 0x80284011 + TBS_E_ACCESS_DENIED Handle = 0x80284012 + TBS_E_PROVISIONING_NOT_ALLOWED Handle = 0x80284013 + TBS_E_PPI_FUNCTION_UNSUPPORTED Handle = 0x80284014 + TBS_E_OWNERAUTH_NOT_FOUND Handle = 0x80284015 + TBS_E_PROVISIONING_INCOMPLETE Handle = 0x80284016 + TPMAPI_E_INVALID_STATE Handle = 0x80290100 + TPMAPI_E_NOT_ENOUGH_DATA Handle = 0x80290101 + TPMAPI_E_TOO_MUCH_DATA Handle = 0x80290102 + TPMAPI_E_INVALID_OUTPUT_POINTER Handle = 0x80290103 + TPMAPI_E_INVALID_PARAMETER Handle = 0x80290104 + TPMAPI_E_OUT_OF_MEMORY Handle = 0x80290105 + TPMAPI_E_BUFFER_TOO_SMALL Handle = 0x80290106 + TPMAPI_E_INTERNAL_ERROR Handle = 0x80290107 + TPMAPI_E_ACCESS_DENIED Handle = 0x80290108 + TPMAPI_E_AUTHORIZATION_FAILED Handle = 0x80290109 + TPMAPI_E_INVALID_CONTEXT_HANDLE Handle = 0x8029010A + TPMAPI_E_TBS_COMMUNICATION_ERROR Handle = 0x8029010B + TPMAPI_E_TPM_COMMAND_ERROR Handle = 0x8029010C + TPMAPI_E_MESSAGE_TOO_LARGE Handle = 0x8029010D + TPMAPI_E_INVALID_ENCODING Handle = 0x8029010E + TPMAPI_E_INVALID_KEY_SIZE Handle = 0x8029010F + TPMAPI_E_ENCRYPTION_FAILED Handle = 0x80290110 + TPMAPI_E_INVALID_KEY_PARAMS Handle = 0x80290111 + TPMAPI_E_INVALID_MIGRATION_AUTHORIZATION_BLOB Handle = 0x80290112 + TPMAPI_E_INVALID_PCR_INDEX Handle = 0x80290113 + TPMAPI_E_INVALID_DELEGATE_BLOB Handle = 0x80290114 + TPMAPI_E_INVALID_CONTEXT_PARAMS Handle = 0x80290115 + TPMAPI_E_INVALID_KEY_BLOB Handle = 0x80290116 + TPMAPI_E_INVALID_PCR_DATA Handle = 0x80290117 + TPMAPI_E_INVALID_OWNER_AUTH Handle = 0x80290118 + TPMAPI_E_FIPS_RNG_CHECK_FAILED Handle = 0x80290119 + TPMAPI_E_EMPTY_TCG_LOG Handle = 0x8029011A + TPMAPI_E_INVALID_TCG_LOG_ENTRY Handle = 0x8029011B + TPMAPI_E_TCG_SEPARATOR_ABSENT Handle = 0x8029011C + TPMAPI_E_TCG_INVALID_DIGEST_ENTRY Handle = 0x8029011D + TPMAPI_E_POLICY_DENIES_OPERATION Handle = 0x8029011E + TPMAPI_E_NV_BITS_NOT_DEFINED Handle = 0x8029011F + TPMAPI_E_NV_BITS_NOT_READY Handle = 0x80290120 + TPMAPI_E_SEALING_KEY_NOT_AVAILABLE Handle = 0x80290121 + TPMAPI_E_NO_AUTHORIZATION_CHAIN_FOUND Handle = 0x80290122 + TPMAPI_E_SVN_COUNTER_NOT_AVAILABLE Handle = 0x80290123 + TPMAPI_E_OWNER_AUTH_NOT_NULL Handle = 0x80290124 + TPMAPI_E_ENDORSEMENT_AUTH_NOT_NULL Handle = 0x80290125 + TPMAPI_E_AUTHORIZATION_REVOKED Handle = 0x80290126 + TPMAPI_E_MALFORMED_AUTHORIZATION_KEY Handle = 0x80290127 + TPMAPI_E_AUTHORIZING_KEY_NOT_SUPPORTED Handle = 0x80290128 + TPMAPI_E_INVALID_AUTHORIZATION_SIGNATURE Handle = 0x80290129 + TPMAPI_E_MALFORMED_AUTHORIZATION_POLICY Handle = 0x8029012A + TPMAPI_E_MALFORMED_AUTHORIZATION_OTHER Handle = 0x8029012B + TPMAPI_E_SEALING_KEY_CHANGED Handle = 0x8029012C + TBSIMP_E_BUFFER_TOO_SMALL Handle = 
0x80290200 + TBSIMP_E_CLEANUP_FAILED Handle = 0x80290201 + TBSIMP_E_INVALID_CONTEXT_HANDLE Handle = 0x80290202 + TBSIMP_E_INVALID_CONTEXT_PARAM Handle = 0x80290203 + TBSIMP_E_TPM_ERROR Handle = 0x80290204 + TBSIMP_E_HASH_BAD_KEY Handle = 0x80290205 + TBSIMP_E_DUPLICATE_VHANDLE Handle = 0x80290206 + TBSIMP_E_INVALID_OUTPUT_POINTER Handle = 0x80290207 + TBSIMP_E_INVALID_PARAMETER Handle = 0x80290208 + TBSIMP_E_RPC_INIT_FAILED Handle = 0x80290209 + TBSIMP_E_SCHEDULER_NOT_RUNNING Handle = 0x8029020A + TBSIMP_E_COMMAND_CANCELED Handle = 0x8029020B + TBSIMP_E_OUT_OF_MEMORY Handle = 0x8029020C + TBSIMP_E_LIST_NO_MORE_ITEMS Handle = 0x8029020D + TBSIMP_E_LIST_NOT_FOUND Handle = 0x8029020E + TBSIMP_E_NOT_ENOUGH_SPACE Handle = 0x8029020F + TBSIMP_E_NOT_ENOUGH_TPM_CONTEXTS Handle = 0x80290210 + TBSIMP_E_COMMAND_FAILED Handle = 0x80290211 + TBSIMP_E_UNKNOWN_ORDINAL Handle = 0x80290212 + TBSIMP_E_RESOURCE_EXPIRED Handle = 0x80290213 + TBSIMP_E_INVALID_RESOURCE Handle = 0x80290214 + TBSIMP_E_NOTHING_TO_UNLOAD Handle = 0x80290215 + TBSIMP_E_HASH_TABLE_FULL Handle = 0x80290216 + TBSIMP_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80290217 + TBSIMP_E_TOO_MANY_RESOURCES Handle = 0x80290218 + TBSIMP_E_PPI_NOT_SUPPORTED Handle = 0x80290219 + TBSIMP_E_TPM_INCOMPATIBLE Handle = 0x8029021A + TBSIMP_E_NO_EVENT_LOG Handle = 0x8029021B + TPM_E_PPI_ACPI_FAILURE Handle = 0x80290300 + TPM_E_PPI_USER_ABORT Handle = 0x80290301 + TPM_E_PPI_BIOS_FAILURE Handle = 0x80290302 + TPM_E_PPI_NOT_SUPPORTED Handle = 0x80290303 + TPM_E_PPI_BLOCKED_IN_BIOS Handle = 0x80290304 + TPM_E_PCP_ERROR_MASK Handle = 0x80290400 + TPM_E_PCP_DEVICE_NOT_READY Handle = 0x80290401 + TPM_E_PCP_INVALID_HANDLE Handle = 0x80290402 + TPM_E_PCP_INVALID_PARAMETER Handle = 0x80290403 + TPM_E_PCP_FLAG_NOT_SUPPORTED Handle = 0x80290404 + TPM_E_PCP_NOT_SUPPORTED Handle = 0x80290405 + TPM_E_PCP_BUFFER_TOO_SMALL Handle = 0x80290406 + TPM_E_PCP_INTERNAL_ERROR Handle = 0x80290407 + TPM_E_PCP_AUTHENTICATION_FAILED Handle = 0x80290408 + TPM_E_PCP_AUTHENTICATION_IGNORED Handle = 0x80290409 + TPM_E_PCP_POLICY_NOT_FOUND Handle = 0x8029040A + TPM_E_PCP_PROFILE_NOT_FOUND Handle = 0x8029040B + TPM_E_PCP_VALIDATION_FAILED Handle = 0x8029040C + TPM_E_PCP_WRONG_PARENT Handle = 0x8029040E + TPM_E_KEY_NOT_LOADED Handle = 0x8029040F + TPM_E_NO_KEY_CERTIFICATION Handle = 0x80290410 + TPM_E_KEY_NOT_FINALIZED Handle = 0x80290411 + TPM_E_ATTESTATION_CHALLENGE_NOT_SET Handle = 0x80290412 + TPM_E_NOT_PCR_BOUND Handle = 0x80290413 + TPM_E_KEY_ALREADY_FINALIZED Handle = 0x80290414 + TPM_E_KEY_USAGE_POLICY_NOT_SUPPORTED Handle = 0x80290415 + TPM_E_KEY_USAGE_POLICY_INVALID Handle = 0x80290416 + TPM_E_SOFT_KEY_ERROR Handle = 0x80290417 + TPM_E_KEY_NOT_AUTHENTICATED Handle = 0x80290418 + TPM_E_PCP_KEY_NOT_AIK Handle = 0x80290419 + TPM_E_KEY_NOT_SIGNING_KEY Handle = 0x8029041A + TPM_E_LOCKED_OUT Handle = 0x8029041B + TPM_E_CLAIM_TYPE_NOT_SUPPORTED Handle = 0x8029041C + TPM_E_VERSION_NOT_SUPPORTED Handle = 0x8029041D + TPM_E_BUFFER_LENGTH_MISMATCH Handle = 0x8029041E + TPM_E_PCP_IFX_RSA_KEY_CREATION_BLOCKED Handle = 0x8029041F + TPM_E_PCP_TICKET_MISSING Handle = 0x80290420 + TPM_E_PCP_RAW_POLICY_NOT_SUPPORTED Handle = 0x80290421 + TPM_E_PCP_KEY_HANDLE_INVALIDATED Handle = 0x80290422 + TPM_E_PCP_UNSUPPORTED_PSS_SALT Handle = 0x40290423 + TPM_E_ZERO_EXHAUST_ENABLED Handle = 0x80290500 + PLA_E_DCS_NOT_FOUND Handle = 0x80300002 + PLA_E_DCS_IN_USE Handle = 0x803000AA + PLA_E_TOO_MANY_FOLDERS Handle = 0x80300045 + PLA_E_NO_MIN_DISK Handle = 0x80300070 + PLA_E_DCS_ALREADY_EXISTS Handle = 0x803000B7 + 
PLA_S_PROPERTY_IGNORED Handle = 0x00300100 + PLA_E_PROPERTY_CONFLICT Handle = 0x80300101 + PLA_E_DCS_SINGLETON_REQUIRED Handle = 0x80300102 + PLA_E_CREDENTIALS_REQUIRED Handle = 0x80300103 + PLA_E_DCS_NOT_RUNNING Handle = 0x80300104 + PLA_E_CONFLICT_INCL_EXCL_API Handle = 0x80300105 + PLA_E_NETWORK_EXE_NOT_VALID Handle = 0x80300106 + PLA_E_EXE_ALREADY_CONFIGURED Handle = 0x80300107 + PLA_E_EXE_PATH_NOT_VALID Handle = 0x80300108 + PLA_E_DC_ALREADY_EXISTS Handle = 0x80300109 + PLA_E_DCS_START_WAIT_TIMEOUT Handle = 0x8030010A + PLA_E_DC_START_WAIT_TIMEOUT Handle = 0x8030010B + PLA_E_REPORT_WAIT_TIMEOUT Handle = 0x8030010C + PLA_E_NO_DUPLICATES Handle = 0x8030010D + PLA_E_EXE_FULL_PATH_REQUIRED Handle = 0x8030010E + PLA_E_INVALID_SESSION_NAME Handle = 0x8030010F + PLA_E_PLA_CHANNEL_NOT_ENABLED Handle = 0x80300110 + PLA_E_TASKSCHED_CHANNEL_NOT_ENABLED Handle = 0x80300111 + PLA_E_RULES_MANAGER_FAILED Handle = 0x80300112 + PLA_E_CABAPI_FAILURE Handle = 0x80300113 + FVE_E_LOCKED_VOLUME Handle = 0x80310000 + FVE_E_NOT_ENCRYPTED Handle = 0x80310001 + FVE_E_NO_TPM_BIOS Handle = 0x80310002 + FVE_E_NO_MBR_METRIC Handle = 0x80310003 + FVE_E_NO_BOOTSECTOR_METRIC Handle = 0x80310004 + FVE_E_NO_BOOTMGR_METRIC Handle = 0x80310005 + FVE_E_WRONG_BOOTMGR Handle = 0x80310006 + FVE_E_SECURE_KEY_REQUIRED Handle = 0x80310007 + FVE_E_NOT_ACTIVATED Handle = 0x80310008 + FVE_E_ACTION_NOT_ALLOWED Handle = 0x80310009 + FVE_E_AD_SCHEMA_NOT_INSTALLED Handle = 0x8031000A + FVE_E_AD_INVALID_DATATYPE Handle = 0x8031000B + FVE_E_AD_INVALID_DATASIZE Handle = 0x8031000C + FVE_E_AD_NO_VALUES Handle = 0x8031000D + FVE_E_AD_ATTR_NOT_SET Handle = 0x8031000E + FVE_E_AD_GUID_NOT_FOUND Handle = 0x8031000F + FVE_E_BAD_INFORMATION Handle = 0x80310010 + FVE_E_TOO_SMALL Handle = 0x80310011 + FVE_E_SYSTEM_VOLUME Handle = 0x80310012 + FVE_E_FAILED_WRONG_FS Handle = 0x80310013 + FVE_E_BAD_PARTITION_SIZE Handle = 0x80310014 + FVE_E_NOT_SUPPORTED Handle = 0x80310015 + FVE_E_BAD_DATA Handle = 0x80310016 + FVE_E_VOLUME_NOT_BOUND Handle = 0x80310017 + FVE_E_TPM_NOT_OWNED Handle = 0x80310018 + FVE_E_NOT_DATA_VOLUME Handle = 0x80310019 + FVE_E_AD_INSUFFICIENT_BUFFER Handle = 0x8031001A + FVE_E_CONV_READ Handle = 0x8031001B + FVE_E_CONV_WRITE Handle = 0x8031001C + FVE_E_KEY_REQUIRED Handle = 0x8031001D + FVE_E_CLUSTERING_NOT_SUPPORTED Handle = 0x8031001E + FVE_E_VOLUME_BOUND_ALREADY Handle = 0x8031001F + FVE_E_OS_NOT_PROTECTED Handle = 0x80310020 + FVE_E_PROTECTION_DISABLED Handle = 0x80310021 + FVE_E_RECOVERY_KEY_REQUIRED Handle = 0x80310022 + FVE_E_FOREIGN_VOLUME Handle = 0x80310023 + FVE_E_OVERLAPPED_UPDATE Handle = 0x80310024 + FVE_E_TPM_SRK_AUTH_NOT_ZERO Handle = 0x80310025 + FVE_E_FAILED_SECTOR_SIZE Handle = 0x80310026 + FVE_E_FAILED_AUTHENTICATION Handle = 0x80310027 + FVE_E_NOT_OS_VOLUME Handle = 0x80310028 + FVE_E_AUTOUNLOCK_ENABLED Handle = 0x80310029 + FVE_E_WRONG_BOOTSECTOR Handle = 0x8031002A + FVE_E_WRONG_SYSTEM_FS Handle = 0x8031002B + FVE_E_POLICY_PASSWORD_REQUIRED Handle = 0x8031002C + FVE_E_CANNOT_SET_FVEK_ENCRYPTED Handle = 0x8031002D + FVE_E_CANNOT_ENCRYPT_NO_KEY Handle = 0x8031002E + FVE_E_BOOTABLE_CDDVD Handle = 0x80310030 + FVE_E_PROTECTOR_EXISTS Handle = 0x80310031 + FVE_E_RELATIVE_PATH Handle = 0x80310032 + FVE_E_PROTECTOR_NOT_FOUND Handle = 0x80310033 + FVE_E_INVALID_KEY_FORMAT Handle = 0x80310034 + FVE_E_INVALID_PASSWORD_FORMAT Handle = 0x80310035 + FVE_E_FIPS_RNG_CHECK_FAILED Handle = 0x80310036 + FVE_E_FIPS_PREVENTS_RECOVERY_PASSWORD Handle = 0x80310037 + FVE_E_FIPS_PREVENTS_EXTERNAL_KEY_EXPORT Handle = 0x80310038 + 
FVE_E_NOT_DECRYPTED Handle = 0x80310039 + FVE_E_INVALID_PROTECTOR_TYPE Handle = 0x8031003A + FVE_E_NO_PROTECTORS_TO_TEST Handle = 0x8031003B + FVE_E_KEYFILE_NOT_FOUND Handle = 0x8031003C + FVE_E_KEYFILE_INVALID Handle = 0x8031003D + FVE_E_KEYFILE_NO_VMK Handle = 0x8031003E + FVE_E_TPM_DISABLED Handle = 0x8031003F + FVE_E_NOT_ALLOWED_IN_SAFE_MODE Handle = 0x80310040 + FVE_E_TPM_INVALID_PCR Handle = 0x80310041 + FVE_E_TPM_NO_VMK Handle = 0x80310042 + FVE_E_PIN_INVALID Handle = 0x80310043 + FVE_E_AUTH_INVALID_APPLICATION Handle = 0x80310044 + FVE_E_AUTH_INVALID_CONFIG Handle = 0x80310045 + FVE_E_FIPS_DISABLE_PROTECTION_NOT_ALLOWED Handle = 0x80310046 + FVE_E_FS_NOT_EXTENDED Handle = 0x80310047 + FVE_E_FIRMWARE_TYPE_NOT_SUPPORTED Handle = 0x80310048 + FVE_E_NO_LICENSE Handle = 0x80310049 + FVE_E_NOT_ON_STACK Handle = 0x8031004A + FVE_E_FS_MOUNTED Handle = 0x8031004B + FVE_E_TOKEN_NOT_IMPERSONATED Handle = 0x8031004C + FVE_E_DRY_RUN_FAILED Handle = 0x8031004D + FVE_E_REBOOT_REQUIRED Handle = 0x8031004E + FVE_E_DEBUGGER_ENABLED Handle = 0x8031004F + FVE_E_RAW_ACCESS Handle = 0x80310050 + FVE_E_RAW_BLOCKED Handle = 0x80310051 + FVE_E_BCD_APPLICATIONS_PATH_INCORRECT Handle = 0x80310052 + FVE_E_NOT_ALLOWED_IN_VERSION Handle = 0x80310053 + FVE_E_NO_AUTOUNLOCK_MASTER_KEY Handle = 0x80310054 + FVE_E_MOR_FAILED Handle = 0x80310055 + FVE_E_HIDDEN_VOLUME Handle = 0x80310056 + FVE_E_TRANSIENT_STATE Handle = 0x80310057 + FVE_E_PUBKEY_NOT_ALLOWED Handle = 0x80310058 + FVE_E_VOLUME_HANDLE_OPEN Handle = 0x80310059 + FVE_E_NO_FEATURE_LICENSE Handle = 0x8031005A + FVE_E_INVALID_STARTUP_OPTIONS Handle = 0x8031005B + FVE_E_POLICY_RECOVERY_PASSWORD_NOT_ALLOWED Handle = 0x8031005C + FVE_E_POLICY_RECOVERY_PASSWORD_REQUIRED Handle = 0x8031005D + FVE_E_POLICY_RECOVERY_KEY_NOT_ALLOWED Handle = 0x8031005E + FVE_E_POLICY_RECOVERY_KEY_REQUIRED Handle = 0x8031005F + FVE_E_POLICY_STARTUP_PIN_NOT_ALLOWED Handle = 0x80310060 + FVE_E_POLICY_STARTUP_PIN_REQUIRED Handle = 0x80310061 + FVE_E_POLICY_STARTUP_KEY_NOT_ALLOWED Handle = 0x80310062 + FVE_E_POLICY_STARTUP_KEY_REQUIRED Handle = 0x80310063 + FVE_E_POLICY_STARTUP_PIN_KEY_NOT_ALLOWED Handle = 0x80310064 + FVE_E_POLICY_STARTUP_PIN_KEY_REQUIRED Handle = 0x80310065 + FVE_E_POLICY_STARTUP_TPM_NOT_ALLOWED Handle = 0x80310066 + FVE_E_POLICY_STARTUP_TPM_REQUIRED Handle = 0x80310067 + FVE_E_POLICY_INVALID_PIN_LENGTH Handle = 0x80310068 + FVE_E_KEY_PROTECTOR_NOT_SUPPORTED Handle = 0x80310069 + FVE_E_POLICY_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006A + FVE_E_POLICY_PASSPHRASE_REQUIRED Handle = 0x8031006B + FVE_E_FIPS_PREVENTS_PASSPHRASE Handle = 0x8031006C + FVE_E_OS_VOLUME_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006D + FVE_E_INVALID_BITLOCKER_OID Handle = 0x8031006E + FVE_E_VOLUME_TOO_SMALL Handle = 0x8031006F + FVE_E_DV_NOT_SUPPORTED_ON_FS Handle = 0x80310070 + FVE_E_DV_NOT_ALLOWED_BY_GP Handle = 0x80310071 + FVE_E_POLICY_USER_CERTIFICATE_NOT_ALLOWED Handle = 0x80310072 + FVE_E_POLICY_USER_CERTIFICATE_REQUIRED Handle = 0x80310073 + FVE_E_POLICY_USER_CERT_MUST_BE_HW Handle = 0x80310074 + FVE_E_POLICY_USER_CONFIGURE_FDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310075 + FVE_E_POLICY_USER_CONFIGURE_RDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310076 + FVE_E_POLICY_USER_CONFIGURE_RDV_NOT_ALLOWED Handle = 0x80310077 + FVE_E_POLICY_USER_ENABLE_RDV_NOT_ALLOWED Handle = 0x80310078 + FVE_E_POLICY_USER_DISABLE_RDV_NOT_ALLOWED Handle = 0x80310079 + FVE_E_POLICY_INVALID_PASSPHRASE_LENGTH Handle = 0x80310080 + FVE_E_POLICY_PASSPHRASE_TOO_SIMPLE Handle = 0x80310081 + FVE_E_RECOVERY_PARTITION Handle = 
0x80310082 + FVE_E_POLICY_CONFLICT_FDV_RK_OFF_AUK_ON Handle = 0x80310083 + FVE_E_POLICY_CONFLICT_RDV_RK_OFF_AUK_ON Handle = 0x80310084 + FVE_E_NON_BITLOCKER_OID Handle = 0x80310085 + FVE_E_POLICY_PROHIBITS_SELFSIGNED Handle = 0x80310086 + FVE_E_POLICY_CONFLICT_RO_AND_STARTUP_KEY_REQUIRED Handle = 0x80310087 + FVE_E_CONV_RECOVERY_FAILED Handle = 0x80310088 + FVE_E_VIRTUALIZED_SPACE_TOO_BIG Handle = 0x80310089 + FVE_E_POLICY_CONFLICT_OSV_RP_OFF_ADB_ON Handle = 0x80310090 + FVE_E_POLICY_CONFLICT_FDV_RP_OFF_ADB_ON Handle = 0x80310091 + FVE_E_POLICY_CONFLICT_RDV_RP_OFF_ADB_ON Handle = 0x80310092 + FVE_E_NON_BITLOCKER_KU Handle = 0x80310093 + FVE_E_PRIVATEKEY_AUTH_FAILED Handle = 0x80310094 + FVE_E_REMOVAL_OF_DRA_FAILED Handle = 0x80310095 + FVE_E_OPERATION_NOT_SUPPORTED_ON_VISTA_VOLUME Handle = 0x80310096 + FVE_E_CANT_LOCK_AUTOUNLOCK_ENABLED_VOLUME Handle = 0x80310097 + FVE_E_FIPS_HASH_KDF_NOT_ALLOWED Handle = 0x80310098 + FVE_E_ENH_PIN_INVALID Handle = 0x80310099 + FVE_E_INVALID_PIN_CHARS Handle = 0x8031009A + FVE_E_INVALID_DATUM_TYPE Handle = 0x8031009B + FVE_E_EFI_ONLY Handle = 0x8031009C + FVE_E_MULTIPLE_NKP_CERTS Handle = 0x8031009D + FVE_E_REMOVAL_OF_NKP_FAILED Handle = 0x8031009E + FVE_E_INVALID_NKP_CERT Handle = 0x8031009F + FVE_E_NO_EXISTING_PIN Handle = 0x803100A0 + FVE_E_PROTECTOR_CHANGE_PIN_MISMATCH Handle = 0x803100A1 + FVE_E_PIN_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100A2 + FVE_E_PROTECTOR_CHANGE_MAX_PIN_CHANGE_ATTEMPTS_REACHED Handle = 0x803100A3 + FVE_E_POLICY_PASSPHRASE_REQUIRES_ASCII Handle = 0x803100A4 + FVE_E_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A5 + FVE_E_WIPE_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A6 + FVE_E_KEY_LENGTH_NOT_SUPPORTED_BY_EDRIVE Handle = 0x803100A7 + FVE_E_NO_EXISTING_PASSPHRASE Handle = 0x803100A8 + FVE_E_PROTECTOR_CHANGE_PASSPHRASE_MISMATCH Handle = 0x803100A9 + FVE_E_PASSPHRASE_TOO_LONG Handle = 0x803100AA + FVE_E_NO_PASSPHRASE_WITH_TPM Handle = 0x803100AB + FVE_E_NO_TPM_WITH_PASSPHRASE Handle = 0x803100AC + FVE_E_NOT_ALLOWED_ON_CSV_STACK Handle = 0x803100AD + FVE_E_NOT_ALLOWED_ON_CLUSTER Handle = 0x803100AE + FVE_E_EDRIVE_NO_FAILOVER_TO_SW Handle = 0x803100AF + FVE_E_EDRIVE_BAND_IN_USE Handle = 0x803100B0 + FVE_E_EDRIVE_DISALLOWED_BY_GP Handle = 0x803100B1 + FVE_E_EDRIVE_INCOMPATIBLE_VOLUME Handle = 0x803100B2 + FVE_E_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING Handle = 0x803100B3 + FVE_E_EDRIVE_DV_NOT_SUPPORTED Handle = 0x803100B4 + FVE_E_NO_PREBOOT_KEYBOARD_DETECTED Handle = 0x803100B5 + FVE_E_NO_PREBOOT_KEYBOARD_OR_WINRE_DETECTED Handle = 0x803100B6 + FVE_E_POLICY_REQUIRES_STARTUP_PIN_ON_TOUCH_DEVICE Handle = 0x803100B7 + FVE_E_POLICY_REQUIRES_RECOVERY_PASSWORD_ON_TOUCH_DEVICE Handle = 0x803100B8 + FVE_E_WIPE_CANCEL_NOT_APPLICABLE Handle = 0x803100B9 + FVE_E_SECUREBOOT_DISABLED Handle = 0x803100BA + FVE_E_SECUREBOOT_CONFIGURATION_INVALID Handle = 0x803100BB + FVE_E_EDRIVE_DRY_RUN_FAILED Handle = 0x803100BC + FVE_E_SHADOW_COPY_PRESENT Handle = 0x803100BD + FVE_E_POLICY_INVALID_ENHANCED_BCD_SETTINGS Handle = 0x803100BE + FVE_E_EDRIVE_INCOMPATIBLE_FIRMWARE Handle = 0x803100BF + FVE_E_PROTECTOR_CHANGE_MAX_PASSPHRASE_CHANGE_ATTEMPTS_REACHED Handle = 0x803100C0 + FVE_E_PASSPHRASE_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100C1 + FVE_E_LIVEID_ACCOUNT_SUSPENDED Handle = 0x803100C2 + FVE_E_LIVEID_ACCOUNT_BLOCKED Handle = 0x803100C3 + FVE_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x803100C4 + FVE_E_DE_FIXED_DATA_NOT_SUPPORTED Handle = 0x803100C5 + FVE_E_DE_HARDWARE_NOT_COMPLIANT Handle = 0x803100C6 + 
FVE_E_DE_WINRE_NOT_CONFIGURED Handle = 0x803100C7 + FVE_E_DE_PROTECTION_SUSPENDED Handle = 0x803100C8 + FVE_E_DE_OS_VOLUME_NOT_PROTECTED Handle = 0x803100C9 + FVE_E_DE_DEVICE_LOCKEDOUT Handle = 0x803100CA + FVE_E_DE_PROTECTION_NOT_YET_ENABLED Handle = 0x803100CB + FVE_E_INVALID_PIN_CHARS_DETAILED Handle = 0x803100CC + FVE_E_DEVICE_LOCKOUT_COUNTER_UNAVAILABLE Handle = 0x803100CD + FVE_E_DEVICELOCKOUT_COUNTER_MISMATCH Handle = 0x803100CE + FVE_E_BUFFER_TOO_LARGE Handle = 0x803100CF + FVE_E_NO_SUCH_CAPABILITY_ON_TARGET Handle = 0x803100D0 + FVE_E_DE_PREVENTED_FOR_OS Handle = 0x803100D1 + FVE_E_DE_VOLUME_OPTED_OUT Handle = 0x803100D2 + FVE_E_DE_VOLUME_NOT_SUPPORTED Handle = 0x803100D3 + FVE_E_EOW_NOT_SUPPORTED_IN_VERSION Handle = 0x803100D4 + FVE_E_ADBACKUP_NOT_ENABLED Handle = 0x803100D5 + FVE_E_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT Handle = 0x803100D6 + FVE_E_NOT_DE_VOLUME Handle = 0x803100D7 + FVE_E_PROTECTION_CANNOT_BE_DISABLED Handle = 0x803100D8 + FVE_E_OSV_KSR_NOT_ALLOWED Handle = 0x803100D9 + FWP_E_CALLOUT_NOT_FOUND Handle = 0x80320001 + FWP_E_CONDITION_NOT_FOUND Handle = 0x80320002 + FWP_E_FILTER_NOT_FOUND Handle = 0x80320003 + FWP_E_LAYER_NOT_FOUND Handle = 0x80320004 + FWP_E_PROVIDER_NOT_FOUND Handle = 0x80320005 + FWP_E_PROVIDER_CONTEXT_NOT_FOUND Handle = 0x80320006 + FWP_E_SUBLAYER_NOT_FOUND Handle = 0x80320007 + FWP_E_NOT_FOUND Handle = 0x80320008 + FWP_E_ALREADY_EXISTS Handle = 0x80320009 + FWP_E_IN_USE Handle = 0x8032000A + FWP_E_DYNAMIC_SESSION_IN_PROGRESS Handle = 0x8032000B + FWP_E_WRONG_SESSION Handle = 0x8032000C + FWP_E_NO_TXN_IN_PROGRESS Handle = 0x8032000D + FWP_E_TXN_IN_PROGRESS Handle = 0x8032000E + FWP_E_TXN_ABORTED Handle = 0x8032000F + FWP_E_SESSION_ABORTED Handle = 0x80320010 + FWP_E_INCOMPATIBLE_TXN Handle = 0x80320011 + FWP_E_TIMEOUT Handle = 0x80320012 + FWP_E_NET_EVENTS_DISABLED Handle = 0x80320013 + FWP_E_INCOMPATIBLE_LAYER Handle = 0x80320014 + FWP_E_KM_CLIENTS_ONLY Handle = 0x80320015 + FWP_E_LIFETIME_MISMATCH Handle = 0x80320016 + FWP_E_BUILTIN_OBJECT Handle = 0x80320017 + FWP_E_TOO_MANY_CALLOUTS Handle = 0x80320018 + FWP_E_NOTIFICATION_DROPPED Handle = 0x80320019 + FWP_E_TRAFFIC_MISMATCH Handle = 0x8032001A + FWP_E_INCOMPATIBLE_SA_STATE Handle = 0x8032001B + FWP_E_NULL_POINTER Handle = 0x8032001C + FWP_E_INVALID_ENUMERATOR Handle = 0x8032001D + FWP_E_INVALID_FLAGS Handle = 0x8032001E + FWP_E_INVALID_NET_MASK Handle = 0x8032001F + FWP_E_INVALID_RANGE Handle = 0x80320020 + FWP_E_INVALID_INTERVAL Handle = 0x80320021 + FWP_E_ZERO_LENGTH_ARRAY Handle = 0x80320022 + FWP_E_NULL_DISPLAY_NAME Handle = 0x80320023 + FWP_E_INVALID_ACTION_TYPE Handle = 0x80320024 + FWP_E_INVALID_WEIGHT Handle = 0x80320025 + FWP_E_MATCH_TYPE_MISMATCH Handle = 0x80320026 + FWP_E_TYPE_MISMATCH Handle = 0x80320027 + FWP_E_OUT_OF_BOUNDS Handle = 0x80320028 + FWP_E_RESERVED Handle = 0x80320029 + FWP_E_DUPLICATE_CONDITION Handle = 0x8032002A + FWP_E_DUPLICATE_KEYMOD Handle = 0x8032002B + FWP_E_ACTION_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002C + FWP_E_ACTION_INCOMPATIBLE_WITH_SUBLAYER Handle = 0x8032002D + FWP_E_CONTEXT_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002E + FWP_E_CONTEXT_INCOMPATIBLE_WITH_CALLOUT Handle = 0x8032002F + FWP_E_INCOMPATIBLE_AUTH_METHOD Handle = 0x80320030 + FWP_E_INCOMPATIBLE_DH_GROUP Handle = 0x80320031 + FWP_E_EM_NOT_SUPPORTED Handle = 0x80320032 + FWP_E_NEVER_MATCH Handle = 0x80320033 + FWP_E_PROVIDER_CONTEXT_MISMATCH Handle = 0x80320034 + FWP_E_INVALID_PARAMETER Handle = 0x80320035 + FWP_E_TOO_MANY_SUBLAYERS Handle = 0x80320036 + FWP_E_CALLOUT_NOTIFICATION_FAILED Handle 
= 0x80320037 + FWP_E_INVALID_AUTH_TRANSFORM Handle = 0x80320038 + FWP_E_INVALID_CIPHER_TRANSFORM Handle = 0x80320039 + FWP_E_INCOMPATIBLE_CIPHER_TRANSFORM Handle = 0x8032003A + FWP_E_INVALID_TRANSFORM_COMBINATION Handle = 0x8032003B + FWP_E_DUPLICATE_AUTH_METHOD Handle = 0x8032003C + FWP_E_INVALID_TUNNEL_ENDPOINT Handle = 0x8032003D + FWP_E_L2_DRIVER_NOT_READY Handle = 0x8032003E + FWP_E_KEY_DICTATOR_ALREADY_REGISTERED Handle = 0x8032003F + FWP_E_KEY_DICTATION_INVALID_KEYING_MATERIAL Handle = 0x80320040 + FWP_E_CONNECTIONS_DISABLED Handle = 0x80320041 + FWP_E_INVALID_DNS_NAME Handle = 0x80320042 + FWP_E_STILL_ON Handle = 0x80320043 + FWP_E_IKEEXT_NOT_RUNNING Handle = 0x80320044 + FWP_E_DROP_NOICMP Handle = 0x80320104 + WS_S_ASYNC Handle = 0x003D0000 + WS_S_END Handle = 0x003D0001 + WS_E_INVALID_FORMAT Handle = 0x803D0000 + WS_E_OBJECT_FAULTED Handle = 0x803D0001 + WS_E_NUMERIC_OVERFLOW Handle = 0x803D0002 + WS_E_INVALID_OPERATION Handle = 0x803D0003 + WS_E_OPERATION_ABORTED Handle = 0x803D0004 + WS_E_ENDPOINT_ACCESS_DENIED Handle = 0x803D0005 + WS_E_OPERATION_TIMED_OUT Handle = 0x803D0006 + WS_E_OPERATION_ABANDONED Handle = 0x803D0007 + WS_E_QUOTA_EXCEEDED Handle = 0x803D0008 + WS_E_NO_TRANSLATION_AVAILABLE Handle = 0x803D0009 + WS_E_SECURITY_VERIFICATION_FAILURE Handle = 0x803D000A + WS_E_ADDRESS_IN_USE Handle = 0x803D000B + WS_E_ADDRESS_NOT_AVAILABLE Handle = 0x803D000C + WS_E_ENDPOINT_NOT_FOUND Handle = 0x803D000D + WS_E_ENDPOINT_NOT_AVAILABLE Handle = 0x803D000E + WS_E_ENDPOINT_FAILURE Handle = 0x803D000F + WS_E_ENDPOINT_UNREACHABLE Handle = 0x803D0010 + WS_E_ENDPOINT_ACTION_NOT_SUPPORTED Handle = 0x803D0011 + WS_E_ENDPOINT_TOO_BUSY Handle = 0x803D0012 + WS_E_ENDPOINT_FAULT_RECEIVED Handle = 0x803D0013 + WS_E_ENDPOINT_DISCONNECTED Handle = 0x803D0014 + WS_E_PROXY_FAILURE Handle = 0x803D0015 + WS_E_PROXY_ACCESS_DENIED Handle = 0x803D0016 + WS_E_NOT_SUPPORTED Handle = 0x803D0017 + WS_E_PROXY_REQUIRES_BASIC_AUTH Handle = 0x803D0018 + WS_E_PROXY_REQUIRES_DIGEST_AUTH Handle = 0x803D0019 + WS_E_PROXY_REQUIRES_NTLM_AUTH Handle = 0x803D001A + WS_E_PROXY_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001B + WS_E_SERVER_REQUIRES_BASIC_AUTH Handle = 0x803D001C + WS_E_SERVER_REQUIRES_DIGEST_AUTH Handle = 0x803D001D + WS_E_SERVER_REQUIRES_NTLM_AUTH Handle = 0x803D001E + WS_E_SERVER_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001F + WS_E_INVALID_ENDPOINT_URL Handle = 0x803D0020 + WS_E_OTHER Handle = 0x803D0021 + WS_E_SECURITY_TOKEN_EXPIRED Handle = 0x803D0022 + WS_E_SECURITY_SYSTEM_FAILURE Handle = 0x803D0023 + ERROR_NDIS_INTERFACE_CLOSING syscall.Errno = 0x80340002 + ERROR_NDIS_BAD_VERSION syscall.Errno = 0x80340004 + ERROR_NDIS_BAD_CHARACTERISTICS syscall.Errno = 0x80340005 + ERROR_NDIS_ADAPTER_NOT_FOUND syscall.Errno = 0x80340006 + ERROR_NDIS_OPEN_FAILED syscall.Errno = 0x80340007 + ERROR_NDIS_DEVICE_FAILED syscall.Errno = 0x80340008 + ERROR_NDIS_MULTICAST_FULL syscall.Errno = 0x80340009 + ERROR_NDIS_MULTICAST_EXISTS syscall.Errno = 0x8034000A + ERROR_NDIS_MULTICAST_NOT_FOUND syscall.Errno = 0x8034000B + ERROR_NDIS_REQUEST_ABORTED syscall.Errno = 0x8034000C + ERROR_NDIS_RESET_IN_PROGRESS syscall.Errno = 0x8034000D + ERROR_NDIS_NOT_SUPPORTED syscall.Errno = 0x803400BB + ERROR_NDIS_INVALID_PACKET syscall.Errno = 0x8034000F + ERROR_NDIS_ADAPTER_NOT_READY syscall.Errno = 0x80340011 + ERROR_NDIS_INVALID_LENGTH syscall.Errno = 0x80340014 + ERROR_NDIS_INVALID_DATA syscall.Errno = 0x80340015 + ERROR_NDIS_BUFFER_TOO_SHORT syscall.Errno = 0x80340016 + ERROR_NDIS_INVALID_OID syscall.Errno = 0x80340017 + 
ERROR_NDIS_ADAPTER_REMOVED syscall.Errno = 0x80340018 + ERROR_NDIS_UNSUPPORTED_MEDIA syscall.Errno = 0x80340019 + ERROR_NDIS_GROUP_ADDRESS_IN_USE syscall.Errno = 0x8034001A + ERROR_NDIS_FILE_NOT_FOUND syscall.Errno = 0x8034001B + ERROR_NDIS_ERROR_READING_FILE syscall.Errno = 0x8034001C + ERROR_NDIS_ALREADY_MAPPED syscall.Errno = 0x8034001D + ERROR_NDIS_RESOURCE_CONFLICT syscall.Errno = 0x8034001E + ERROR_NDIS_MEDIA_DISCONNECTED syscall.Errno = 0x8034001F + ERROR_NDIS_INVALID_ADDRESS syscall.Errno = 0x80340022 + ERROR_NDIS_INVALID_DEVICE_REQUEST syscall.Errno = 0x80340010 + ERROR_NDIS_PAUSED syscall.Errno = 0x8034002A + ERROR_NDIS_INTERFACE_NOT_FOUND syscall.Errno = 0x8034002B + ERROR_NDIS_UNSUPPORTED_REVISION syscall.Errno = 0x8034002C + ERROR_NDIS_INVALID_PORT syscall.Errno = 0x8034002D + ERROR_NDIS_INVALID_PORT_STATE syscall.Errno = 0x8034002E + ERROR_NDIS_LOW_POWER_STATE syscall.Errno = 0x8034002F + ERROR_NDIS_REINIT_REQUIRED syscall.Errno = 0x80340030 + ERROR_NDIS_NO_QUEUES syscall.Errno = 0x80340031 + ERROR_NDIS_DOT11_AUTO_CONFIG_ENABLED syscall.Errno = 0x80342000 + ERROR_NDIS_DOT11_MEDIA_IN_USE syscall.Errno = 0x80342001 + ERROR_NDIS_DOT11_POWER_STATE_INVALID syscall.Errno = 0x80342002 + ERROR_NDIS_PM_WOL_PATTERN_LIST_FULL syscall.Errno = 0x80342003 + ERROR_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL syscall.Errno = 0x80342004 + ERROR_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342005 + ERROR_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342006 + ERROR_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED syscall.Errno = 0x80342007 + ERROR_NDIS_DOT11_AP_BAND_NOT_ALLOWED syscall.Errno = 0x80342008 + ERROR_NDIS_INDICATION_REQUIRED syscall.Errno = 0x00340001 + ERROR_NDIS_OFFLOAD_POLICY syscall.Errno = 0xC034100F + ERROR_NDIS_OFFLOAD_CONNECTION_REJECTED syscall.Errno = 0xC0341012 + ERROR_NDIS_OFFLOAD_PATH_REJECTED syscall.Errno = 0xC0341013 + ERROR_HV_INVALID_HYPERCALL_CODE syscall.Errno = 0xC0350002 + ERROR_HV_INVALID_HYPERCALL_INPUT syscall.Errno = 0xC0350003 + ERROR_HV_INVALID_ALIGNMENT syscall.Errno = 0xC0350004 + ERROR_HV_INVALID_PARAMETER syscall.Errno = 0xC0350005 + ERROR_HV_ACCESS_DENIED syscall.Errno = 0xC0350006 + ERROR_HV_INVALID_PARTITION_STATE syscall.Errno = 0xC0350007 + ERROR_HV_OPERATION_DENIED syscall.Errno = 0xC0350008 + ERROR_HV_UNKNOWN_PROPERTY syscall.Errno = 0xC0350009 + ERROR_HV_PROPERTY_VALUE_OUT_OF_RANGE syscall.Errno = 0xC035000A + ERROR_HV_INSUFFICIENT_MEMORY syscall.Errno = 0xC035000B + ERROR_HV_PARTITION_TOO_DEEP syscall.Errno = 0xC035000C + ERROR_HV_INVALID_PARTITION_ID syscall.Errno = 0xC035000D + ERROR_HV_INVALID_VP_INDEX syscall.Errno = 0xC035000E + ERROR_HV_INVALID_PORT_ID syscall.Errno = 0xC0350011 + ERROR_HV_INVALID_CONNECTION_ID syscall.Errno = 0xC0350012 + ERROR_HV_INSUFFICIENT_BUFFERS syscall.Errno = 0xC0350013 + ERROR_HV_NOT_ACKNOWLEDGED syscall.Errno = 0xC0350014 + ERROR_HV_INVALID_VP_STATE syscall.Errno = 0xC0350015 + ERROR_HV_ACKNOWLEDGED syscall.Errno = 0xC0350016 + ERROR_HV_INVALID_SAVE_RESTORE_STATE syscall.Errno = 0xC0350017 + ERROR_HV_INVALID_SYNIC_STATE syscall.Errno = 0xC0350018 + ERROR_HV_OBJECT_IN_USE syscall.Errno = 0xC0350019 + ERROR_HV_INVALID_PROXIMITY_DOMAIN_INFO syscall.Errno = 0xC035001A + ERROR_HV_NO_DATA syscall.Errno = 0xC035001B + ERROR_HV_INACTIVE syscall.Errno = 0xC035001C + ERROR_HV_NO_RESOURCES syscall.Errno = 0xC035001D + ERROR_HV_FEATURE_UNAVAILABLE syscall.Errno = 0xC035001E + ERROR_HV_INSUFFICIENT_BUFFER syscall.Errno = 0xC0350033 + ERROR_HV_INSUFFICIENT_DEVICE_DOMAINS syscall.Errno = 0xC0350038 + 
ERROR_HV_CPUID_FEATURE_VALIDATION syscall.Errno = 0xC035003C + ERROR_HV_CPUID_XSAVE_FEATURE_VALIDATION syscall.Errno = 0xC035003D + ERROR_HV_PROCESSOR_STARTUP_TIMEOUT syscall.Errno = 0xC035003E + ERROR_HV_SMX_ENABLED syscall.Errno = 0xC035003F + ERROR_HV_INVALID_LP_INDEX syscall.Errno = 0xC0350041 + ERROR_HV_INVALID_REGISTER_VALUE syscall.Errno = 0xC0350050 + ERROR_HV_INVALID_VTL_STATE syscall.Errno = 0xC0350051 + ERROR_HV_NX_NOT_DETECTED syscall.Errno = 0xC0350055 + ERROR_HV_INVALID_DEVICE_ID syscall.Errno = 0xC0350057 + ERROR_HV_INVALID_DEVICE_STATE syscall.Errno = 0xC0350058 + ERROR_HV_PENDING_PAGE_REQUESTS syscall.Errno = 0x00350059 + ERROR_HV_PAGE_REQUEST_INVALID syscall.Errno = 0xC0350060 + ERROR_HV_INVALID_CPU_GROUP_ID syscall.Errno = 0xC035006F + ERROR_HV_INVALID_CPU_GROUP_STATE syscall.Errno = 0xC0350070 + ERROR_HV_OPERATION_FAILED syscall.Errno = 0xC0350071 + ERROR_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE syscall.Errno = 0xC0350072 + ERROR_HV_INSUFFICIENT_ROOT_MEMORY syscall.Errno = 0xC0350073 + ERROR_HV_NOT_PRESENT syscall.Errno = 0xC0351000 + ERROR_VID_DUPLICATE_HANDLER syscall.Errno = 0xC0370001 + ERROR_VID_TOO_MANY_HANDLERS syscall.Errno = 0xC0370002 + ERROR_VID_QUEUE_FULL syscall.Errno = 0xC0370003 + ERROR_VID_HANDLER_NOT_PRESENT syscall.Errno = 0xC0370004 + ERROR_VID_INVALID_OBJECT_NAME syscall.Errno = 0xC0370005 + ERROR_VID_PARTITION_NAME_TOO_LONG syscall.Errno = 0xC0370006 + ERROR_VID_MESSAGE_QUEUE_NAME_TOO_LONG syscall.Errno = 0xC0370007 + ERROR_VID_PARTITION_ALREADY_EXISTS syscall.Errno = 0xC0370008 + ERROR_VID_PARTITION_DOES_NOT_EXIST syscall.Errno = 0xC0370009 + ERROR_VID_PARTITION_NAME_NOT_FOUND syscall.Errno = 0xC037000A + ERROR_VID_MESSAGE_QUEUE_ALREADY_EXISTS syscall.Errno = 0xC037000B + ERROR_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT syscall.Errno = 0xC037000C + ERROR_VID_MB_STILL_REFERENCED syscall.Errno = 0xC037000D + ERROR_VID_CHILD_GPA_PAGE_SET_CORRUPTED syscall.Errno = 0xC037000E + ERROR_VID_INVALID_NUMA_SETTINGS syscall.Errno = 0xC037000F + ERROR_VID_INVALID_NUMA_NODE_INDEX syscall.Errno = 0xC0370010 + ERROR_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED syscall.Errno = 0xC0370011 + ERROR_VID_INVALID_MEMORY_BLOCK_HANDLE syscall.Errno = 0xC0370012 + ERROR_VID_PAGE_RANGE_OVERFLOW syscall.Errno = 0xC0370013 + ERROR_VID_INVALID_MESSAGE_QUEUE_HANDLE syscall.Errno = 0xC0370014 + ERROR_VID_INVALID_GPA_RANGE_HANDLE syscall.Errno = 0xC0370015 + ERROR_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE syscall.Errno = 0xC0370016 + ERROR_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED syscall.Errno = 0xC0370017 + ERROR_VID_INVALID_PPM_HANDLE syscall.Errno = 0xC0370018 + ERROR_VID_MBPS_ARE_LOCKED syscall.Errno = 0xC0370019 + ERROR_VID_MESSAGE_QUEUE_CLOSED syscall.Errno = 0xC037001A + ERROR_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED syscall.Errno = 0xC037001B + ERROR_VID_STOP_PENDING syscall.Errno = 0xC037001C + ERROR_VID_INVALID_PROCESSOR_STATE syscall.Errno = 0xC037001D + ERROR_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT syscall.Errno = 0xC037001E + ERROR_VID_KM_INTERFACE_ALREADY_INITIALIZED syscall.Errno = 0xC037001F + ERROR_VID_MB_PROPERTY_ALREADY_SET_RESET syscall.Errno = 0xC0370020 + ERROR_VID_MMIO_RANGE_DESTROYED syscall.Errno = 0xC0370021 + ERROR_VID_INVALID_CHILD_GPA_PAGE_SET syscall.Errno = 0xC0370022 + ERROR_VID_RESERVE_PAGE_SET_IS_BEING_USED syscall.Errno = 0xC0370023 + ERROR_VID_RESERVE_PAGE_SET_TOO_SMALL syscall.Errno = 0xC0370024 + ERROR_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE syscall.Errno = 0xC0370025 + ERROR_VID_MBP_COUNT_EXCEEDED_LIMIT syscall.Errno = 0xC0370026 + ERROR_VID_SAVED_STATE_CORRUPT 
syscall.Errno = 0xC0370027 + ERROR_VID_SAVED_STATE_UNRECOGNIZED_ITEM syscall.Errno = 0xC0370028 + ERROR_VID_SAVED_STATE_INCOMPATIBLE syscall.Errno = 0xC0370029 + ERROR_VID_VTL_ACCESS_DENIED syscall.Errno = 0xC037002A + ERROR_VMCOMPUTE_TERMINATED_DURING_START syscall.Errno = 0xC0370100 + ERROR_VMCOMPUTE_IMAGE_MISMATCH syscall.Errno = 0xC0370101 + ERROR_VMCOMPUTE_HYPERV_NOT_INSTALLED syscall.Errno = 0xC0370102 + ERROR_VMCOMPUTE_OPERATION_PENDING syscall.Errno = 0xC0370103 + ERROR_VMCOMPUTE_TOO_MANY_NOTIFICATIONS syscall.Errno = 0xC0370104 + ERROR_VMCOMPUTE_INVALID_STATE syscall.Errno = 0xC0370105 + ERROR_VMCOMPUTE_UNEXPECTED_EXIT syscall.Errno = 0xC0370106 + ERROR_VMCOMPUTE_TERMINATED syscall.Errno = 0xC0370107 + ERROR_VMCOMPUTE_CONNECT_FAILED syscall.Errno = 0xC0370108 + ERROR_VMCOMPUTE_TIMEOUT syscall.Errno = 0xC0370109 + ERROR_VMCOMPUTE_CONNECTION_CLOSED syscall.Errno = 0xC037010A + ERROR_VMCOMPUTE_UNKNOWN_MESSAGE syscall.Errno = 0xC037010B + ERROR_VMCOMPUTE_UNSUPPORTED_PROTOCOL_VERSION syscall.Errno = 0xC037010C + ERROR_VMCOMPUTE_INVALID_JSON syscall.Errno = 0xC037010D + ERROR_VMCOMPUTE_SYSTEM_NOT_FOUND syscall.Errno = 0xC037010E + ERROR_VMCOMPUTE_SYSTEM_ALREADY_EXISTS syscall.Errno = 0xC037010F + ERROR_VMCOMPUTE_SYSTEM_ALREADY_STOPPED syscall.Errno = 0xC0370110 + ERROR_VMCOMPUTE_PROTOCOL_ERROR syscall.Errno = 0xC0370111 + ERROR_VMCOMPUTE_INVALID_LAYER syscall.Errno = 0xC0370112 + ERROR_VMCOMPUTE_WINDOWS_INSIDER_REQUIRED syscall.Errno = 0xC0370113 + HCS_E_TERMINATED_DURING_START Handle = 0x80370100 + HCS_E_IMAGE_MISMATCH Handle = 0x80370101 + HCS_E_HYPERV_NOT_INSTALLED Handle = 0x80370102 + HCS_E_INVALID_STATE Handle = 0x80370105 + HCS_E_UNEXPECTED_EXIT Handle = 0x80370106 + HCS_E_TERMINATED Handle = 0x80370107 + HCS_E_CONNECT_FAILED Handle = 0x80370108 + HCS_E_CONNECTION_TIMEOUT Handle = 0x80370109 + HCS_E_CONNECTION_CLOSED Handle = 0x8037010A + HCS_E_UNKNOWN_MESSAGE Handle = 0x8037010B + HCS_E_UNSUPPORTED_PROTOCOL_VERSION Handle = 0x8037010C + HCS_E_INVALID_JSON Handle = 0x8037010D + HCS_E_SYSTEM_NOT_FOUND Handle = 0x8037010E + HCS_E_SYSTEM_ALREADY_EXISTS Handle = 0x8037010F + HCS_E_SYSTEM_ALREADY_STOPPED Handle = 0x80370110 + HCS_E_PROTOCOL_ERROR Handle = 0x80370111 + HCS_E_INVALID_LAYER Handle = 0x80370112 + HCS_E_WINDOWS_INSIDER_REQUIRED Handle = 0x80370113 + HCS_E_SERVICE_NOT_AVAILABLE Handle = 0x80370114 + HCS_E_OPERATION_NOT_STARTED Handle = 0x80370115 + HCS_E_OPERATION_ALREADY_STARTED Handle = 0x80370116 + HCS_E_OPERATION_PENDING Handle = 0x80370117 + HCS_E_OPERATION_TIMEOUT Handle = 0x80370118 + HCS_E_OPERATION_SYSTEM_CALLBACK_ALREADY_SET Handle = 0x80370119 + HCS_E_OPERATION_RESULT_ALLOCATION_FAILED Handle = 0x8037011A + HCS_E_ACCESS_DENIED Handle = 0x8037011B + HCS_E_GUEST_CRITICAL_ERROR Handle = 0x8037011C + ERROR_VNET_VIRTUAL_SWITCH_NAME_NOT_FOUND syscall.Errno = 0xC0370200 + ERROR_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED syscall.Errno = 0x80370001 + WHV_E_UNKNOWN_CAPABILITY Handle = 0x80370300 + WHV_E_INSUFFICIENT_BUFFER Handle = 0x80370301 + WHV_E_UNKNOWN_PROPERTY Handle = 0x80370302 + WHV_E_UNSUPPORTED_HYPERVISOR_CONFIG Handle = 0x80370303 + WHV_E_INVALID_PARTITION_CONFIG Handle = 0x80370304 + WHV_E_GPA_RANGE_NOT_FOUND Handle = 0x80370305 + WHV_E_VP_ALREADY_EXISTS Handle = 0x80370306 + WHV_E_VP_DOES_NOT_EXIST Handle = 0x80370307 + WHV_E_INVALID_VP_STATE Handle = 0x80370308 + WHV_E_INVALID_VP_REGISTER_NAME Handle = 0x80370309 + ERROR_VSMB_SAVED_STATE_FILE_NOT_FOUND syscall.Errno = 0xC0370400 + ERROR_VSMB_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370401 + 
ERROR_VOLMGR_INCOMPLETE_REGENERATION syscall.Errno = 0x80380001 + ERROR_VOLMGR_INCOMPLETE_DISK_MIGRATION syscall.Errno = 0x80380002 + ERROR_VOLMGR_DATABASE_FULL syscall.Errno = 0xC0380001 + ERROR_VOLMGR_DISK_CONFIGURATION_CORRUPTED syscall.Errno = 0xC0380002 + ERROR_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC syscall.Errno = 0xC0380003 + ERROR_VOLMGR_PACK_CONFIG_UPDATE_FAILED syscall.Errno = 0xC0380004 + ERROR_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME syscall.Errno = 0xC0380005 + ERROR_VOLMGR_DISK_DUPLICATE syscall.Errno = 0xC0380006 + ERROR_VOLMGR_DISK_DYNAMIC syscall.Errno = 0xC0380007 + ERROR_VOLMGR_DISK_ID_INVALID syscall.Errno = 0xC0380008 + ERROR_VOLMGR_DISK_INVALID syscall.Errno = 0xC0380009 + ERROR_VOLMGR_DISK_LAST_VOTER syscall.Errno = 0xC038000A + ERROR_VOLMGR_DISK_LAYOUT_INVALID syscall.Errno = 0xC038000B + ERROR_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS syscall.Errno = 0xC038000C + ERROR_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED syscall.Errno = 0xC038000D + ERROR_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL syscall.Errno = 0xC038000E + ERROR_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS syscall.Errno = 0xC038000F + ERROR_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS syscall.Errno = 0xC0380010 + ERROR_VOLMGR_DISK_MISSING syscall.Errno = 0xC0380011 + ERROR_VOLMGR_DISK_NOT_EMPTY syscall.Errno = 0xC0380012 + ERROR_VOLMGR_DISK_NOT_ENOUGH_SPACE syscall.Errno = 0xC0380013 + ERROR_VOLMGR_DISK_REVECTORING_FAILED syscall.Errno = 0xC0380014 + ERROR_VOLMGR_DISK_SECTOR_SIZE_INVALID syscall.Errno = 0xC0380015 + ERROR_VOLMGR_DISK_SET_NOT_CONTAINED syscall.Errno = 0xC0380016 + ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS syscall.Errno = 0xC0380017 + ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES syscall.Errno = 0xC0380018 + ERROR_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED syscall.Errno = 0xC0380019 + ERROR_VOLMGR_EXTENT_ALREADY_USED syscall.Errno = 0xC038001A + ERROR_VOLMGR_EXTENT_NOT_CONTIGUOUS syscall.Errno = 0xC038001B + ERROR_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION syscall.Errno = 0xC038001C + ERROR_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED syscall.Errno = 0xC038001D + ERROR_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION syscall.Errno = 0xC038001E + ERROR_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH syscall.Errno = 0xC038001F + ERROR_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED syscall.Errno = 0xC0380020 + ERROR_VOLMGR_INTERLEAVE_LENGTH_INVALID syscall.Errno = 0xC0380021 + ERROR_VOLMGR_MAXIMUM_REGISTERED_USERS syscall.Errno = 0xC0380022 + ERROR_VOLMGR_MEMBER_IN_SYNC syscall.Errno = 0xC0380023 + ERROR_VOLMGR_MEMBER_INDEX_DUPLICATE syscall.Errno = 0xC0380024 + ERROR_VOLMGR_MEMBER_INDEX_INVALID syscall.Errno = 0xC0380025 + ERROR_VOLMGR_MEMBER_MISSING syscall.Errno = 0xC0380026 + ERROR_VOLMGR_MEMBER_NOT_DETACHED syscall.Errno = 0xC0380027 + ERROR_VOLMGR_MEMBER_REGENERATING syscall.Errno = 0xC0380028 + ERROR_VOLMGR_ALL_DISKS_FAILED syscall.Errno = 0xC0380029 + ERROR_VOLMGR_NO_REGISTERED_USERS syscall.Errno = 0xC038002A + ERROR_VOLMGR_NO_SUCH_USER syscall.Errno = 0xC038002B + ERROR_VOLMGR_NOTIFICATION_RESET syscall.Errno = 0xC038002C + ERROR_VOLMGR_NUMBER_OF_MEMBERS_INVALID syscall.Errno = 0xC038002D + ERROR_VOLMGR_NUMBER_OF_PLEXES_INVALID syscall.Errno = 0xC038002E + ERROR_VOLMGR_PACK_DUPLICATE syscall.Errno = 0xC038002F + ERROR_VOLMGR_PACK_ID_INVALID syscall.Errno = 0xC0380030 + ERROR_VOLMGR_PACK_INVALID syscall.Errno = 0xC0380031 + ERROR_VOLMGR_PACK_NAME_INVALID syscall.Errno = 0xC0380032 + ERROR_VOLMGR_PACK_OFFLINE syscall.Errno = 0xC0380033 + ERROR_VOLMGR_PACK_HAS_QUORUM syscall.Errno = 0xC0380034 + ERROR_VOLMGR_PACK_WITHOUT_QUORUM 
syscall.Errno = 0xC0380035 + ERROR_VOLMGR_PARTITION_STYLE_INVALID syscall.Errno = 0xC0380036 + ERROR_VOLMGR_PARTITION_UPDATE_FAILED syscall.Errno = 0xC0380037 + ERROR_VOLMGR_PLEX_IN_SYNC syscall.Errno = 0xC0380038 + ERROR_VOLMGR_PLEX_INDEX_DUPLICATE syscall.Errno = 0xC0380039 + ERROR_VOLMGR_PLEX_INDEX_INVALID syscall.Errno = 0xC038003A + ERROR_VOLMGR_PLEX_LAST_ACTIVE syscall.Errno = 0xC038003B + ERROR_VOLMGR_PLEX_MISSING syscall.Errno = 0xC038003C + ERROR_VOLMGR_PLEX_REGENERATING syscall.Errno = 0xC038003D + ERROR_VOLMGR_PLEX_TYPE_INVALID syscall.Errno = 0xC038003E + ERROR_VOLMGR_PLEX_NOT_RAID5 syscall.Errno = 0xC038003F + ERROR_VOLMGR_PLEX_NOT_SIMPLE syscall.Errno = 0xC0380040 + ERROR_VOLMGR_STRUCTURE_SIZE_INVALID syscall.Errno = 0xC0380041 + ERROR_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS syscall.Errno = 0xC0380042 + ERROR_VOLMGR_TRANSACTION_IN_PROGRESS syscall.Errno = 0xC0380043 + ERROR_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE syscall.Errno = 0xC0380044 + ERROR_VOLMGR_VOLUME_CONTAINS_MISSING_DISK syscall.Errno = 0xC0380045 + ERROR_VOLMGR_VOLUME_ID_INVALID syscall.Errno = 0xC0380046 + ERROR_VOLMGR_VOLUME_LENGTH_INVALID syscall.Errno = 0xC0380047 + ERROR_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE syscall.Errno = 0xC0380048 + ERROR_VOLMGR_VOLUME_NOT_MIRRORED syscall.Errno = 0xC0380049 + ERROR_VOLMGR_VOLUME_NOT_RETAINED syscall.Errno = 0xC038004A + ERROR_VOLMGR_VOLUME_OFFLINE syscall.Errno = 0xC038004B + ERROR_VOLMGR_VOLUME_RETAINED syscall.Errno = 0xC038004C + ERROR_VOLMGR_NUMBER_OF_EXTENTS_INVALID syscall.Errno = 0xC038004D + ERROR_VOLMGR_DIFFERENT_SECTOR_SIZE syscall.Errno = 0xC038004E + ERROR_VOLMGR_BAD_BOOT_DISK syscall.Errno = 0xC038004F + ERROR_VOLMGR_PACK_CONFIG_OFFLINE syscall.Errno = 0xC0380050 + ERROR_VOLMGR_PACK_CONFIG_ONLINE syscall.Errno = 0xC0380051 + ERROR_VOLMGR_NOT_PRIMARY_PACK syscall.Errno = 0xC0380052 + ERROR_VOLMGR_PACK_LOG_UPDATE_FAILED syscall.Errno = 0xC0380053 + ERROR_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID syscall.Errno = 0xC0380054 + ERROR_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID syscall.Errno = 0xC0380055 + ERROR_VOLMGR_VOLUME_MIRRORED syscall.Errno = 0xC0380056 + ERROR_VOLMGR_PLEX_NOT_SIMPLE_SPANNED syscall.Errno = 0xC0380057 + ERROR_VOLMGR_NO_VALID_LOG_COPIES syscall.Errno = 0xC0380058 + ERROR_VOLMGR_PRIMARY_PACK_PRESENT syscall.Errno = 0xC0380059 + ERROR_VOLMGR_NUMBER_OF_DISKS_INVALID syscall.Errno = 0xC038005A + ERROR_VOLMGR_MIRROR_NOT_SUPPORTED syscall.Errno = 0xC038005B + ERROR_VOLMGR_RAID5_NOT_SUPPORTED syscall.Errno = 0xC038005C + ERROR_BCD_NOT_ALL_ENTRIES_IMPORTED syscall.Errno = 0x80390001 + ERROR_BCD_TOO_MANY_ELEMENTS syscall.Errno = 0xC0390002 + ERROR_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED syscall.Errno = 0x80390003 + ERROR_VHD_DRIVE_FOOTER_MISSING syscall.Errno = 0xC03A0001 + ERROR_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0002 + ERROR_VHD_DRIVE_FOOTER_CORRUPT syscall.Errno = 0xC03A0003 + ERROR_VHD_FORMAT_UNKNOWN syscall.Errno = 0xC03A0004 + ERROR_VHD_FORMAT_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0005 + ERROR_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0006 + ERROR_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0007 + ERROR_VHD_SPARSE_HEADER_CORRUPT syscall.Errno = 0xC03A0008 + ERROR_VHD_BLOCK_ALLOCATION_FAILURE syscall.Errno = 0xC03A0009 + ERROR_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT syscall.Errno = 0xC03A000A + ERROR_VHD_INVALID_BLOCK_SIZE syscall.Errno = 0xC03A000B + ERROR_VHD_BITMAP_MISMATCH syscall.Errno = 0xC03A000C + ERROR_VHD_PARENT_VHD_NOT_FOUND syscall.Errno = 0xC03A000D + ERROR_VHD_CHILD_PARENT_ID_MISMATCH 
syscall.Errno = 0xC03A000E + ERROR_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH syscall.Errno = 0xC03A000F + ERROR_VHD_METADATA_READ_FAILURE syscall.Errno = 0xC03A0010 + ERROR_VHD_METADATA_WRITE_FAILURE syscall.Errno = 0xC03A0011 + ERROR_VHD_INVALID_SIZE syscall.Errno = 0xC03A0012 + ERROR_VHD_INVALID_FILE_SIZE syscall.Errno = 0xC03A0013 + ERROR_VIRTDISK_PROVIDER_NOT_FOUND syscall.Errno = 0xC03A0014 + ERROR_VIRTDISK_NOT_VIRTUAL_DISK syscall.Errno = 0xC03A0015 + ERROR_VHD_PARENT_VHD_ACCESS_DENIED syscall.Errno = 0xC03A0016 + ERROR_VHD_CHILD_PARENT_SIZE_MISMATCH syscall.Errno = 0xC03A0017 + ERROR_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED syscall.Errno = 0xC03A0018 + ERROR_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT syscall.Errno = 0xC03A0019 + ERROR_VIRTUAL_DISK_LIMITATION syscall.Errno = 0xC03A001A + ERROR_VHD_INVALID_TYPE syscall.Errno = 0xC03A001B + ERROR_VHD_INVALID_STATE syscall.Errno = 0xC03A001C + ERROR_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE syscall.Errno = 0xC03A001D + ERROR_VIRTDISK_DISK_ALREADY_OWNED syscall.Errno = 0xC03A001E + ERROR_VIRTDISK_DISK_ONLINE_AND_WRITABLE syscall.Errno = 0xC03A001F + ERROR_CTLOG_TRACKING_NOT_INITIALIZED syscall.Errno = 0xC03A0020 + ERROR_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE syscall.Errno = 0xC03A0021 + ERROR_CTLOG_VHD_CHANGED_OFFLINE syscall.Errno = 0xC03A0022 + ERROR_CTLOG_INVALID_TRACKING_STATE syscall.Errno = 0xC03A0023 + ERROR_CTLOG_INCONSISTENT_TRACKING_FILE syscall.Errno = 0xC03A0024 + ERROR_VHD_RESIZE_WOULD_TRUNCATE_DATA syscall.Errno = 0xC03A0025 + ERROR_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0026 + ERROR_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0027 + ERROR_VHD_METADATA_FULL syscall.Errno = 0xC03A0028 + ERROR_VHD_INVALID_CHANGE_TRACKING_ID syscall.Errno = 0xC03A0029 + ERROR_VHD_CHANGE_TRACKING_DISABLED syscall.Errno = 0xC03A002A + ERROR_VHD_MISSING_CHANGE_TRACKING_INFORMATION syscall.Errno = 0xC03A0030 + ERROR_QUERY_STORAGE_ERROR syscall.Errno = 0x803A0001 + HCN_E_NETWORK_NOT_FOUND Handle = 0x803B0001 + HCN_E_ENDPOINT_NOT_FOUND Handle = 0x803B0002 + HCN_E_LAYER_NOT_FOUND Handle = 0x803B0003 + HCN_E_SWITCH_NOT_FOUND Handle = 0x803B0004 + HCN_E_SUBNET_NOT_FOUND Handle = 0x803B0005 + HCN_E_ADAPTER_NOT_FOUND Handle = 0x803B0006 + HCN_E_PORT_NOT_FOUND Handle = 0x803B0007 + HCN_E_POLICY_NOT_FOUND Handle = 0x803B0008 + HCN_E_VFP_PORTSETTING_NOT_FOUND Handle = 0x803B0009 + HCN_E_INVALID_NETWORK Handle = 0x803B000A + HCN_E_INVALID_NETWORK_TYPE Handle = 0x803B000B + HCN_E_INVALID_ENDPOINT Handle = 0x803B000C + HCN_E_INVALID_POLICY Handle = 0x803B000D + HCN_E_INVALID_POLICY_TYPE Handle = 0x803B000E + HCN_E_INVALID_REMOTE_ENDPOINT_OPERATION Handle = 0x803B000F + HCN_E_NETWORK_ALREADY_EXISTS Handle = 0x803B0010 + HCN_E_LAYER_ALREADY_EXISTS Handle = 0x803B0011 + HCN_E_POLICY_ALREADY_EXISTS Handle = 0x803B0012 + HCN_E_PORT_ALREADY_EXISTS Handle = 0x803B0013 + HCN_E_ENDPOINT_ALREADY_ATTACHED Handle = 0x803B0014 + HCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0015 + HCN_E_MAPPING_NOT_SUPPORTED Handle = 0x803B0016 + HCN_E_DEGRADED_OPERATION Handle = 0x803B0017 + HCN_E_SHARED_SWITCH_MODIFICATION Handle = 0x803B0018 + HCN_E_GUID_CONVERSION_FAILURE Handle = 0x803B0019 + HCN_E_REGKEY_FAILURE Handle = 0x803B001A + HCN_E_INVALID_JSON Handle = 0x803B001B + HCN_E_INVALID_JSON_REFERENCE Handle = 0x803B001C + HCN_E_ENDPOINT_SHARING_DISABLED Handle = 0x803B001D + HCN_E_INVALID_IP Handle = 0x803B001E + HCN_E_SWITCH_EXTENSION_NOT_FOUND Handle = 0x803B001F + HCN_E_MANAGER_STOPPED Handle = 0x803B0020 + GCN_E_MODULE_NOT_FOUND Handle = 0x803B0021 + 
GCN_E_NO_REQUEST_HANDLERS Handle = 0x803B0022 + GCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0023 + GCN_E_RUNTIMEKEYS_FAILED Handle = 0x803B0024 + GCN_E_NETADAPTER_TIMEOUT Handle = 0x803B0025 + GCN_E_NETADAPTER_NOT_FOUND Handle = 0x803B0026 + GCN_E_NETCOMPARTMENT_NOT_FOUND Handle = 0x803B0027 + GCN_E_NETINTERFACE_NOT_FOUND Handle = 0x803B0028 + GCN_E_DEFAULTNAMESPACE_EXISTS Handle = 0x803B0029 + SDIAG_E_CANCELLED syscall.Errno = 0x803C0100 + SDIAG_E_SCRIPT syscall.Errno = 0x803C0101 + SDIAG_E_POWERSHELL syscall.Errno = 0x803C0102 + SDIAG_E_MANAGEDHOST syscall.Errno = 0x803C0103 + SDIAG_E_NOVERIFIER syscall.Errno = 0x803C0104 + SDIAG_S_CANNOTRUN syscall.Errno = 0x003C0105 + SDIAG_E_DISABLED syscall.Errno = 0x803C0106 + SDIAG_E_TRUST syscall.Errno = 0x803C0107 + SDIAG_E_CANNOTRUN syscall.Errno = 0x803C0108 + SDIAG_E_VERSION syscall.Errno = 0x803C0109 + SDIAG_E_RESOURCE syscall.Errno = 0x803C010A + SDIAG_E_ROOTCAUSE syscall.Errno = 0x803C010B + WPN_E_CHANNEL_CLOSED Handle = 0x803E0100 + WPN_E_CHANNEL_REQUEST_NOT_COMPLETE Handle = 0x803E0101 + WPN_E_INVALID_APP Handle = 0x803E0102 + WPN_E_OUTSTANDING_CHANNEL_REQUEST Handle = 0x803E0103 + WPN_E_DUPLICATE_CHANNEL Handle = 0x803E0104 + WPN_E_PLATFORM_UNAVAILABLE Handle = 0x803E0105 + WPN_E_NOTIFICATION_POSTED Handle = 0x803E0106 + WPN_E_NOTIFICATION_HIDDEN Handle = 0x803E0107 + WPN_E_NOTIFICATION_NOT_POSTED Handle = 0x803E0108 + WPN_E_CLOUD_DISABLED Handle = 0x803E0109 + WPN_E_CLOUD_INCAPABLE Handle = 0x803E0110 + WPN_E_CLOUD_AUTH_UNAVAILABLE Handle = 0x803E011A + WPN_E_CLOUD_SERVICE_UNAVAILABLE Handle = 0x803E011B + WPN_E_FAILED_LOCK_SCREEN_UPDATE_INTIALIZATION Handle = 0x803E011C + WPN_E_NOTIFICATION_DISABLED Handle = 0x803E0111 + WPN_E_NOTIFICATION_INCAPABLE Handle = 0x803E0112 + WPN_E_INTERNET_INCAPABLE Handle = 0x803E0113 + WPN_E_NOTIFICATION_TYPE_DISABLED Handle = 0x803E0114 + WPN_E_NOTIFICATION_SIZE Handle = 0x803E0115 + WPN_E_TAG_SIZE Handle = 0x803E0116 + WPN_E_ACCESS_DENIED Handle = 0x803E0117 + WPN_E_DUPLICATE_REGISTRATION Handle = 0x803E0118 + WPN_E_PUSH_NOTIFICATION_INCAPABLE Handle = 0x803E0119 + WPN_E_DEV_ID_SIZE Handle = 0x803E0120 + WPN_E_TAG_ALPHANUMERIC Handle = 0x803E012A + WPN_E_INVALID_HTTP_STATUS_CODE Handle = 0x803E012B + WPN_E_OUT_OF_SESSION Handle = 0x803E0200 + WPN_E_POWER_SAVE Handle = 0x803E0201 + WPN_E_IMAGE_NOT_FOUND_IN_CACHE Handle = 0x803E0202 + WPN_E_ALL_URL_NOT_COMPLETED Handle = 0x803E0203 + WPN_E_INVALID_CLOUD_IMAGE Handle = 0x803E0204 + WPN_E_NOTIFICATION_ID_MATCHED Handle = 0x803E0205 + WPN_E_CALLBACK_ALREADY_REGISTERED Handle = 0x803E0206 + WPN_E_TOAST_NOTIFICATION_DROPPED Handle = 0x803E0207 + WPN_E_STORAGE_LOCKED Handle = 0x803E0208 + WPN_E_GROUP_SIZE Handle = 0x803E0209 + WPN_E_GROUP_ALPHANUMERIC Handle = 0x803E020A + WPN_E_CLOUD_DISABLED_FOR_APP Handle = 0x803E020B + E_MBN_CONTEXT_NOT_ACTIVATED Handle = 0x80548201 + E_MBN_BAD_SIM Handle = 0x80548202 + E_MBN_DATA_CLASS_NOT_AVAILABLE Handle = 0x80548203 + E_MBN_INVALID_ACCESS_STRING Handle = 0x80548204 + E_MBN_MAX_ACTIVATED_CONTEXTS Handle = 0x80548205 + E_MBN_PACKET_SVC_DETACHED Handle = 0x80548206 + E_MBN_PROVIDER_NOT_VISIBLE Handle = 0x80548207 + E_MBN_RADIO_POWER_OFF Handle = 0x80548208 + E_MBN_SERVICE_NOT_ACTIVATED Handle = 0x80548209 + E_MBN_SIM_NOT_INSERTED Handle = 0x8054820A + E_MBN_VOICE_CALL_IN_PROGRESS Handle = 0x8054820B + E_MBN_INVALID_CACHE Handle = 0x8054820C + E_MBN_NOT_REGISTERED Handle = 0x8054820D + E_MBN_PROVIDERS_NOT_FOUND Handle = 0x8054820E + E_MBN_PIN_NOT_SUPPORTED Handle = 0x8054820F + E_MBN_PIN_REQUIRED Handle = 0x80548210 + 
E_MBN_PIN_DISABLED Handle = 0x80548211 + E_MBN_FAILURE Handle = 0x80548212 + E_MBN_INVALID_PROFILE Handle = 0x80548218 + E_MBN_DEFAULT_PROFILE_EXIST Handle = 0x80548219 + E_MBN_SMS_ENCODING_NOT_SUPPORTED Handle = 0x80548220 + E_MBN_SMS_FILTER_NOT_SUPPORTED Handle = 0x80548221 + E_MBN_SMS_INVALID_MEMORY_INDEX Handle = 0x80548222 + E_MBN_SMS_LANG_NOT_SUPPORTED Handle = 0x80548223 + E_MBN_SMS_MEMORY_FAILURE Handle = 0x80548224 + E_MBN_SMS_NETWORK_TIMEOUT Handle = 0x80548225 + E_MBN_SMS_UNKNOWN_SMSC_ADDRESS Handle = 0x80548226 + E_MBN_SMS_FORMAT_NOT_SUPPORTED Handle = 0x80548227 + E_MBN_SMS_OPERATION_NOT_ALLOWED Handle = 0x80548228 + E_MBN_SMS_MEMORY_FULL Handle = 0x80548229 + PEER_E_IPV6_NOT_INSTALLED Handle = 0x80630001 + PEER_E_NOT_INITIALIZED Handle = 0x80630002 + PEER_E_CANNOT_START_SERVICE Handle = 0x80630003 + PEER_E_NOT_LICENSED Handle = 0x80630004 + PEER_E_INVALID_GRAPH Handle = 0x80630010 + PEER_E_DBNAME_CHANGED Handle = 0x80630011 + PEER_E_DUPLICATE_GRAPH Handle = 0x80630012 + PEER_E_GRAPH_NOT_READY Handle = 0x80630013 + PEER_E_GRAPH_SHUTTING_DOWN Handle = 0x80630014 + PEER_E_GRAPH_IN_USE Handle = 0x80630015 + PEER_E_INVALID_DATABASE Handle = 0x80630016 + PEER_E_TOO_MANY_ATTRIBUTES Handle = 0x80630017 + PEER_E_CONNECTION_NOT_FOUND Handle = 0x80630103 + PEER_E_CONNECT_SELF Handle = 0x80630106 + PEER_E_ALREADY_LISTENING Handle = 0x80630107 + PEER_E_NODE_NOT_FOUND Handle = 0x80630108 + PEER_E_CONNECTION_FAILED Handle = 0x80630109 + PEER_E_CONNECTION_NOT_AUTHENTICATED Handle = 0x8063010A + PEER_E_CONNECTION_REFUSED Handle = 0x8063010B + PEER_E_CLASSIFIER_TOO_LONG Handle = 0x80630201 + PEER_E_TOO_MANY_IDENTITIES Handle = 0x80630202 + PEER_E_NO_KEY_ACCESS Handle = 0x80630203 + PEER_E_GROUPS_EXIST Handle = 0x80630204 + PEER_E_RECORD_NOT_FOUND Handle = 0x80630301 + PEER_E_DATABASE_ACCESSDENIED Handle = 0x80630302 + PEER_E_DBINITIALIZATION_FAILED Handle = 0x80630303 + PEER_E_MAX_RECORD_SIZE_EXCEEDED Handle = 0x80630304 + PEER_E_DATABASE_ALREADY_PRESENT Handle = 0x80630305 + PEER_E_DATABASE_NOT_PRESENT Handle = 0x80630306 + PEER_E_IDENTITY_NOT_FOUND Handle = 0x80630401 + PEER_E_EVENT_HANDLE_NOT_FOUND Handle = 0x80630501 + PEER_E_INVALID_SEARCH Handle = 0x80630601 + PEER_E_INVALID_ATTRIBUTES Handle = 0x80630602 + PEER_E_INVITATION_NOT_TRUSTED Handle = 0x80630701 + PEER_E_CHAIN_TOO_LONG Handle = 0x80630703 + PEER_E_INVALID_TIME_PERIOD Handle = 0x80630705 + PEER_E_CIRCULAR_CHAIN_DETECTED Handle = 0x80630706 + PEER_E_CERT_STORE_CORRUPTED Handle = 0x80630801 + PEER_E_NO_CLOUD Handle = 0x80631001 + PEER_E_CLOUD_NAME_AMBIGUOUS Handle = 0x80631005 + PEER_E_INVALID_RECORD Handle = 0x80632010 + PEER_E_NOT_AUTHORIZED Handle = 0x80632020 + PEER_E_PASSWORD_DOES_NOT_MEET_POLICY Handle = 0x80632021 + PEER_E_DEFERRED_VALIDATION Handle = 0x80632030 + PEER_E_INVALID_GROUP_PROPERTIES Handle = 0x80632040 + PEER_E_INVALID_PEER_NAME Handle = 0x80632050 + PEER_E_INVALID_CLASSIFIER Handle = 0x80632060 + PEER_E_INVALID_FRIENDLY_NAME Handle = 0x80632070 + PEER_E_INVALID_ROLE_PROPERTY Handle = 0x80632071 + PEER_E_INVALID_CLASSIFIER_PROPERTY Handle = 0x80632072 + PEER_E_INVALID_RECORD_EXPIRATION Handle = 0x80632080 + PEER_E_INVALID_CREDENTIAL_INFO Handle = 0x80632081 + PEER_E_INVALID_CREDENTIAL Handle = 0x80632082 + PEER_E_INVALID_RECORD_SIZE Handle = 0x80632083 + PEER_E_UNSUPPORTED_VERSION Handle = 0x80632090 + PEER_E_GROUP_NOT_READY Handle = 0x80632091 + PEER_E_GROUP_IN_USE Handle = 0x80632092 + PEER_E_INVALID_GROUP Handle = 0x80632093 + PEER_E_NO_MEMBERS_FOUND Handle = 0x80632094 + PEER_E_NO_MEMBER_CONNECTIONS Handle 
= 0x80632095 + PEER_E_UNABLE_TO_LISTEN Handle = 0x80632096 + PEER_E_IDENTITY_DELETED Handle = 0x806320A0 + PEER_E_SERVICE_NOT_AVAILABLE Handle = 0x806320A1 + PEER_E_CONTACT_NOT_FOUND Handle = 0x80636001 + PEER_S_GRAPH_DATA_CREATED Handle = 0x00630001 + PEER_S_NO_EVENT_DATA Handle = 0x00630002 + PEER_S_ALREADY_CONNECTED Handle = 0x00632000 + PEER_S_SUBSCRIPTION_EXISTS Handle = 0x00636000 + PEER_S_NO_CONNECTIVITY Handle = 0x00630005 + PEER_S_ALREADY_A_MEMBER Handle = 0x00630006 + PEER_E_CANNOT_CONVERT_PEER_NAME Handle = 0x80634001 + PEER_E_INVALID_PEER_HOST_NAME Handle = 0x80634002 + PEER_E_NO_MORE Handle = 0x80634003 + PEER_E_PNRP_DUPLICATE_PEER_NAME Handle = 0x80634005 + PEER_E_INVITE_CANCELLED Handle = 0x80637000 + PEER_E_INVITE_RESPONSE_NOT_AVAILABLE Handle = 0x80637001 + PEER_E_NOT_SIGNED_IN Handle = 0x80637003 + PEER_E_PRIVACY_DECLINED Handle = 0x80637004 + PEER_E_TIMEOUT Handle = 0x80637005 + PEER_E_INVALID_ADDRESS Handle = 0x80637007 + PEER_E_FW_EXCEPTION_DISABLED Handle = 0x80637008 + PEER_E_FW_BLOCKED_BY_POLICY Handle = 0x80637009 + PEER_E_FW_BLOCKED_BY_SHIELDS_UP Handle = 0x8063700A + PEER_E_FW_DECLINED Handle = 0x8063700B + UI_E_CREATE_FAILED Handle = 0x802A0001 + UI_E_SHUTDOWN_CALLED Handle = 0x802A0002 + UI_E_ILLEGAL_REENTRANCY Handle = 0x802A0003 + UI_E_OBJECT_SEALED Handle = 0x802A0004 + UI_E_VALUE_NOT_SET Handle = 0x802A0005 + UI_E_VALUE_NOT_DETERMINED Handle = 0x802A0006 + UI_E_INVALID_OUTPUT Handle = 0x802A0007 + UI_E_BOOLEAN_EXPECTED Handle = 0x802A0008 + UI_E_DIFFERENT_OWNER Handle = 0x802A0009 + UI_E_AMBIGUOUS_MATCH Handle = 0x802A000A + UI_E_FP_OVERFLOW Handle = 0x802A000B + UI_E_WRONG_THREAD Handle = 0x802A000C + UI_E_STORYBOARD_ACTIVE Handle = 0x802A0101 + UI_E_STORYBOARD_NOT_PLAYING Handle = 0x802A0102 + UI_E_START_KEYFRAME_AFTER_END Handle = 0x802A0103 + UI_E_END_KEYFRAME_NOT_DETERMINED Handle = 0x802A0104 + UI_E_LOOPS_OVERLAP Handle = 0x802A0105 + UI_E_TRANSITION_ALREADY_USED Handle = 0x802A0106 + UI_E_TRANSITION_NOT_IN_STORYBOARD Handle = 0x802A0107 + UI_E_TRANSITION_ECLIPSED Handle = 0x802A0108 + UI_E_TIME_BEFORE_LAST_UPDATE Handle = 0x802A0109 + UI_E_TIMER_CLIENT_ALREADY_CONNECTED Handle = 0x802A010A + UI_E_INVALID_DIMENSION Handle = 0x802A010B + UI_E_PRIMITIVE_OUT_OF_BOUNDS Handle = 0x802A010C + UI_E_WINDOW_CLOSED Handle = 0x802A0201 + E_BLUETOOTH_ATT_INVALID_HANDLE Handle = 0x80650001 + E_BLUETOOTH_ATT_READ_NOT_PERMITTED Handle = 0x80650002 + E_BLUETOOTH_ATT_WRITE_NOT_PERMITTED Handle = 0x80650003 + E_BLUETOOTH_ATT_INVALID_PDU Handle = 0x80650004 + E_BLUETOOTH_ATT_INSUFFICIENT_AUTHENTICATION Handle = 0x80650005 + E_BLUETOOTH_ATT_REQUEST_NOT_SUPPORTED Handle = 0x80650006 + E_BLUETOOTH_ATT_INVALID_OFFSET Handle = 0x80650007 + E_BLUETOOTH_ATT_INSUFFICIENT_AUTHORIZATION Handle = 0x80650008 + E_BLUETOOTH_ATT_PREPARE_QUEUE_FULL Handle = 0x80650009 + E_BLUETOOTH_ATT_ATTRIBUTE_NOT_FOUND Handle = 0x8065000A + E_BLUETOOTH_ATT_ATTRIBUTE_NOT_LONG Handle = 0x8065000B + E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE Handle = 0x8065000C + E_BLUETOOTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH Handle = 0x8065000D + E_BLUETOOTH_ATT_UNLIKELY Handle = 0x8065000E + E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION Handle = 0x8065000F + E_BLUETOOTH_ATT_UNSUPPORTED_GROUP_TYPE Handle = 0x80650010 + E_BLUETOOTH_ATT_INSUFFICIENT_RESOURCES Handle = 0x80650011 + E_BLUETOOTH_ATT_UNKNOWN_ERROR Handle = 0x80651000 + E_AUDIO_ENGINE_NODE_NOT_FOUND Handle = 0x80660001 + E_HDAUDIO_EMPTY_CONNECTION_LIST Handle = 0x80660002 + E_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED Handle = 0x80660003 + 
E_HDAUDIO_NO_LOGICAL_DEVICES_CREATED Handle = 0x80660004 + E_HDAUDIO_NULL_LINKED_LIST_ENTRY Handle = 0x80660005 + STATEREPOSITORY_E_CONCURRENCY_LOCKING_FAILURE Handle = 0x80670001 + STATEREPOSITORY_E_STATEMENT_INPROGRESS Handle = 0x80670002 + STATEREPOSITORY_E_CONFIGURATION_INVALID Handle = 0x80670003 + STATEREPOSITORY_E_UNKNOWN_SCHEMA_VERSION Handle = 0x80670004 + STATEREPOSITORY_ERROR_DICTIONARY_CORRUPTED Handle = 0x80670005 + STATEREPOSITORY_E_BLOCKED Handle = 0x80670006 + STATEREPOSITORY_E_BUSY_RETRY Handle = 0x80670007 + STATEREPOSITORY_E_BUSY_RECOVERY_RETRY Handle = 0x80670008 + STATEREPOSITORY_E_LOCKED_RETRY Handle = 0x80670009 + STATEREPOSITORY_E_LOCKED_SHAREDCACHE_RETRY Handle = 0x8067000A + STATEREPOSITORY_E_TRANSACTION_REQUIRED Handle = 0x8067000B + STATEREPOSITORY_E_BUSY_TIMEOUT_EXCEEDED Handle = 0x8067000C + STATEREPOSITORY_E_BUSY_RECOVERY_TIMEOUT_EXCEEDED Handle = 0x8067000D + STATEREPOSITORY_E_LOCKED_TIMEOUT_EXCEEDED Handle = 0x8067000E + STATEREPOSITORY_E_LOCKED_SHAREDCACHE_TIMEOUT_EXCEEDED Handle = 0x8067000F + STATEREPOSITORY_E_SERVICE_STOP_IN_PROGRESS Handle = 0x80670010 + STATEREPOSTORY_E_NESTED_TRANSACTION_NOT_SUPPORTED Handle = 0x80670011 + STATEREPOSITORY_ERROR_CACHE_CORRUPTED Handle = 0x80670012 + STATEREPOSITORY_TRANSACTION_CALLER_ID_CHANGED Handle = 0x00670013 + STATEREPOSITORY_TRANSACTION_IN_PROGRESS Handle = 0x00670014 + ERROR_SPACES_POOL_WAS_DELETED Handle = 0x00E70001 + ERROR_SPACES_FAULT_DOMAIN_TYPE_INVALID Handle = 0x80E70001 + ERROR_SPACES_INTERNAL_ERROR Handle = 0x80E70002 + ERROR_SPACES_RESILIENCY_TYPE_INVALID Handle = 0x80E70003 + ERROR_SPACES_DRIVE_SECTOR_SIZE_INVALID Handle = 0x80E70004 + ERROR_SPACES_DRIVE_REDUNDANCY_INVALID Handle = 0x80E70006 + ERROR_SPACES_NUMBER_OF_DATA_COPIES_INVALID Handle = 0x80E70007 + ERROR_SPACES_PARITY_LAYOUT_INVALID Handle = 0x80E70008 + ERROR_SPACES_INTERLEAVE_LENGTH_INVALID Handle = 0x80E70009 + ERROR_SPACES_NUMBER_OF_COLUMNS_INVALID Handle = 0x80E7000A + ERROR_SPACES_NOT_ENOUGH_DRIVES Handle = 0x80E7000B + ERROR_SPACES_EXTENDED_ERROR Handle = 0x80E7000C + ERROR_SPACES_PROVISIONING_TYPE_INVALID Handle = 0x80E7000D + ERROR_SPACES_ALLOCATION_SIZE_INVALID Handle = 0x80E7000E + ERROR_SPACES_ENCLOSURE_AWARE_INVALID Handle = 0x80E7000F + ERROR_SPACES_WRITE_CACHE_SIZE_INVALID Handle = 0x80E70010 + ERROR_SPACES_NUMBER_OF_GROUPS_INVALID Handle = 0x80E70011 + ERROR_SPACES_DRIVE_OPERATIONAL_STATE_INVALID Handle = 0x80E70012 + ERROR_SPACES_ENTRY_INCOMPLETE Handle = 0x80E70013 + ERROR_SPACES_ENTRY_INVALID Handle = 0x80E70014 + ERROR_VOLSNAP_BOOTFILE_NOT_VALID Handle = 0x80820001 + ERROR_VOLSNAP_ACTIVATION_TIMEOUT Handle = 0x80820002 + ERROR_TIERING_NOT_SUPPORTED_ON_VOLUME Handle = 0x80830001 + ERROR_TIERING_VOLUME_DISMOUNT_IN_PROGRESS Handle = 0x80830002 + ERROR_TIERING_STORAGE_TIER_NOT_FOUND Handle = 0x80830003 + ERROR_TIERING_INVALID_FILE_ID Handle = 0x80830004 + ERROR_TIERING_WRONG_CLUSTER_NODE Handle = 0x80830005 + ERROR_TIERING_ALREADY_PROCESSING Handle = 0x80830006 + ERROR_TIERING_CANNOT_PIN_OBJECT Handle = 0x80830007 + ERROR_TIERING_FILE_IS_NOT_PINNED Handle = 0x80830008 + ERROR_NOT_A_TIERED_VOLUME Handle = 0x80830009 + ERROR_ATTRIBUTE_NOT_PRESENT Handle = 0x8083000A + ERROR_SECCORE_INVALID_COMMAND Handle = 0xC0E80000 + ERROR_NO_APPLICABLE_APP_LICENSES_FOUND Handle = 0xC0EA0001 + ERROR_CLIP_LICENSE_NOT_FOUND Handle = 0xC0EA0002 + ERROR_CLIP_DEVICE_LICENSE_MISSING Handle = 0xC0EA0003 + ERROR_CLIP_LICENSE_INVALID_SIGNATURE Handle = 0xC0EA0004 + ERROR_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID Handle = 0xC0EA0005 + 
ERROR_CLIP_LICENSE_EXPIRED Handle = 0xC0EA0006 + ERROR_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE Handle = 0xC0EA0007 + ERROR_CLIP_LICENSE_NOT_SIGNED Handle = 0xC0EA0008 + ERROR_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE Handle = 0xC0EA0009 + ERROR_CLIP_LICENSE_DEVICE_ID_MISMATCH Handle = 0xC0EA000A + DXGI_STATUS_OCCLUDED Handle = 0x087A0001 + DXGI_STATUS_CLIPPED Handle = 0x087A0002 + DXGI_STATUS_NO_REDIRECTION Handle = 0x087A0004 + DXGI_STATUS_NO_DESKTOP_ACCESS Handle = 0x087A0005 + DXGI_STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x087A0006 + DXGI_STATUS_MODE_CHANGED Handle = 0x087A0007 + DXGI_STATUS_MODE_CHANGE_IN_PROGRESS Handle = 0x087A0008 + DXGI_ERROR_INVALID_CALL Handle = 0x887A0001 + DXGI_ERROR_NOT_FOUND Handle = 0x887A0002 + DXGI_ERROR_MORE_DATA Handle = 0x887A0003 + DXGI_ERROR_UNSUPPORTED Handle = 0x887A0004 + DXGI_ERROR_DEVICE_REMOVED Handle = 0x887A0005 + DXGI_ERROR_DEVICE_HUNG Handle = 0x887A0006 + DXGI_ERROR_DEVICE_RESET Handle = 0x887A0007 + DXGI_ERROR_WAS_STILL_DRAWING Handle = 0x887A000A + DXGI_ERROR_FRAME_STATISTICS_DISJOINT Handle = 0x887A000B + DXGI_ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x887A000C + DXGI_ERROR_DRIVER_INTERNAL_ERROR Handle = 0x887A0020 + DXGI_ERROR_NONEXCLUSIVE Handle = 0x887A0021 + DXGI_ERROR_NOT_CURRENTLY_AVAILABLE Handle = 0x887A0022 + DXGI_ERROR_REMOTE_CLIENT_DISCONNECTED Handle = 0x887A0023 + DXGI_ERROR_REMOTE_OUTOFMEMORY Handle = 0x887A0024 + DXGI_ERROR_ACCESS_LOST Handle = 0x887A0026 + DXGI_ERROR_WAIT_TIMEOUT Handle = 0x887A0027 + DXGI_ERROR_SESSION_DISCONNECTED Handle = 0x887A0028 + DXGI_ERROR_RESTRICT_TO_OUTPUT_STALE Handle = 0x887A0029 + DXGI_ERROR_CANNOT_PROTECT_CONTENT Handle = 0x887A002A + DXGI_ERROR_ACCESS_DENIED Handle = 0x887A002B + DXGI_ERROR_NAME_ALREADY_EXISTS Handle = 0x887A002C + DXGI_ERROR_SDK_COMPONENT_MISSING Handle = 0x887A002D + DXGI_ERROR_NOT_CURRENT Handle = 0x887A002E + DXGI_ERROR_HW_PROTECTION_OUTOFMEMORY Handle = 0x887A0030 + DXGI_ERROR_DYNAMIC_CODE_POLICY_VIOLATION Handle = 0x887A0031 + DXGI_ERROR_NON_COMPOSITED_UI Handle = 0x887A0032 + DXGI_STATUS_UNOCCLUDED Handle = 0x087A0009 + DXGI_STATUS_DDA_WAS_STILL_DRAWING Handle = 0x087A000A + DXGI_ERROR_MODE_CHANGE_IN_PROGRESS Handle = 0x887A0025 + DXGI_STATUS_PRESENT_REQUIRED Handle = 0x087A002F + DXGI_ERROR_CACHE_CORRUPT Handle = 0x887A0033 + DXGI_ERROR_CACHE_FULL Handle = 0x887A0034 + DXGI_ERROR_CACHE_HASH_COLLISION Handle = 0x887A0035 + DXGI_ERROR_ALREADY_EXISTS Handle = 0x887A0036 + DXGI_DDI_ERR_WASSTILLDRAWING Handle = 0x887B0001 + DXGI_DDI_ERR_UNSUPPORTED Handle = 0x887B0002 + DXGI_DDI_ERR_NONEXCLUSIVE Handle = 0x887B0003 + D3D10_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x88790001 + D3D10_ERROR_FILE_NOT_FOUND Handle = 0x88790002 + D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x887C0001 + D3D11_ERROR_FILE_NOT_FOUND Handle = 0x887C0002 + D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS Handle = 0x887C0003 + D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD Handle = 0x887C0004 + D3D12_ERROR_ADAPTER_NOT_FOUND Handle = 0x887E0001 + D3D12_ERROR_DRIVER_VERSION_MISMATCH Handle = 0x887E0002 + D2DERR_WRONG_STATE Handle = 0x88990001 + D2DERR_NOT_INITIALIZED Handle = 0x88990002 + D2DERR_UNSUPPORTED_OPERATION Handle = 0x88990003 + D2DERR_SCANNER_FAILED Handle = 0x88990004 + D2DERR_SCREEN_ACCESS_DENIED Handle = 0x88990005 + D2DERR_DISPLAY_STATE_INVALID Handle = 0x88990006 + D2DERR_ZERO_VECTOR Handle = 0x88990007 + D2DERR_INTERNAL_ERROR Handle = 0x88990008 + D2DERR_DISPLAY_FORMAT_NOT_SUPPORTED Handle = 0x88990009 + D2DERR_INVALID_CALL Handle = 0x8899000A + 
D2DERR_NO_HARDWARE_DEVICE Handle = 0x8899000B + D2DERR_RECREATE_TARGET Handle = 0x8899000C + D2DERR_TOO_MANY_SHADER_ELEMENTS Handle = 0x8899000D + D2DERR_SHADER_COMPILE_FAILED Handle = 0x8899000E + D2DERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8899000F + D2DERR_UNSUPPORTED_VERSION Handle = 0x88990010 + D2DERR_BAD_NUMBER Handle = 0x88990011 + D2DERR_WRONG_FACTORY Handle = 0x88990012 + D2DERR_LAYER_ALREADY_IN_USE Handle = 0x88990013 + D2DERR_POP_CALL_DID_NOT_MATCH_PUSH Handle = 0x88990014 + D2DERR_WRONG_RESOURCE_DOMAIN Handle = 0x88990015 + D2DERR_PUSH_POP_UNBALANCED Handle = 0x88990016 + D2DERR_RENDER_TARGET_HAS_LAYER_OR_CLIPRECT Handle = 0x88990017 + D2DERR_INCOMPATIBLE_BRUSH_TYPES Handle = 0x88990018 + D2DERR_WIN32_ERROR Handle = 0x88990019 + D2DERR_TARGET_NOT_GDI_COMPATIBLE Handle = 0x8899001A + D2DERR_TEXT_EFFECT_IS_WRONG_TYPE Handle = 0x8899001B + D2DERR_TEXT_RENDERER_NOT_RELEASED Handle = 0x8899001C + D2DERR_EXCEEDS_MAX_BITMAP_SIZE Handle = 0x8899001D + D2DERR_INVALID_GRAPH_CONFIGURATION Handle = 0x8899001E + D2DERR_INVALID_INTERNAL_GRAPH_CONFIGURATION Handle = 0x8899001F + D2DERR_CYCLIC_GRAPH Handle = 0x88990020 + D2DERR_BITMAP_CANNOT_DRAW Handle = 0x88990021 + D2DERR_OUTSTANDING_BITMAP_REFERENCES Handle = 0x88990022 + D2DERR_ORIGINAL_TARGET_NOT_BOUND Handle = 0x88990023 + D2DERR_INVALID_TARGET Handle = 0x88990024 + D2DERR_BITMAP_BOUND_AS_TARGET Handle = 0x88990025 + D2DERR_INSUFFICIENT_DEVICE_CAPABILITIES Handle = 0x88990026 + D2DERR_INTERMEDIATE_TOO_LARGE Handle = 0x88990027 + D2DERR_EFFECT_IS_NOT_REGISTERED Handle = 0x88990028 + D2DERR_INVALID_PROPERTY Handle = 0x88990029 + D2DERR_NO_SUBPROPERTIES Handle = 0x8899002A + D2DERR_PRINT_JOB_CLOSED Handle = 0x8899002B + D2DERR_PRINT_FORMAT_NOT_SUPPORTED Handle = 0x8899002C + D2DERR_TOO_MANY_TRANSFORM_INPUTS Handle = 0x8899002D + D2DERR_INVALID_GLYPH_IMAGE Handle = 0x8899002E + DWRITE_E_FILEFORMAT Handle = 0x88985000 + DWRITE_E_UNEXPECTED Handle = 0x88985001 + DWRITE_E_NOFONT Handle = 0x88985002 + DWRITE_E_FILENOTFOUND Handle = 0x88985003 + DWRITE_E_FILEACCESS Handle = 0x88985004 + DWRITE_E_FONTCOLLECTIONOBSOLETE Handle = 0x88985005 + DWRITE_E_ALREADYREGISTERED Handle = 0x88985006 + DWRITE_E_CACHEFORMAT Handle = 0x88985007 + DWRITE_E_CACHEVERSION Handle = 0x88985008 + DWRITE_E_UNSUPPORTEDOPERATION Handle = 0x88985009 + DWRITE_E_TEXTRENDERERINCOMPATIBLE Handle = 0x8898500A + DWRITE_E_FLOWDIRECTIONCONFLICTS Handle = 0x8898500B + DWRITE_E_NOCOLOR Handle = 0x8898500C + DWRITE_E_REMOTEFONT Handle = 0x8898500D + DWRITE_E_DOWNLOADCANCELLED Handle = 0x8898500E + DWRITE_E_DOWNLOADFAILED Handle = 0x8898500F + DWRITE_E_TOOMANYDOWNLOADS Handle = 0x88985010 + WINCODEC_ERR_WRONGSTATE Handle = 0x88982F04 + WINCODEC_ERR_VALUEOUTOFRANGE Handle = 0x88982F05 + WINCODEC_ERR_UNKNOWNIMAGEFORMAT Handle = 0x88982F07 + WINCODEC_ERR_UNSUPPORTEDVERSION Handle = 0x88982F0B + WINCODEC_ERR_NOTINITIALIZED Handle = 0x88982F0C + WINCODEC_ERR_ALREADYLOCKED Handle = 0x88982F0D + WINCODEC_ERR_PROPERTYNOTFOUND Handle = 0x88982F40 + WINCODEC_ERR_PROPERTYNOTSUPPORTED Handle = 0x88982F41 + WINCODEC_ERR_PROPERTYSIZE Handle = 0x88982F42 + WINCODEC_ERR_CODECPRESENT Handle = 0x88982F43 + WINCODEC_ERR_CODECNOTHUMBNAIL Handle = 0x88982F44 + WINCODEC_ERR_PALETTEUNAVAILABLE Handle = 0x88982F45 + WINCODEC_ERR_CODECTOOMANYSCANLINES Handle = 0x88982F46 + WINCODEC_ERR_INTERNALERROR Handle = 0x88982F48 + WINCODEC_ERR_SOURCERECTDOESNOTMATCHDIMENSIONS Handle = 0x88982F49 + WINCODEC_ERR_COMPONENTNOTFOUND Handle = 0x88982F50 + WINCODEC_ERR_IMAGESIZEOUTOFRANGE Handle = 0x88982F51 + 
WINCODEC_ERR_TOOMUCHMETADATA Handle = 0x88982F52 + WINCODEC_ERR_BADIMAGE Handle = 0x88982F60 + WINCODEC_ERR_BADHEADER Handle = 0x88982F61 + WINCODEC_ERR_FRAMEMISSING Handle = 0x88982F62 + WINCODEC_ERR_BADMETADATAHEADER Handle = 0x88982F63 + WINCODEC_ERR_BADSTREAMDATA Handle = 0x88982F70 + WINCODEC_ERR_STREAMWRITE Handle = 0x88982F71 + WINCODEC_ERR_STREAMREAD Handle = 0x88982F72 + WINCODEC_ERR_STREAMNOTAVAILABLE Handle = 0x88982F73 + WINCODEC_ERR_UNSUPPORTEDPIXELFORMAT Handle = 0x88982F80 + WINCODEC_ERR_UNSUPPORTEDOPERATION Handle = 0x88982F81 + WINCODEC_ERR_INVALIDREGISTRATION Handle = 0x88982F8A + WINCODEC_ERR_COMPONENTINITIALIZEFAILURE Handle = 0x88982F8B + WINCODEC_ERR_INSUFFICIENTBUFFER Handle = 0x88982F8C + WINCODEC_ERR_DUPLICATEMETADATAPRESENT Handle = 0x88982F8D + WINCODEC_ERR_PROPERTYUNEXPECTEDTYPE Handle = 0x88982F8E + WINCODEC_ERR_UNEXPECTEDSIZE Handle = 0x88982F8F + WINCODEC_ERR_INVALIDQUERYREQUEST Handle = 0x88982F90 + WINCODEC_ERR_UNEXPECTEDMETADATATYPE Handle = 0x88982F91 + WINCODEC_ERR_REQUESTONLYVALIDATMETADATAROOT Handle = 0x88982F92 + WINCODEC_ERR_INVALIDQUERYCHARACTER Handle = 0x88982F93 + WINCODEC_ERR_WIN32ERROR Handle = 0x88982F94 + WINCODEC_ERR_INVALIDPROGRESSIVELEVEL Handle = 0x88982F95 + WINCODEC_ERR_INVALIDJPEGSCANINDEX Handle = 0x88982F96 + MILERR_OBJECTBUSY Handle = 0x88980001 + MILERR_INSUFFICIENTBUFFER Handle = 0x88980002 + MILERR_WIN32ERROR Handle = 0x88980003 + MILERR_SCANNER_FAILED Handle = 0x88980004 + MILERR_SCREENACCESSDENIED Handle = 0x88980005 + MILERR_DISPLAYSTATEINVALID Handle = 0x88980006 + MILERR_NONINVERTIBLEMATRIX Handle = 0x88980007 + MILERR_ZEROVECTOR Handle = 0x88980008 + MILERR_TERMINATED Handle = 0x88980009 + MILERR_BADNUMBER Handle = 0x8898000A + MILERR_INTERNALERROR Handle = 0x88980080 + MILERR_DISPLAYFORMATNOTSUPPORTED Handle = 0x88980084 + MILERR_INVALIDCALL Handle = 0x88980085 + MILERR_ALREADYLOCKED Handle = 0x88980086 + MILERR_NOTLOCKED Handle = 0x88980087 + MILERR_DEVICECANNOTRENDERTEXT Handle = 0x88980088 + MILERR_GLYPHBITMAPMISSED Handle = 0x88980089 + MILERR_MALFORMEDGLYPHCACHE Handle = 0x8898008A + MILERR_GENERIC_IGNORE Handle = 0x8898008B + MILERR_MALFORMED_GUIDELINE_DATA Handle = 0x8898008C + MILERR_NO_HARDWARE_DEVICE Handle = 0x8898008D + MILERR_NEED_RECREATE_AND_PRESENT Handle = 0x8898008E + MILERR_ALREADY_INITIALIZED Handle = 0x8898008F + MILERR_MISMATCHED_SIZE Handle = 0x88980090 + MILERR_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x88980091 + MILERR_REMOTING_NOT_SUPPORTED Handle = 0x88980092 + MILERR_QUEUED_PRESENT_NOT_SUPPORTED Handle = 0x88980093 + MILERR_NOT_QUEUING_PRESENTS Handle = 0x88980094 + MILERR_NO_REDIRECTION_SURFACE_RETRY_LATER Handle = 0x88980095 + MILERR_TOOMANYSHADERELEMNTS Handle = 0x88980096 + MILERR_MROW_READLOCK_FAILED Handle = 0x88980097 + MILERR_MROW_UPDATE_FAILED Handle = 0x88980098 + MILERR_SHADER_COMPILE_FAILED Handle = 0x88980099 + MILERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8898009A + MILERR_QPC_TIME_WENT_BACKWARD Handle = 0x8898009B + MILERR_DXGI_ENUMERATION_OUT_OF_SYNC Handle = 0x8898009D + MILERR_ADAPTER_NOT_FOUND Handle = 0x8898009E + MILERR_COLORSPACE_NOT_SUPPORTED Handle = 0x8898009F + MILERR_PREFILTER_NOT_SUPPORTED Handle = 0x889800A0 + MILERR_DISPLAYID_ACCESS_DENIED Handle = 0x889800A1 + UCEERR_INVALIDPACKETHEADER Handle = 0x88980400 + UCEERR_UNKNOWNPACKET Handle = 0x88980401 + UCEERR_ILLEGALPACKET Handle = 0x88980402 + UCEERR_MALFORMEDPACKET Handle = 0x88980403 + UCEERR_ILLEGALHANDLE Handle = 0x88980404 + UCEERR_HANDLELOOKUPFAILED Handle = 0x88980405 + UCEERR_RENDERTHREADFAILURE Handle = 
0x88980406 + UCEERR_CTXSTACKFRSTTARGETNULL Handle = 0x88980407 + UCEERR_CONNECTIONIDLOOKUPFAILED Handle = 0x88980408 + UCEERR_BLOCKSFULL Handle = 0x88980409 + UCEERR_MEMORYFAILURE Handle = 0x8898040A + UCEERR_PACKETRECORDOUTOFRANGE Handle = 0x8898040B + UCEERR_ILLEGALRECORDTYPE Handle = 0x8898040C + UCEERR_OUTOFHANDLES Handle = 0x8898040D + UCEERR_UNCHANGABLE_UPDATE_ATTEMPTED Handle = 0x8898040E + UCEERR_NO_MULTIPLE_WORKER_THREADS Handle = 0x8898040F + UCEERR_REMOTINGNOTSUPPORTED Handle = 0x88980410 + UCEERR_MISSINGENDCOMMAND Handle = 0x88980411 + UCEERR_MISSINGBEGINCOMMAND Handle = 0x88980412 + UCEERR_CHANNELSYNCTIMEDOUT Handle = 0x88980413 + UCEERR_CHANNELSYNCABANDONED Handle = 0x88980414 + UCEERR_UNSUPPORTEDTRANSPORTVERSION Handle = 0x88980415 + UCEERR_TRANSPORTUNAVAILABLE Handle = 0x88980416 + UCEERR_FEEDBACK_UNSUPPORTED Handle = 0x88980417 + UCEERR_COMMANDTRANSPORTDENIED Handle = 0x88980418 + UCEERR_GRAPHICSSTREAMUNAVAILABLE Handle = 0x88980419 + UCEERR_GRAPHICSSTREAMALREADYOPEN Handle = 0x88980420 + UCEERR_TRANSPORTDISCONNECTED Handle = 0x88980421 + UCEERR_TRANSPORTOVERLOADED Handle = 0x88980422 + UCEERR_PARTITION_ZOMBIED Handle = 0x88980423 + MILAVERR_NOCLOCK Handle = 0x88980500 + MILAVERR_NOMEDIATYPE Handle = 0x88980501 + MILAVERR_NOVIDEOMIXER Handle = 0x88980502 + MILAVERR_NOVIDEOPRESENTER Handle = 0x88980503 + MILAVERR_NOREADYFRAMES Handle = 0x88980504 + MILAVERR_MODULENOTLOADED Handle = 0x88980505 + MILAVERR_WMPFACTORYNOTREGISTERED Handle = 0x88980506 + MILAVERR_INVALIDWMPVERSION Handle = 0x88980507 + MILAVERR_INSUFFICIENTVIDEORESOURCES Handle = 0x88980508 + MILAVERR_VIDEOACCELERATIONNOTAVAILABLE Handle = 0x88980509 + MILAVERR_REQUESTEDTEXTURETOOBIG Handle = 0x8898050A + MILAVERR_SEEKFAILED Handle = 0x8898050B + MILAVERR_UNEXPECTEDWMPFAILURE Handle = 0x8898050C + MILAVERR_MEDIAPLAYERCLOSED Handle = 0x8898050D + MILAVERR_UNKNOWNHARDWAREERROR Handle = 0x8898050E + MILEFFECTSERR_UNKNOWNPROPERTY Handle = 0x8898060E + MILEFFECTSERR_EFFECTNOTPARTOFGROUP Handle = 0x8898060F + MILEFFECTSERR_NOINPUTSOURCEATTACHED Handle = 0x88980610 + MILEFFECTSERR_CONNECTORNOTCONNECTED Handle = 0x88980611 + MILEFFECTSERR_CONNECTORNOTASSOCIATEDWITHEFFECT Handle = 0x88980612 + MILEFFECTSERR_RESERVED Handle = 0x88980613 + MILEFFECTSERR_CYCLEDETECTED Handle = 0x88980614 + MILEFFECTSERR_EFFECTINMORETHANONEGRAPH Handle = 0x88980615 + MILEFFECTSERR_EFFECTALREADYINAGRAPH Handle = 0x88980616 + MILEFFECTSERR_EFFECTHASNOCHILDREN Handle = 0x88980617 + MILEFFECTSERR_ALREADYATTACHEDTOLISTENER Handle = 0x88980618 + MILEFFECTSERR_NOTAFFINETRANSFORM Handle = 0x88980619 + MILEFFECTSERR_EMPTYBOUNDS Handle = 0x8898061A + MILEFFECTSERR_OUTPUTSIZETOOLARGE Handle = 0x8898061B + DWMERR_STATE_TRANSITION_FAILED Handle = 0x88980700 + DWMERR_THEME_FAILED Handle = 0x88980701 + DWMERR_CATASTROPHIC_FAILURE Handle = 0x88980702 + DCOMPOSITION_ERROR_WINDOW_ALREADY_COMPOSED Handle = 0x88980800 + DCOMPOSITION_ERROR_SURFACE_BEING_RENDERED Handle = 0x88980801 + DCOMPOSITION_ERROR_SURFACE_NOT_BEING_RENDERED Handle = 0x88980802 + ONL_E_INVALID_AUTHENTICATION_TARGET Handle = 0x80860001 + ONL_E_ACCESS_DENIED_BY_TOU Handle = 0x80860002 + ONL_E_INVALID_APPLICATION Handle = 0x80860003 + ONL_E_PASSWORD_UPDATE_REQUIRED Handle = 0x80860004 + ONL_E_ACCOUNT_UPDATE_REQUIRED Handle = 0x80860005 + ONL_E_FORCESIGNIN Handle = 0x80860006 + ONL_E_ACCOUNT_LOCKED Handle = 0x80860007 + ONL_E_PARENTAL_CONSENT_REQUIRED Handle = 0x80860008 + ONL_E_EMAIL_VERIFICATION_REQUIRED Handle = 0x80860009 + ONL_E_ACCOUNT_SUSPENDED_COMPROIMISE Handle = 0x8086000A + 
ONL_E_ACCOUNT_SUSPENDED_ABUSE Handle = 0x8086000B + ONL_E_ACTION_REQUIRED Handle = 0x8086000C + ONL_CONNECTION_COUNT_LIMIT Handle = 0x8086000D + ONL_E_CONNECTED_ACCOUNT_CAN_NOT_SIGNOUT Handle = 0x8086000E + ONL_E_USER_AUTHENTICATION_REQUIRED Handle = 0x8086000F + ONL_E_REQUEST_THROTTLED Handle = 0x80860010 + FA_E_MAX_PERSISTED_ITEMS_REACHED Handle = 0x80270220 + FA_E_HOMEGROUP_NOT_AVAILABLE Handle = 0x80270222 + E_MONITOR_RESOLUTION_TOO_LOW Handle = 0x80270250 + E_ELEVATED_ACTIVATION_NOT_SUPPORTED Handle = 0x80270251 + E_UAC_DISABLED Handle = 0x80270252 + E_FULL_ADMIN_NOT_SUPPORTED Handle = 0x80270253 + E_APPLICATION_NOT_REGISTERED Handle = 0x80270254 + E_MULTIPLE_EXTENSIONS_FOR_APPLICATION Handle = 0x80270255 + E_MULTIPLE_PACKAGES_FOR_FAMILY Handle = 0x80270256 + E_APPLICATION_MANAGER_NOT_RUNNING Handle = 0x80270257 + S_STORE_LAUNCHED_FOR_REMEDIATION Handle = 0x00270258 + S_APPLICATION_ACTIVATION_ERROR_HANDLED_BY_DIALOG Handle = 0x00270259 + E_APPLICATION_ACTIVATION_TIMED_OUT Handle = 0x8027025A + E_APPLICATION_ACTIVATION_EXEC_FAILURE Handle = 0x8027025B + E_APPLICATION_TEMPORARY_LICENSE_ERROR Handle = 0x8027025C + E_APPLICATION_TRIAL_LICENSE_EXPIRED Handle = 0x8027025D + E_SKYDRIVE_ROOT_TARGET_FILE_SYSTEM_NOT_SUPPORTED Handle = 0x80270260 + E_SKYDRIVE_ROOT_TARGET_OVERLAP Handle = 0x80270261 + E_SKYDRIVE_ROOT_TARGET_CANNOT_INDEX Handle = 0x80270262 + E_SKYDRIVE_FILE_NOT_UPLOADED Handle = 0x80270263 + E_SKYDRIVE_UPDATE_AVAILABILITY_FAIL Handle = 0x80270264 + E_SKYDRIVE_ROOT_TARGET_VOLUME_ROOT_NOT_SUPPORTED Handle = 0x80270265 + E_SYNCENGINE_FILE_SIZE_OVER_LIMIT Handle = 0x8802B001 + E_SYNCENGINE_FILE_SIZE_EXCEEDS_REMAINING_QUOTA Handle = 0x8802B002 + E_SYNCENGINE_UNSUPPORTED_FILE_NAME Handle = 0x8802B003 + E_SYNCENGINE_FOLDER_ITEM_COUNT_LIMIT_EXCEEDED Handle = 0x8802B004 + E_SYNCENGINE_FILE_SYNC_PARTNER_ERROR Handle = 0x8802B005 + E_SYNCENGINE_SYNC_PAUSED_BY_SERVICE Handle = 0x8802B006 + E_SYNCENGINE_FILE_IDENTIFIER_UNKNOWN Handle = 0x8802C002 + E_SYNCENGINE_SERVICE_AUTHENTICATION_FAILED Handle = 0x8802C003 + E_SYNCENGINE_UNKNOWN_SERVICE_ERROR Handle = 0x8802C004 + E_SYNCENGINE_SERVICE_RETURNED_UNEXPECTED_SIZE Handle = 0x8802C005 + E_SYNCENGINE_REQUEST_BLOCKED_BY_SERVICE Handle = 0x8802C006 + E_SYNCENGINE_REQUEST_BLOCKED_DUE_TO_CLIENT_ERROR Handle = 0x8802C007 + E_SYNCENGINE_FOLDER_INACCESSIBLE Handle = 0x8802D001 + E_SYNCENGINE_UNSUPPORTED_FOLDER_NAME Handle = 0x8802D002 + E_SYNCENGINE_UNSUPPORTED_MARKET Handle = 0x8802D003 + E_SYNCENGINE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D004 + E_SYNCENGINE_REMOTE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D005 + E_SYNCENGINE_CLIENT_UPDATE_NEEDED Handle = 0x8802D006 + E_SYNCENGINE_PROXY_AUTHENTICATION_REQUIRED Handle = 0x8802D007 + E_SYNCENGINE_STORAGE_SERVICE_PROVISIONING_FAILED Handle = 0x8802D008 + E_SYNCENGINE_UNSUPPORTED_REPARSE_POINT Handle = 0x8802D009 + E_SYNCENGINE_STORAGE_SERVICE_BLOCKED Handle = 0x8802D00A + E_SYNCENGINE_FOLDER_IN_REDIRECTION Handle = 0x8802D00B + EAS_E_POLICY_NOT_MANAGED_BY_OS Handle = 0x80550001 + EAS_E_POLICY_COMPLIANT_WITH_ACTIONS Handle = 0x80550002 + EAS_E_REQUESTED_POLICY_NOT_ENFORCEABLE Handle = 0x80550003 + EAS_E_CURRENT_USER_HAS_BLANK_PASSWORD Handle = 0x80550004 + EAS_E_REQUESTED_POLICY_PASSWORD_EXPIRATION_INCOMPATIBLE Handle = 0x80550005 + EAS_E_USER_CANNOT_CHANGE_PASSWORD Handle = 0x80550006 + EAS_E_ADMINS_HAVE_BLANK_PASSWORD Handle = 0x80550007 + EAS_E_ADMINS_CANNOT_CHANGE_PASSWORD Handle = 0x80550008 + EAS_E_LOCAL_CONTROLLED_USERS_CANNOT_CHANGE_PASSWORD Handle = 0x80550009 + 
EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CONNECTED_ADMINS Handle = 0x8055000A + EAS_E_CONNECTED_ADMINS_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000B + EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CURRENT_CONNECTED_USER Handle = 0x8055000C + EAS_E_CURRENT_CONNECTED_USER_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000D + WEB_E_UNSUPPORTED_FORMAT Handle = 0x83750001 + WEB_E_INVALID_XML Handle = 0x83750002 + WEB_E_MISSING_REQUIRED_ELEMENT Handle = 0x83750003 + WEB_E_MISSING_REQUIRED_ATTRIBUTE Handle = 0x83750004 + WEB_E_UNEXPECTED_CONTENT Handle = 0x83750005 + WEB_E_RESOURCE_TOO_LARGE Handle = 0x83750006 + WEB_E_INVALID_JSON_STRING Handle = 0x83750007 + WEB_E_INVALID_JSON_NUMBER Handle = 0x83750008 + WEB_E_JSON_VALUE_NOT_FOUND Handle = 0x83750009 + HTTP_E_STATUS_UNEXPECTED Handle = 0x80190001 + HTTP_E_STATUS_UNEXPECTED_REDIRECTION Handle = 0x80190003 + HTTP_E_STATUS_UNEXPECTED_CLIENT_ERROR Handle = 0x80190004 + HTTP_E_STATUS_UNEXPECTED_SERVER_ERROR Handle = 0x80190005 + HTTP_E_STATUS_AMBIGUOUS Handle = 0x8019012C + HTTP_E_STATUS_MOVED Handle = 0x8019012D + HTTP_E_STATUS_REDIRECT Handle = 0x8019012E + HTTP_E_STATUS_REDIRECT_METHOD Handle = 0x8019012F + HTTP_E_STATUS_NOT_MODIFIED Handle = 0x80190130 + HTTP_E_STATUS_USE_PROXY Handle = 0x80190131 + HTTP_E_STATUS_REDIRECT_KEEP_VERB Handle = 0x80190133 + HTTP_E_STATUS_BAD_REQUEST Handle = 0x80190190 + HTTP_E_STATUS_DENIED Handle = 0x80190191 + HTTP_E_STATUS_PAYMENT_REQ Handle = 0x80190192 + HTTP_E_STATUS_FORBIDDEN Handle = 0x80190193 + HTTP_E_STATUS_NOT_FOUND Handle = 0x80190194 + HTTP_E_STATUS_BAD_METHOD Handle = 0x80190195 + HTTP_E_STATUS_NONE_ACCEPTABLE Handle = 0x80190196 + HTTP_E_STATUS_PROXY_AUTH_REQ Handle = 0x80190197 + HTTP_E_STATUS_REQUEST_TIMEOUT Handle = 0x80190198 + HTTP_E_STATUS_CONFLICT Handle = 0x80190199 + HTTP_E_STATUS_GONE Handle = 0x8019019A + HTTP_E_STATUS_LENGTH_REQUIRED Handle = 0x8019019B + HTTP_E_STATUS_PRECOND_FAILED Handle = 0x8019019C + HTTP_E_STATUS_REQUEST_TOO_LARGE Handle = 0x8019019D + HTTP_E_STATUS_URI_TOO_LONG Handle = 0x8019019E + HTTP_E_STATUS_UNSUPPORTED_MEDIA Handle = 0x8019019F + HTTP_E_STATUS_RANGE_NOT_SATISFIABLE Handle = 0x801901A0 + HTTP_E_STATUS_EXPECTATION_FAILED Handle = 0x801901A1 + HTTP_E_STATUS_SERVER_ERROR Handle = 0x801901F4 + HTTP_E_STATUS_NOT_SUPPORTED Handle = 0x801901F5 + HTTP_E_STATUS_BAD_GATEWAY Handle = 0x801901F6 + HTTP_E_STATUS_SERVICE_UNAVAIL Handle = 0x801901F7 + HTTP_E_STATUS_GATEWAY_TIMEOUT Handle = 0x801901F8 + HTTP_E_STATUS_VERSION_NOT_SUP Handle = 0x801901F9 + E_INVALID_PROTOCOL_OPERATION Handle = 0x83760001 + E_INVALID_PROTOCOL_FORMAT Handle = 0x83760002 + E_PROTOCOL_EXTENSIONS_NOT_SUPPORTED Handle = 0x83760003 + E_SUBPROTOCOL_NOT_SUPPORTED Handle = 0x83760004 + E_PROTOCOL_VERSION_NOT_SUPPORTED Handle = 0x83760005 + INPUT_E_OUT_OF_ORDER Handle = 0x80400000 + INPUT_E_REENTRANCY Handle = 0x80400001 + INPUT_E_MULTIMODAL Handle = 0x80400002 + INPUT_E_PACKET Handle = 0x80400003 + INPUT_E_FRAME Handle = 0x80400004 + INPUT_E_HISTORY Handle = 0x80400005 + INPUT_E_DEVICE_INFO Handle = 0x80400006 + INPUT_E_TRANSFORM Handle = 0x80400007 + INPUT_E_DEVICE_PROPERTY Handle = 0x80400008 + INET_E_INVALID_URL Handle = 0x800C0002 + INET_E_NO_SESSION Handle = 0x800C0003 + INET_E_CANNOT_CONNECT Handle = 0x800C0004 + INET_E_RESOURCE_NOT_FOUND Handle = 0x800C0005 + INET_E_OBJECT_NOT_FOUND Handle = 0x800C0006 + INET_E_DATA_NOT_AVAILABLE Handle = 0x800C0007 + INET_E_DOWNLOAD_FAILURE Handle = 0x800C0008 + INET_E_AUTHENTICATION_REQUIRED Handle = 0x800C0009 + INET_E_NO_VALID_MEDIA Handle = 0x800C000A + 
INET_E_CONNECTION_TIMEOUT Handle = 0x800C000B + INET_E_INVALID_REQUEST Handle = 0x800C000C + INET_E_UNKNOWN_PROTOCOL Handle = 0x800C000D + INET_E_SECURITY_PROBLEM Handle = 0x800C000E + INET_E_CANNOT_LOAD_DATA Handle = 0x800C000F + INET_E_CANNOT_INSTANTIATE_OBJECT Handle = 0x800C0010 + INET_E_INVALID_CERTIFICATE Handle = 0x800C0019 + INET_E_REDIRECT_FAILED Handle = 0x800C0014 + INET_E_REDIRECT_TO_DIR Handle = 0x800C0015 + ERROR_DBG_CREATE_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00001 + ERROR_DBG_ATTACH_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00002 + ERROR_DBG_CONNECT_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00003 + ERROR_DBG_START_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00004 + ERROR_IO_PREEMPTED Handle = 0x89010001 + JSCRIPT_E_CANTEXECUTE Handle = 0x89020001 + WEP_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x88010001 + WEP_E_FIXED_DATA_NOT_SUPPORTED Handle = 0x88010002 + WEP_E_HARDWARE_NOT_COMPLIANT Handle = 0x88010003 + WEP_E_LOCK_NOT_CONFIGURED Handle = 0x88010004 + WEP_E_PROTECTION_SUSPENDED Handle = 0x88010005 + WEP_E_NO_LICENSE Handle = 0x88010006 + WEP_E_OS_NOT_PROTECTED Handle = 0x88010007 + WEP_E_UNEXPECTED_FAIL Handle = 0x88010008 + WEP_E_BUFFER_TOO_LARGE Handle = 0x88010009 + ERROR_SVHDX_ERROR_STORED Handle = 0xC05C0000 + ERROR_SVHDX_ERROR_NOT_AVAILABLE Handle = 0xC05CFF00 + ERROR_SVHDX_UNIT_ATTENTION_AVAILABLE Handle = 0xC05CFF01 + ERROR_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED Handle = 0xC05CFF02 + ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED Handle = 0xC05CFF03 + ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED Handle = 0xC05CFF04 + ERROR_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED Handle = 0xC05CFF05 + ERROR_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED Handle = 0xC05CFF06 + ERROR_SVHDX_RESERVATION_CONFLICT Handle = 0xC05CFF07 + ERROR_SVHDX_WRONG_FILE_TYPE Handle = 0xC05CFF08 + ERROR_SVHDX_VERSION_MISMATCH Handle = 0xC05CFF09 + ERROR_VHD_SHARED Handle = 0xC05CFF0A + ERROR_SVHDX_NO_INITIATOR Handle = 0xC05CFF0B + ERROR_VHDSET_BACKING_STORAGE_NOT_FOUND Handle = 0xC05CFF0C + ERROR_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP Handle = 0xC05D0000 + ERROR_SMB_BAD_CLUSTER_DIALECT Handle = 0xC05D0001 + WININET_E_OUT_OF_HANDLES Handle = 0x80072EE1 + WININET_E_TIMEOUT Handle = 0x80072EE2 + WININET_E_EXTENDED_ERROR Handle = 0x80072EE3 + WININET_E_INTERNAL_ERROR Handle = 0x80072EE4 + WININET_E_INVALID_URL Handle = 0x80072EE5 + WININET_E_UNRECOGNIZED_SCHEME Handle = 0x80072EE6 + WININET_E_NAME_NOT_RESOLVED Handle = 0x80072EE7 + WININET_E_PROTOCOL_NOT_FOUND Handle = 0x80072EE8 + WININET_E_INVALID_OPTION Handle = 0x80072EE9 + WININET_E_BAD_OPTION_LENGTH Handle = 0x80072EEA + WININET_E_OPTION_NOT_SETTABLE Handle = 0x80072EEB + WININET_E_SHUTDOWN Handle = 0x80072EEC + WININET_E_INCORRECT_USER_NAME Handle = 0x80072EED + WININET_E_INCORRECT_PASSWORD Handle = 0x80072EEE + WININET_E_LOGIN_FAILURE Handle = 0x80072EEF + WININET_E_INVALID_OPERATION Handle = 0x80072EF0 + WININET_E_OPERATION_CANCELLED Handle = 0x80072EF1 + WININET_E_INCORRECT_HANDLE_TYPE Handle = 0x80072EF2 + WININET_E_INCORRECT_HANDLE_STATE Handle = 0x80072EF3 + WININET_E_NOT_PROXY_REQUEST Handle = 0x80072EF4 + WININET_E_REGISTRY_VALUE_NOT_FOUND Handle = 0x80072EF5 + WININET_E_BAD_REGISTRY_PARAMETER Handle = 0x80072EF6 + WININET_E_NO_DIRECT_ACCESS Handle = 0x80072EF7 + WININET_E_NO_CONTEXT Handle = 0x80072EF8 + WININET_E_NO_CALLBACK Handle = 0x80072EF9 + WININET_E_REQUEST_PENDING Handle = 0x80072EFA + WININET_E_INCORRECT_FORMAT Handle = 0x80072EFB + WININET_E_ITEM_NOT_FOUND Handle = 0x80072EFC + WININET_E_CANNOT_CONNECT Handle = 
0x80072EFD + WININET_E_CONNECTION_ABORTED Handle = 0x80072EFE + WININET_E_CONNECTION_RESET Handle = 0x80072EFF + WININET_E_FORCE_RETRY Handle = 0x80072F00 + WININET_E_INVALID_PROXY_REQUEST Handle = 0x80072F01 + WININET_E_NEED_UI Handle = 0x80072F02 + WININET_E_HANDLE_EXISTS Handle = 0x80072F04 + WININET_E_SEC_CERT_DATE_INVALID Handle = 0x80072F05 + WININET_E_SEC_CERT_CN_INVALID Handle = 0x80072F06 + WININET_E_HTTP_TO_HTTPS_ON_REDIR Handle = 0x80072F07 + WININET_E_HTTPS_TO_HTTP_ON_REDIR Handle = 0x80072F08 + WININET_E_MIXED_SECURITY Handle = 0x80072F09 + WININET_E_CHG_POST_IS_NON_SECURE Handle = 0x80072F0A + WININET_E_POST_IS_NON_SECURE Handle = 0x80072F0B + WININET_E_CLIENT_AUTH_CERT_NEEDED Handle = 0x80072F0C + WININET_E_INVALID_CA Handle = 0x80072F0D + WININET_E_CLIENT_AUTH_NOT_SETUP Handle = 0x80072F0E + WININET_E_ASYNC_THREAD_FAILED Handle = 0x80072F0F + WININET_E_REDIRECT_SCHEME_CHANGE Handle = 0x80072F10 + WININET_E_DIALOG_PENDING Handle = 0x80072F11 + WININET_E_RETRY_DIALOG Handle = 0x80072F12 + WININET_E_NO_NEW_CONTAINERS Handle = 0x80072F13 + WININET_E_HTTPS_HTTP_SUBMIT_REDIR Handle = 0x80072F14 + WININET_E_SEC_CERT_ERRORS Handle = 0x80072F17 + WININET_E_SEC_CERT_REV_FAILED Handle = 0x80072F19 + WININET_E_HEADER_NOT_FOUND Handle = 0x80072F76 + WININET_E_DOWNLEVEL_SERVER Handle = 0x80072F77 + WININET_E_INVALID_SERVER_RESPONSE Handle = 0x80072F78 + WININET_E_INVALID_HEADER Handle = 0x80072F79 + WININET_E_INVALID_QUERY_REQUEST Handle = 0x80072F7A + WININET_E_HEADER_ALREADY_EXISTS Handle = 0x80072F7B + WININET_E_REDIRECT_FAILED Handle = 0x80072F7C + WININET_E_SECURITY_CHANNEL_ERROR Handle = 0x80072F7D + WININET_E_UNABLE_TO_CACHE_FILE Handle = 0x80072F7E + WININET_E_TCPIP_NOT_INSTALLED Handle = 0x80072F7F + WININET_E_DISCONNECTED Handle = 0x80072F83 + WININET_E_SERVER_UNREACHABLE Handle = 0x80072F84 + WININET_E_PROXY_SERVER_UNREACHABLE Handle = 0x80072F85 + WININET_E_BAD_AUTO_PROXY_SCRIPT Handle = 0x80072F86 + WININET_E_UNABLE_TO_DOWNLOAD_SCRIPT Handle = 0x80072F87 + WININET_E_SEC_INVALID_CERT Handle = 0x80072F89 + WININET_E_SEC_CERT_REVOKED Handle = 0x80072F8A + WININET_E_FAILED_DUETOSECURITYCHECK Handle = 0x80072F8B + WININET_E_NOT_INITIALIZED Handle = 0x80072F8C + WININET_E_LOGIN_FAILURE_DISPLAY_ENTITY_BODY Handle = 0x80072F8E + WININET_E_DECODING_FAILED Handle = 0x80072F8F + WININET_E_NOT_REDIRECTED Handle = 0x80072F80 + WININET_E_COOKIE_NEEDS_CONFIRMATION Handle = 0x80072F81 + WININET_E_COOKIE_DECLINED Handle = 0x80072F82 + WININET_E_REDIRECT_NEEDS_CONFIRMATION Handle = 0x80072F88 + SQLITE_E_ERROR Handle = 0x87AF0001 + SQLITE_E_INTERNAL Handle = 0x87AF0002 + SQLITE_E_PERM Handle = 0x87AF0003 + SQLITE_E_ABORT Handle = 0x87AF0004 + SQLITE_E_BUSY Handle = 0x87AF0005 + SQLITE_E_LOCKED Handle = 0x87AF0006 + SQLITE_E_NOMEM Handle = 0x87AF0007 + SQLITE_E_READONLY Handle = 0x87AF0008 + SQLITE_E_INTERRUPT Handle = 0x87AF0009 + SQLITE_E_IOERR Handle = 0x87AF000A + SQLITE_E_CORRUPT Handle = 0x87AF000B + SQLITE_E_NOTFOUND Handle = 0x87AF000C + SQLITE_E_FULL Handle = 0x87AF000D + SQLITE_E_CANTOPEN Handle = 0x87AF000E + SQLITE_E_PROTOCOL Handle = 0x87AF000F + SQLITE_E_EMPTY Handle = 0x87AF0010 + SQLITE_E_SCHEMA Handle = 0x87AF0011 + SQLITE_E_TOOBIG Handle = 0x87AF0012 + SQLITE_E_CONSTRAINT Handle = 0x87AF0013 + SQLITE_E_MISMATCH Handle = 0x87AF0014 + SQLITE_E_MISUSE Handle = 0x87AF0015 + SQLITE_E_NOLFS Handle = 0x87AF0016 + SQLITE_E_AUTH Handle = 0x87AF0017 + SQLITE_E_FORMAT Handle = 0x87AF0018 + SQLITE_E_RANGE Handle = 0x87AF0019 + SQLITE_E_NOTADB Handle = 0x87AF001A + SQLITE_E_NOTICE Handle = 
0x87AF001B + SQLITE_E_WARNING Handle = 0x87AF001C + SQLITE_E_ROW Handle = 0x87AF0064 + SQLITE_E_DONE Handle = 0x87AF0065 + SQLITE_E_IOERR_READ Handle = 0x87AF010A + SQLITE_E_IOERR_SHORT_READ Handle = 0x87AF020A + SQLITE_E_IOERR_WRITE Handle = 0x87AF030A + SQLITE_E_IOERR_FSYNC Handle = 0x87AF040A + SQLITE_E_IOERR_DIR_FSYNC Handle = 0x87AF050A + SQLITE_E_IOERR_TRUNCATE Handle = 0x87AF060A + SQLITE_E_IOERR_FSTAT Handle = 0x87AF070A + SQLITE_E_IOERR_UNLOCK Handle = 0x87AF080A + SQLITE_E_IOERR_RDLOCK Handle = 0x87AF090A + SQLITE_E_IOERR_DELETE Handle = 0x87AF0A0A + SQLITE_E_IOERR_BLOCKED Handle = 0x87AF0B0A + SQLITE_E_IOERR_NOMEM Handle = 0x87AF0C0A + SQLITE_E_IOERR_ACCESS Handle = 0x87AF0D0A + SQLITE_E_IOERR_CHECKRESERVEDLOCK Handle = 0x87AF0E0A + SQLITE_E_IOERR_LOCK Handle = 0x87AF0F0A + SQLITE_E_IOERR_CLOSE Handle = 0x87AF100A + SQLITE_E_IOERR_DIR_CLOSE Handle = 0x87AF110A + SQLITE_E_IOERR_SHMOPEN Handle = 0x87AF120A + SQLITE_E_IOERR_SHMSIZE Handle = 0x87AF130A + SQLITE_E_IOERR_SHMLOCK Handle = 0x87AF140A + SQLITE_E_IOERR_SHMMAP Handle = 0x87AF150A + SQLITE_E_IOERR_SEEK Handle = 0x87AF160A + SQLITE_E_IOERR_DELETE_NOENT Handle = 0x87AF170A + SQLITE_E_IOERR_MMAP Handle = 0x87AF180A + SQLITE_E_IOERR_GETTEMPPATH Handle = 0x87AF190A + SQLITE_E_IOERR_CONVPATH Handle = 0x87AF1A0A + SQLITE_E_IOERR_VNODE Handle = 0x87AF1A02 + SQLITE_E_IOERR_AUTH Handle = 0x87AF1A03 + SQLITE_E_LOCKED_SHAREDCACHE Handle = 0x87AF0106 + SQLITE_E_BUSY_RECOVERY Handle = 0x87AF0105 + SQLITE_E_BUSY_SNAPSHOT Handle = 0x87AF0205 + SQLITE_E_CANTOPEN_NOTEMPDIR Handle = 0x87AF010E + SQLITE_E_CANTOPEN_ISDIR Handle = 0x87AF020E + SQLITE_E_CANTOPEN_FULLPATH Handle = 0x87AF030E + SQLITE_E_CANTOPEN_CONVPATH Handle = 0x87AF040E + SQLITE_E_CORRUPT_VTAB Handle = 0x87AF010B + SQLITE_E_READONLY_RECOVERY Handle = 0x87AF0108 + SQLITE_E_READONLY_CANTLOCK Handle = 0x87AF0208 + SQLITE_E_READONLY_ROLLBACK Handle = 0x87AF0308 + SQLITE_E_READONLY_DBMOVED Handle = 0x87AF0408 + SQLITE_E_ABORT_ROLLBACK Handle = 0x87AF0204 + SQLITE_E_CONSTRAINT_CHECK Handle = 0x87AF0113 + SQLITE_E_CONSTRAINT_COMMITHOOK Handle = 0x87AF0213 + SQLITE_E_CONSTRAINT_FOREIGNKEY Handle = 0x87AF0313 + SQLITE_E_CONSTRAINT_FUNCTION Handle = 0x87AF0413 + SQLITE_E_CONSTRAINT_NOTNULL Handle = 0x87AF0513 + SQLITE_E_CONSTRAINT_PRIMARYKEY Handle = 0x87AF0613 + SQLITE_E_CONSTRAINT_TRIGGER Handle = 0x87AF0713 + SQLITE_E_CONSTRAINT_UNIQUE Handle = 0x87AF0813 + SQLITE_E_CONSTRAINT_VTAB Handle = 0x87AF0913 + SQLITE_E_CONSTRAINT_ROWID Handle = 0x87AF0A13 + SQLITE_E_NOTICE_RECOVER_WAL Handle = 0x87AF011B + SQLITE_E_NOTICE_RECOVER_ROLLBACK Handle = 0x87AF021B + SQLITE_E_WARNING_AUTOINDEX Handle = 0x87AF011C + UTC_E_TOGGLE_TRACE_STARTED Handle = 0x87C51001 + UTC_E_ALTERNATIVE_TRACE_CANNOT_PREEMPT Handle = 0x87C51002 + UTC_E_AOT_NOT_RUNNING Handle = 0x87C51003 + UTC_E_SCRIPT_TYPE_INVALID Handle = 0x87C51004 + UTC_E_SCENARIODEF_NOT_FOUND Handle = 0x87C51005 + UTC_E_TRACEPROFILE_NOT_FOUND Handle = 0x87C51006 + UTC_E_FORWARDER_ALREADY_ENABLED Handle = 0x87C51007 + UTC_E_FORWARDER_ALREADY_DISABLED Handle = 0x87C51008 + UTC_E_EVENTLOG_ENTRY_MALFORMED Handle = 0x87C51009 + UTC_E_DIAGRULES_SCHEMAVERSION_MISMATCH Handle = 0x87C5100A + UTC_E_SCRIPT_TERMINATED Handle = 0x87C5100B + UTC_E_INVALID_CUSTOM_FILTER Handle = 0x87C5100C + UTC_E_TRACE_NOT_RUNNING Handle = 0x87C5100D + UTC_E_REESCALATED_TOO_QUICKLY Handle = 0x87C5100E + UTC_E_ESCALATION_ALREADY_RUNNING Handle = 0x87C5100F + UTC_E_PERFTRACK_ALREADY_TRACING Handle = 0x87C51010 + UTC_E_REACHED_MAX_ESCALATIONS Handle = 0x87C51011 + 
UTC_E_FORWARDER_PRODUCER_MISMATCH Handle = 0x87C51012 + UTC_E_INTENTIONAL_SCRIPT_FAILURE Handle = 0x87C51013 + UTC_E_SQM_INIT_FAILED Handle = 0x87C51014 + UTC_E_NO_WER_LOGGER_SUPPORTED Handle = 0x87C51015 + UTC_E_TRACERS_DONT_EXIST Handle = 0x87C51016 + UTC_E_WINRT_INIT_FAILED Handle = 0x87C51017 + UTC_E_SCENARIODEF_SCHEMAVERSION_MISMATCH Handle = 0x87C51018 + UTC_E_INVALID_FILTER Handle = 0x87C51019 + UTC_E_EXE_TERMINATED Handle = 0x87C5101A + UTC_E_ESCALATION_NOT_AUTHORIZED Handle = 0x87C5101B + UTC_E_SETUP_NOT_AUTHORIZED Handle = 0x87C5101C + UTC_E_CHILD_PROCESS_FAILED Handle = 0x87C5101D + UTC_E_COMMAND_LINE_NOT_AUTHORIZED Handle = 0x87C5101E + UTC_E_CANNOT_LOAD_SCENARIO_EDITOR_XML Handle = 0x87C5101F + UTC_E_ESCALATION_TIMED_OUT Handle = 0x87C51020 + UTC_E_SETUP_TIMED_OUT Handle = 0x87C51021 + UTC_E_TRIGGER_MISMATCH Handle = 0x87C51022 + UTC_E_TRIGGER_NOT_FOUND Handle = 0x87C51023 + UTC_E_SIF_NOT_SUPPORTED Handle = 0x87C51024 + UTC_E_DELAY_TERMINATED Handle = 0x87C51025 + UTC_E_DEVICE_TICKET_ERROR Handle = 0x87C51026 + UTC_E_TRACE_BUFFER_LIMIT_EXCEEDED Handle = 0x87C51027 + UTC_E_API_RESULT_UNAVAILABLE Handle = 0x87C51028 + UTC_E_RPC_TIMEOUT Handle = 0x87C51029 + UTC_E_RPC_WAIT_FAILED Handle = 0x87C5102A + UTC_E_API_BUSY Handle = 0x87C5102B + UTC_E_TRACE_MIN_DURATION_REQUIREMENT_NOT_MET Handle = 0x87C5102C + UTC_E_EXCLUSIVITY_NOT_AVAILABLE Handle = 0x87C5102D + UTC_E_GETFILE_FILE_PATH_NOT_APPROVED Handle = 0x87C5102E + UTC_E_ESCALATION_DIRECTORY_ALREADY_EXISTS Handle = 0x87C5102F + UTC_E_TIME_TRIGGER_ON_START_INVALID Handle = 0x87C51030 + UTC_E_TIME_TRIGGER_ONLY_VALID_ON_SINGLE_TRANSITION Handle = 0x87C51031 + UTC_E_TIME_TRIGGER_INVALID_TIME_RANGE Handle = 0x87C51032 + UTC_E_MULTIPLE_TIME_TRIGGER_ON_SINGLE_STATE Handle = 0x87C51033 + UTC_E_BINARY_MISSING Handle = 0x87C51034 + UTC_E_NETWORK_CAPTURE_NOT_ALLOWED Handle = 0x87C51035 + UTC_E_FAILED_TO_RESOLVE_CONTAINER_ID Handle = 0x87C51036 + UTC_E_UNABLE_TO_RESOLVE_SESSION Handle = 0x87C51037 + UTC_E_THROTTLED Handle = 0x87C51038 + UTC_E_UNAPPROVED_SCRIPT Handle = 0x87C51039 + UTC_E_SCRIPT_MISSING Handle = 0x87C5103A + UTC_E_SCENARIO_THROTTLED Handle = 0x87C5103B + UTC_E_API_NOT_SUPPORTED Handle = 0x87C5103C + UTC_E_GETFILE_EXTERNAL_PATH_NOT_APPROVED Handle = 0x87C5103D + UTC_E_TRY_GET_SCENARIO_TIMEOUT_EXCEEDED Handle = 0x87C5103E + UTC_E_CERT_REV_FAILED Handle = 0x87C5103F + UTC_E_FAILED_TO_START_NDISCAP Handle = 0x87C51040 + UTC_E_KERNELDUMP_LIMIT_REACHED Handle = 0x87C51041 + UTC_E_MISSING_AGGREGATE_EVENT_TAG Handle = 0x87C51042 + UTC_E_INVALID_AGGREGATION_STRUCT Handle = 0x87C51043 + UTC_E_ACTION_NOT_SUPPORTED_IN_DESTINATION Handle = 0x87C51044 + UTC_E_FILTER_MISSING_ATTRIBUTE Handle = 0x87C51045 + UTC_E_FILTER_INVALID_TYPE Handle = 0x87C51046 + UTC_E_FILTER_VARIABLE_NOT_FOUND Handle = 0x87C51047 + UTC_E_FILTER_FUNCTION_RESTRICTED Handle = 0x87C51048 + UTC_E_FILTER_VERSION_MISMATCH Handle = 0x87C51049 + UTC_E_FILTER_INVALID_FUNCTION Handle = 0x87C51050 + UTC_E_FILTER_INVALID_FUNCTION_PARAMS Handle = 0x87C51051 + UTC_E_FILTER_INVALID_COMMAND Handle = 0x87C51052 + UTC_E_FILTER_ILLEGAL_EVAL Handle = 0x87C51053 + UTC_E_TTTRACER_RETURNED_ERROR Handle = 0x87C51054 + UTC_E_AGENT_DIAGNOSTICS_TOO_LARGE Handle = 0x87C51055 + UTC_E_FAILED_TO_RECEIVE_AGENT_DIAGNOSTICS Handle = 0x87C51056 + UTC_E_SCENARIO_HAS_NO_ACTIONS Handle = 0x87C51057 + UTC_E_TTTRACER_STORAGE_FULL Handle = 0x87C51058 + UTC_E_INSUFFICIENT_SPACE_TO_START_TRACE Handle = 0x87C51059 + UTC_E_ESCALATION_CANCELLED_AT_SHUTDOWN Handle = 0x87C5105A + 
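The Handle constants in these generated tables (WININET_E_*, SQLITE_E_*, UTC_E_*, and the WINML_ERR_* values that follow) are ordinary Windows HRESULTs: bit 31 is the severity flag, bits 16-26 name the facility, and the low 16 bits carry the code. A minimal decoding sketch against the standard library only; decodeHRESULT and its output format are illustrative, not part of the vendored package:

package main

import "fmt"

// decodeHRESULT splits an HRESULT into its standard fields:
// severity (bit 31), facility (bits 16-26), and code (bits 0-15).
func decodeHRESULT(hr uint32) (severity, facility, code uint32) {
	return hr >> 31, (hr >> 16) & 0x7ff, hr & 0xffff
}

func main() {
	// WININET_E_SEC_CERT_DATE_INVALID from the table above.
	sev, fac, code := decodeHRESULT(0x80072F05)
	fmt.Printf("severity=%d facility=%#x code=%d\n", sev, fac, code)
	// Prints severity=1 (failure), facility=0x7 (FACILITY_WIN32),
	// code=12037, the underlying WinINet error number.
}

The SQLITE_E_* values make the facility split visible: all share facility 0x7AF, and extended results such as SQLITE_E_IOERR_READ (0x87AF010A) pack the primary result code (0x0A, the plain IOERR) into the low byte of the code field.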
UTC_E_GETFILEINFOACTION_FILE_NOT_APPROVED Handle = 0x87C5105B + WINML_ERR_INVALID_DEVICE Handle = 0x88900001 + WINML_ERR_INVALID_BINDING Handle = 0x88900002 + WINML_ERR_VALUE_NOTFOUND Handle = 0x88900003 + WINML_ERR_SIZE_MISMATCH Handle = 0x88900004 +) diff --git a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go new file mode 100644 index 00000000..6048ac67 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go @@ -0,0 +1,149 @@ +// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT. + +package windows + +type KNOWNFOLDERID GUID + +var ( + FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}} + FOLDERID_ComputerFolder = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}} + FOLDERID_InternetFolder = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}} + FOLDERID_ControlPanelFolder = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}} + FOLDERID_PrintersFolder = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}} + FOLDERID_SyncManagerFolder = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}} + FOLDERID_SyncSetupFolder = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}} + FOLDERID_ConflictFolder = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}} + FOLDERID_SyncResultsFolder = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}} + FOLDERID_RecycleBinFolder = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}} + FOLDERID_ConnectionsFolder = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}} + FOLDERID_Fonts = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}} + FOLDERID_Desktop = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}} + FOLDERID_Startup = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}} + FOLDERID_Programs = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}} + FOLDERID_StartMenu = &KNOWNFOLDERID{0x625b53c3, 0xab48, 0x4ec1, [8]byte{0xba, 0x1f, 0xa1, 0xef, 0x41, 0x46, 0xfc, 0x19}} + FOLDERID_Recent = &KNOWNFOLDERID{0xae50c081, 0xebd2, 0x438a, [8]byte{0x86, 0x55, 0x8a, 0x09, 0x2e, 0x34, 0x98, 0x7a}} + FOLDERID_SendTo = &KNOWNFOLDERID{0x8983036c, 0x27c0, 0x404b, [8]byte{0x8f, 0x08, 0x10, 0x2d, 0x10, 0xdc, 0xfd, 0x74}} + FOLDERID_Documents = &KNOWNFOLDERID{0xfdd39ad0, 0x238f, 0x46af, [8]byte{0xad, 0xb4, 0x6c, 0x85, 0x48, 0x03, 0x69, 0xc7}} + FOLDERID_Favorites = &KNOWNFOLDERID{0x1777f761, 0x68ad, 0x4d8a, [8]byte{0x87, 0xbd, 0x30, 0xb7, 0x59, 0xfa, 0x33, 0xdd}} + FOLDERID_NetHood = &KNOWNFOLDERID{0xc5abbf53, 0xe17f, 0x4121, [8]byte{0x89, 0x00, 0x86, 0x62, 0x6f, 0xc2, 0xc9, 0x73}} + FOLDERID_PrintHood = &KNOWNFOLDERID{0x9274bd8d, 0xcfd1, 0x41c3, [8]byte{0xb3, 0x5e, 0xb1, 0x3f, 0x55, 0xa7, 0x58, 0xf4}} + FOLDERID_Templates = &KNOWNFOLDERID{0xa63293e8, 0x664e, 0x48db, [8]byte{0xa0, 0x79, 0xdf, 0x75, 0x9e, 0x05, 0x09, 0xf7}} + FOLDERID_CommonStartup = 
&KNOWNFOLDERID{0x82a5ea35, 0xd9cd, 0x47c5, [8]byte{0x96, 0x29, 0xe1, 0x5d, 0x2f, 0x71, 0x4e, 0x6e}} + FOLDERID_CommonPrograms = &KNOWNFOLDERID{0x0139d44e, 0x6afe, 0x49f2, [8]byte{0x86, 0x90, 0x3d, 0xaf, 0xca, 0xe6, 0xff, 0xb8}} + FOLDERID_CommonStartMenu = &KNOWNFOLDERID{0xa4115719, 0xd62e, 0x491d, [8]byte{0xaa, 0x7c, 0xe7, 0x4b, 0x8b, 0xe3, 0xb0, 0x67}} + FOLDERID_PublicDesktop = &KNOWNFOLDERID{0xc4aa340d, 0xf20f, 0x4863, [8]byte{0xaf, 0xef, 0xf8, 0x7e, 0xf2, 0xe6, 0xba, 0x25}} + FOLDERID_ProgramData = &KNOWNFOLDERID{0x62ab5d82, 0xfdc1, 0x4dc3, [8]byte{0xa9, 0xdd, 0x07, 0x0d, 0x1d, 0x49, 0x5d, 0x97}} + FOLDERID_CommonTemplates = &KNOWNFOLDERID{0xb94237e7, 0x57ac, 0x4347, [8]byte{0x91, 0x51, 0xb0, 0x8c, 0x6c, 0x32, 0xd1, 0xf7}} + FOLDERID_PublicDocuments = &KNOWNFOLDERID{0xed4824af, 0xdce4, 0x45a8, [8]byte{0x81, 0xe2, 0xfc, 0x79, 0x65, 0x08, 0x36, 0x34}} + FOLDERID_RoamingAppData = &KNOWNFOLDERID{0x3eb685db, 0x65f9, 0x4cf6, [8]byte{0xa0, 0x3a, 0xe3, 0xef, 0x65, 0x72, 0x9f, 0x3d}} + FOLDERID_LocalAppData = &KNOWNFOLDERID{0xf1b32785, 0x6fba, 0x4fcf, [8]byte{0x9d, 0x55, 0x7b, 0x8e, 0x7f, 0x15, 0x70, 0x91}} + FOLDERID_LocalAppDataLow = &KNOWNFOLDERID{0xa520a1a4, 0x1780, 0x4ff6, [8]byte{0xbd, 0x18, 0x16, 0x73, 0x43, 0xc5, 0xaf, 0x16}} + FOLDERID_InternetCache = &KNOWNFOLDERID{0x352481e8, 0x33be, 0x4251, [8]byte{0xba, 0x85, 0x60, 0x07, 0xca, 0xed, 0xcf, 0x9d}} + FOLDERID_Cookies = &KNOWNFOLDERID{0x2b0f765d, 0xc0e9, 0x4171, [8]byte{0x90, 0x8e, 0x08, 0xa6, 0x11, 0xb8, 0x4f, 0xf6}} + FOLDERID_History = &KNOWNFOLDERID{0xd9dc8a3b, 0xb784, 0x432e, [8]byte{0xa7, 0x81, 0x5a, 0x11, 0x30, 0xa7, 0x59, 0x63}} + FOLDERID_System = &KNOWNFOLDERID{0x1ac14e77, 0x02e7, 0x4e5d, [8]byte{0xb7, 0x44, 0x2e, 0xb1, 0xae, 0x51, 0x98, 0xb7}} + FOLDERID_SystemX86 = &KNOWNFOLDERID{0xd65231b0, 0xb2f1, 0x4857, [8]byte{0xa4, 0xce, 0xa8, 0xe7, 0xc6, 0xea, 0x7d, 0x27}} + FOLDERID_Windows = &KNOWNFOLDERID{0xf38bf404, 0x1d43, 0x42f2, [8]byte{0x93, 0x05, 0x67, 0xde, 0x0b, 0x28, 0xfc, 0x23}} + FOLDERID_Profile = &KNOWNFOLDERID{0x5e6c858f, 0x0e22, 0x4760, [8]byte{0x9a, 0xfe, 0xea, 0x33, 0x17, 0xb6, 0x71, 0x73}} + FOLDERID_Pictures = &KNOWNFOLDERID{0x33e28130, 0x4e1e, 0x4676, [8]byte{0x83, 0x5a, 0x98, 0x39, 0x5c, 0x3b, 0xc3, 0xbb}} + FOLDERID_ProgramFilesX86 = &KNOWNFOLDERID{0x7c5a40ef, 0xa0fb, 0x4bfc, [8]byte{0x87, 0x4a, 0xc0, 0xf2, 0xe0, 0xb9, 0xfa, 0x8e}} + FOLDERID_ProgramFilesCommonX86 = &KNOWNFOLDERID{0xde974d24, 0xd9c6, 0x4d3e, [8]byte{0xbf, 0x91, 0xf4, 0x45, 0x51, 0x20, 0xb9, 0x17}} + FOLDERID_ProgramFilesX64 = &KNOWNFOLDERID{0x6d809377, 0x6af0, 0x444b, [8]byte{0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}} + FOLDERID_ProgramFilesCommonX64 = &KNOWNFOLDERID{0x6365d5a7, 0x0f0d, 0x45e5, [8]byte{0x87, 0xf6, 0x0d, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d}} + FOLDERID_ProgramFiles = &KNOWNFOLDERID{0x905e63b6, 0xc1bf, 0x494e, [8]byte{0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}} + FOLDERID_ProgramFilesCommon = &KNOWNFOLDERID{0xf7f1ed05, 0x9f6d, 0x47a2, [8]byte{0xaa, 0xae, 0x29, 0xd3, 0x17, 0xc6, 0xf0, 0x66}} + FOLDERID_UserProgramFiles = &KNOWNFOLDERID{0x5cd7aee2, 0x2219, 0x4a67, [8]byte{0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}} + FOLDERID_UserProgramFilesCommon = &KNOWNFOLDERID{0xbcbd3057, 0xca5c, 0x4622, [8]byte{0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}} + FOLDERID_AdminTools = &KNOWNFOLDERID{0x724ef170, 0xa42d, 0x4fef, [8]byte{0x9f, 0x26, 0xb6, 0x0e, 0x84, 0x6f, 0xba, 0x4f}} + FOLDERID_CommonAdminTools = &KNOWNFOLDERID{0xd0384e7d, 0xbac3, 0x4797, [8]byte{0x8f, 0x14, 0xcb, 0xa2, 0x29, 0xb3, 0x92, 0xb5}} + FOLDERID_Music = 
&KNOWNFOLDERID{0x4bd8d571, 0x6d19, 0x48d3, [8]byte{0xbe, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0e, 0x43}} + FOLDERID_Videos = &KNOWNFOLDERID{0x18989b1d, 0x99b5, 0x455b, [8]byte{0x84, 0x1c, 0xab, 0x7c, 0x74, 0xe4, 0xdd, 0xfc}} + FOLDERID_Ringtones = &KNOWNFOLDERID{0xc870044b, 0xf49e, 0x4126, [8]byte{0xa9, 0xc3, 0xb5, 0x2a, 0x1f, 0xf4, 0x11, 0xe8}} + FOLDERID_PublicPictures = &KNOWNFOLDERID{0xb6ebfb86, 0x6907, 0x413c, [8]byte{0x9a, 0xf7, 0x4f, 0xc2, 0xab, 0xf0, 0x7c, 0xc5}} + FOLDERID_PublicMusic = &KNOWNFOLDERID{0x3214fab5, 0x9757, 0x4298, [8]byte{0xbb, 0x61, 0x92, 0xa9, 0xde, 0xaa, 0x44, 0xff}} + FOLDERID_PublicVideos = &KNOWNFOLDERID{0x2400183a, 0x6185, 0x49fb, [8]byte{0xa2, 0xd8, 0x4a, 0x39, 0x2a, 0x60, 0x2b, 0xa3}} + FOLDERID_PublicRingtones = &KNOWNFOLDERID{0xe555ab60, 0x153b, 0x4d17, [8]byte{0x9f, 0x04, 0xa5, 0xfe, 0x99, 0xfc, 0x15, 0xec}} + FOLDERID_ResourceDir = &KNOWNFOLDERID{0x8ad10c31, 0x2adb, 0x4296, [8]byte{0xa8, 0xf7, 0xe4, 0x70, 0x12, 0x32, 0xc9, 0x72}} + FOLDERID_LocalizedResourcesDir = &KNOWNFOLDERID{0x2a00375e, 0x224c, 0x49de, [8]byte{0xb8, 0xd1, 0x44, 0x0d, 0xf7, 0xef, 0x3d, 0xdc}} + FOLDERID_CommonOEMLinks = &KNOWNFOLDERID{0xc1bae2d0, 0x10df, 0x4334, [8]byte{0xbe, 0xdd, 0x7a, 0xa2, 0x0b, 0x22, 0x7a, 0x9d}} + FOLDERID_CDBurning = &KNOWNFOLDERID{0x9e52ab10, 0xf80d, 0x49df, [8]byte{0xac, 0xb8, 0x43, 0x30, 0xf5, 0x68, 0x78, 0x55}} + FOLDERID_UserProfiles = &KNOWNFOLDERID{0x0762d272, 0xc50a, 0x4bb0, [8]byte{0xa3, 0x82, 0x69, 0x7d, 0xcd, 0x72, 0x9b, 0x80}} + FOLDERID_Playlists = &KNOWNFOLDERID{0xde92c1c7, 0x837f, 0x4f69, [8]byte{0xa3, 0xbb, 0x86, 0xe6, 0x31, 0x20, 0x4a, 0x23}} + FOLDERID_SamplePlaylists = &KNOWNFOLDERID{0x15ca69b3, 0x30ee, 0x49c1, [8]byte{0xac, 0xe1, 0x6b, 0x5e, 0xc3, 0x72, 0xaf, 0xb5}} + FOLDERID_SampleMusic = &KNOWNFOLDERID{0xb250c668, 0xf57d, 0x4ee1, [8]byte{0xa6, 0x3c, 0x29, 0x0e, 0xe7, 0xd1, 0xaa, 0x1f}} + FOLDERID_SamplePictures = &KNOWNFOLDERID{0xc4900540, 0x2379, 0x4c75, [8]byte{0x84, 0x4b, 0x64, 0xe6, 0xfa, 0xf8, 0x71, 0x6b}} + FOLDERID_SampleVideos = &KNOWNFOLDERID{0x859ead94, 0x2e85, 0x48ad, [8]byte{0xa7, 0x1a, 0x09, 0x69, 0xcb, 0x56, 0xa6, 0xcd}} + FOLDERID_PhotoAlbums = &KNOWNFOLDERID{0x69d2cf90, 0xfc33, 0x4fb7, [8]byte{0x9a, 0x0c, 0xeb, 0xb0, 0xf0, 0xfc, 0xb4, 0x3c}} + FOLDERID_Public = &KNOWNFOLDERID{0xdfdf76a2, 0xc82a, 0x4d63, [8]byte{0x90, 0x6a, 0x56, 0x44, 0xac, 0x45, 0x73, 0x85}} + FOLDERID_ChangeRemovePrograms = &KNOWNFOLDERID{0xdf7266ac, 0x9274, 0x4867, [8]byte{0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}} + FOLDERID_AppUpdates = &KNOWNFOLDERID{0xa305ce99, 0xf527, 0x492b, [8]byte{0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}} + FOLDERID_AddNewPrograms = &KNOWNFOLDERID{0xde61d971, 0x5ebc, 0x4f02, [8]byte{0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}} + FOLDERID_Downloads = &KNOWNFOLDERID{0x374de290, 0x123f, 0x4565, [8]byte{0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}} + FOLDERID_PublicDownloads = &KNOWNFOLDERID{0x3d644c9b, 0x1fb8, 0x4f30, [8]byte{0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}} + FOLDERID_SavedSearches = &KNOWNFOLDERID{0x7d1d3a04, 0xdebb, 0x4115, [8]byte{0x95, 0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}} + FOLDERID_QuickLaunch = &KNOWNFOLDERID{0x52a4f021, 0x7b75, 0x48a9, [8]byte{0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}} + FOLDERID_Contacts = &KNOWNFOLDERID{0x56784854, 0xc6cb, 0x462b, [8]byte{0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}} + FOLDERID_SidebarParts = &KNOWNFOLDERID{0xa75d362e, 0x50fc, 0x4fb7, [8]byte{0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}} + FOLDERID_SidebarDefaultParts = 
&KNOWNFOLDERID{0x7b396e54, 0x9ec5, 0x4300, [8]byte{0xbe, 0x0a, 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}} + FOLDERID_PublicGameTasks = &KNOWNFOLDERID{0xdebf2536, 0xe1a8, 0x4c59, [8]byte{0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}} + FOLDERID_GameTasks = &KNOWNFOLDERID{0x054fae61, 0x4dd8, 0x4787, [8]byte{0x80, 0xb6, 0x09, 0x02, 0x20, 0xc4, 0xb7, 0x00}} + FOLDERID_SavedGames = &KNOWNFOLDERID{0x4c5c32ff, 0xbb9d, 0x43b0, [8]byte{0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}} + FOLDERID_Games = &KNOWNFOLDERID{0xcac52c1a, 0xb53d, 0x4edc, [8]byte{0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}} + FOLDERID_SEARCH_MAPI = &KNOWNFOLDERID{0x98ec0e18, 0x2098, 0x4d44, [8]byte{0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}} + FOLDERID_SEARCH_CSC = &KNOWNFOLDERID{0xee32e446, 0x31ca, 0x4aba, [8]byte{0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}} + FOLDERID_Links = &KNOWNFOLDERID{0xbfb9d5e0, 0xc6a9, 0x404c, [8]byte{0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}} + FOLDERID_UsersFiles = &KNOWNFOLDERID{0xf3ce0f7c, 0x4901, 0x4acc, [8]byte{0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}} + FOLDERID_UsersLibraries = &KNOWNFOLDERID{0xa302545d, 0xdeff, 0x464b, [8]byte{0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}} + FOLDERID_SearchHome = &KNOWNFOLDERID{0x190337d1, 0xb8ca, 0x4121, [8]byte{0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}} + FOLDERID_OriginalImages = &KNOWNFOLDERID{0x2c36c0aa, 0x5812, 0x4b87, [8]byte{0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}} + FOLDERID_DocumentsLibrary = &KNOWNFOLDERID{0x7b0db17d, 0x9cd2, 0x4a93, [8]byte{0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}} + FOLDERID_MusicLibrary = &KNOWNFOLDERID{0x2112ab0a, 0xc86a, 0x4ffe, [8]byte{0xa3, 0x68, 0x0d, 0xe9, 0x6e, 0x47, 0x01, 0x2e}} + FOLDERID_PicturesLibrary = &KNOWNFOLDERID{0xa990ae9f, 0xa03b, 0x4e80, [8]byte{0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x04}} + FOLDERID_VideosLibrary = &KNOWNFOLDERID{0x491e922f, 0x5643, 0x4af4, [8]byte{0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}} + FOLDERID_RecordedTVLibrary = &KNOWNFOLDERID{0x1a6fdba2, 0xf42d, 0x4358, [8]byte{0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}} + FOLDERID_HomeGroup = &KNOWNFOLDERID{0x52528a6b, 0xb9e3, 0x4add, [8]byte{0xb6, 0x0d, 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}} + FOLDERID_HomeGroupCurrentUser = &KNOWNFOLDERID{0x9b74b6a3, 0x0dfd, 0x4f11, [8]byte{0x9e, 0x78, 0x5f, 0x78, 0x00, 0xf2, 0xe7, 0x72}} + FOLDERID_DeviceMetadataStore = &KNOWNFOLDERID{0x5ce4a5e9, 0xe4eb, 0x479d, [8]byte{0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}} + FOLDERID_Libraries = &KNOWNFOLDERID{0x1b3ea5dc, 0xb587, 0x4786, [8]byte{0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}} + FOLDERID_PublicLibraries = &KNOWNFOLDERID{0x48daf80b, 0xe6cf, 0x4f4e, [8]byte{0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}} + FOLDERID_UserPinned = &KNOWNFOLDERID{0x9e3995ab, 0x1f9c, 0x4f13, [8]byte{0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}} + FOLDERID_ImplicitAppShortcuts = &KNOWNFOLDERID{0xbcb5256f, 0x79f6, 0x4cee, [8]byte{0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x02, 0xfd, 0x46}} + FOLDERID_AccountPictures = &KNOWNFOLDERID{0x008ca0b1, 0x55b4, 0x4c56, [8]byte{0xb8, 0xa8, 0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}} + FOLDERID_PublicUserTiles = &KNOWNFOLDERID{0x0482af6c, 0x08f1, 0x4c34, [8]byte{0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}} + FOLDERID_AppsFolder = &KNOWNFOLDERID{0x1e87508d, 0x89c2, 0x42f0, [8]byte{0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}} + FOLDERID_StartMenuAllPrograms = &KNOWNFOLDERID{0xf26305ef, 0x6948, 0x40b9, [8]byte{0xb2, 0x55, 0x81, 0x45, 0x3d, 0x09, 0xc7, 0x85}} + 
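Each FOLDERID_* value in this table is the GUID of a Windows shell known folder; FOLDERID_Downloads above, for instance, is the registered {374DE290-123F-4565-9164-39C4925E467B}. The usual consumer is SHGetKnownFolderPath, which this same diff resolves from shell32.dll in zsyscall_windows.go further down. A sketch, assuming the exported SHGetKnownFolderPath helper that x/sys/windows of this vintage wraps around that proc:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Token 0 means the current user; flags 0 asks for the default path.
	path, err := windows.SHGetKnownFolderPath(windows.FOLDERID_Downloads, 0, 0)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(path) // e.g. C:\Users\<name>\Downloads
}

Note that FOLDERID_SkyDrive and FOLDERID_OneDrive (just below) intentionally share a GUID: OneDrive is the renamed SkyDrive, and both names resolve to the same folder.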
FOLDERID_CommonStartMenuPlaces = &KNOWNFOLDERID{0xa440879f, 0x87a0, 0x4f7d, [8]byte{0xb7, 0x00, 0x02, 0x07, 0xb9, 0x66, 0x19, 0x4a}} + FOLDERID_ApplicationShortcuts = &KNOWNFOLDERID{0xa3918781, 0xe5f2, 0x4890, [8]byte{0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}} + FOLDERID_RoamingTiles = &KNOWNFOLDERID{0x00bcfc5a, 0xed94, 0x4e48, [8]byte{0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}} + FOLDERID_RoamedTileImages = &KNOWNFOLDERID{0xaaa8d5a5, 0xf1d6, 0x4259, [8]byte{0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}} + FOLDERID_Screenshots = &KNOWNFOLDERID{0xb7bede81, 0xdf94, 0x4682, [8]byte{0xa7, 0xd8, 0x57, 0xa5, 0x26, 0x20, 0xb8, 0x6f}} + FOLDERID_CameraRoll = &KNOWNFOLDERID{0xab5fb87b, 0x7ce2, 0x4f83, [8]byte{0x91, 0x5d, 0x55, 0x08, 0x46, 0xc9, 0x53, 0x7b}} + FOLDERID_SkyDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_OneDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_SkyDriveDocuments = &KNOWNFOLDERID{0x24d89e24, 0x2f19, 0x4534, [8]byte{0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}} + FOLDERID_SkyDrivePictures = &KNOWNFOLDERID{0x339719b5, 0x8c47, 0x4894, [8]byte{0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}} + FOLDERID_SkyDriveMusic = &KNOWNFOLDERID{0xc3f2459e, 0x80d6, 0x45dc, [8]byte{0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}} + FOLDERID_SkyDriveCameraRoll = &KNOWNFOLDERID{0x767e6811, 0x49cb, 0x4273, [8]byte{0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}} + FOLDERID_SearchHistory = &KNOWNFOLDERID{0x0d4c3db6, 0x03a3, 0x462f, [8]byte{0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}} + FOLDERID_SearchTemplates = &KNOWNFOLDERID{0x7e636bfe, 0xdfa9, 0x4d5e, [8]byte{0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}} + FOLDERID_CameraRollLibrary = &KNOWNFOLDERID{0x2b20df75, 0x1eda, 0x4039, [8]byte{0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}} + FOLDERID_SavedPictures = &KNOWNFOLDERID{0x3b193882, 0xd3ad, 0x4eab, [8]byte{0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}} + FOLDERID_SavedPicturesLibrary = &KNOWNFOLDERID{0xe25b5812, 0xbe88, 0x4bd9, [8]byte{0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}} + FOLDERID_RetailDemo = &KNOWNFOLDERID{0x12d4c69e, 0x24ad, 0x4923, [8]byte{0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}} + FOLDERID_Device = &KNOWNFOLDERID{0x1c2ac1dc, 0x4358, 0x4b6c, [8]byte{0x97, 0x33, 0xaf, 0x21, 0x15, 0x65, 0x76, 0xf0}} + FOLDERID_DevelopmentFiles = &KNOWNFOLDERID{0xdbe8e08e, 0x3053, 0x4bbc, [8]byte{0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}} + FOLDERID_Objects3D = &KNOWNFOLDERID{0x31c0dd25, 0x9439, 0x4f12, [8]byte{0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}} + FOLDERID_AppCaptures = &KNOWNFOLDERID{0xedc0fe71, 0x98d8, 0x4f4a, [8]byte{0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}} + FOLDERID_LocalDocuments = &KNOWNFOLDERID{0xf42ee2d3, 0x909f, 0x4907, [8]byte{0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}} + FOLDERID_LocalPictures = &KNOWNFOLDERID{0x0ddd015d, 0xb06c, 0x45d5, [8]byte{0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}} + FOLDERID_LocalVideos = &KNOWNFOLDERID{0x35286a68, 0x3c57, 0x41a1, [8]byte{0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}} + FOLDERID_LocalMusic = &KNOWNFOLDERID{0xa0c69a99, 0x21c8, 0x4671, [8]byte{0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}} + FOLDERID_LocalDownloads = &KNOWNFOLDERID{0x7d83ee9b, 0x2244, 0x4e70, [8]byte{0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}} + FOLDERID_RecordedCalls = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 
0x6f, 0x9a}} + FOLDERID_AllAppMods = &KNOWNFOLDERID{0x7ad67899, 0x66af, 0x43ba, [8]byte{0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}} + FOLDERID_CurrentAppMods = &KNOWNFOLDERID{0x3db40b20, 0x2a30, 0x4dbe, [8]byte{0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}} + FOLDERID_AppDataDesktop = &KNOWNFOLDERID{0xb2c5e279, 0x7add, 0x439f, [8]byte{0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}} + FOLDERID_AppDataDocuments = &KNOWNFOLDERID{0x7be16610, 0x1f7f, 0x44ac, [8]byte{0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}} + FOLDERID_AppDataFavorites = &KNOWNFOLDERID{0x7cfbefbc, 0xde1f, 0x45aa, [8]byte{0xb8, 0x43, 0xa5, 0x42, 0xac, 0x53, 0x6c, 0xc9}} + FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}} +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go new file mode 100644 index 00000000..ace2c19e --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -0,0 +1,3933 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package windows + +import ( + "syscall" + "unsafe" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = NewLazySystemDLL("advapi32.dll") + modkernel32 = NewLazySystemDLL("kernel32.dll") + modshell32 = NewLazySystemDLL("shell32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") + modmswsock = NewLazySystemDLL("mswsock.dll") + modcrypt32 = NewLazySystemDLL("crypt32.dll") + moduser32 = NewLazySystemDLL("user32.dll") + modole32 = NewLazySystemDLL("ole32.dll") + modntdll = NewLazySystemDLL("ntdll.dll") + modws2_32 = NewLazySystemDLL("ws2_32.dll") + moddnsapi = NewLazySystemDLL("dnsapi.dll") + modiphlpapi = NewLazySystemDLL("iphlpapi.dll") + modsecur32 = NewLazySystemDLL("secur32.dll") + modnetapi32 = NewLazySystemDLL("netapi32.dll") + modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") + + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procReportEventW = modadvapi32.NewProc("ReportEventW") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procCreateServiceW = modadvapi32.NewProc("CreateServiceW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procDeleteService = modadvapi32.NewProc("DeleteService") + procStartServiceW = modadvapi32.NewProc("StartServiceW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") + procControlService = modadvapi32.NewProc("ControlService") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + 
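Two idioms in the generated preamble above are worth noting. errnoErr exists because boxing a syscall.Errno into the error interface allocates; pre-boxing the hot ERROR_IO_PENDING value keeps the common overlapped-I/O path allocation-free. And every module handle comes from NewLazySystemDLL, which both defers LoadLibrary until a proc is first used and resolves only from the Windows system directory, closing off DLL-preloading from the working directory. A standalone sketch of the same lazy pattern, with GetCurrentProcessId chosen only as a harmless export to resolve:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

var (
	// Nothing is loaded yet: LazyDLL/LazyProc defer all work to first use.
	modkernel32          = windows.NewLazySystemDLL("kernel32.dll")
	procGetCurrentProcId = modkernel32.NewProc("GetCurrentProcessId")
)

func main() {
	// Find loads the DLL and resolves the export, returning an error
	// rather than panicking the way a bare Call does on a missing symbol.
	if err := procGetCurrentProcId.Find(); err != nil {
		fmt.Println("unavailable:", err)
		return
	}
	pid, _, _ := procGetCurrentProcId.Call()
	fmt.Println("pid:", pid)
}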
procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") + procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") + procGetLastError = modkernel32.NewProc("GetLastError") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetVersion = modkernel32.NewProc("GetVersion") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") + procExitProcess = modkernel32.NewProc("ExitProcess") + procIsWow64Process = modkernel32.NewProc("IsWow64Process") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procReadFile = modkernel32.NewProc("ReadFile") + procWriteFile = modkernel32.NewProc("WriteFile") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") + procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") + procFindClose = modkernel32.NewProc("FindClose") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") + 
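Almost every export resolved in this block ends in W, the UTF-16 ("wide") variant of the API, which is why string parameters cross the boundary as *uint16 throughout this file. The string-accepting wrappers further down (LoadLibrary, GetProcAddress) convert with syscall.UTF16PtrFromString and syscall.BytePtrFromString, both of which reject interior NUL bytes rather than silently truncating. For example:

// +build windows

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// The same conversion the generated wrappers perform before a ...W call.
	p, err := syscall.UTF16PtrFromString("C:\\Windows")
	if err != nil {
		panic(err) // only fails for strings containing NUL
	}
	fmt.Printf("first UTF-16 unit: %#x\n", *p) // 0x43 ('C')

	// An embedded NUL is rejected instead of truncating the string.
	if _, err = syscall.UTF16PtrFromString("bad\x00name"); err != nil {
		fmt.Println("embedded NUL rejected:", err)
	}
}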
procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procCreatePipe = modkernel32.NewProc("CreatePipe") + procGetFileType = modkernel32.NewProc("GetFileType") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") + procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procTransmitFile = modmswsock.NewProc("TransmitFile") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") + procCertCloseStore = modcrypt32.NewProc("CertCloseStore") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") + procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + 
procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procReadConsoleW = modkernel32.NewProc("ReadConsoleW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = modkernel32.NewProc("Thread32Next") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procCreateEventExW = modkernel32.NewProc("CreateEventExW") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") + procCreateMutexW = modkernel32.NewProc("CreateMutexW") + procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") + procOpenMutexW = modkernel32.NewProc("OpenMutexW") + procReleaseMutex = modkernel32.NewProc("ReleaseMutex") + procSleepEx = modkernel32.NewProc("SleepEx") + procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procResumeThread = modkernel32.NewProc("ResumeThread") + procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") + procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") + procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetProcessId = modkernel32.NewProc("GetProcessId") + procOpenThread = modkernel32.NewProc("OpenThread") + procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") + procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") + procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") + procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") + procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetVolumeInformationByHandleW = 
modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") + procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") + procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") + procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procMessageBoxW = moduser32.NewProc("MessageBoxW") + procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") + procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters") + procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") + procCoCreateGuid = modole32.NewProc("CoCreateGuid") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") + procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procsocket = modws2_32.NewProc("socket") + procsetsockopt = modws2_32.NewProc("setsockopt") + procgetsockopt = modws2_32.NewProc("getsockopt") + procbind = modws2_32.NewProc("bind") + procconnect = modws2_32.NewProc("connect") + procgetsockname = modws2_32.NewProc("getsockname") + procgetpeername = modws2_32.NewProc("getpeername") + proclisten = modws2_32.NewProc("listen") + procshutdown = modws2_32.NewProc("shutdown") + procclosesocket = modws2_32.NewProc("closesocket") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procWSARecv = modws2_32.NewProc("WSARecv") + procWSASend = modws2_32.NewProc("WSASend") + procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASendTo = modws2_32.NewProc("WSASendTo") + procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetservbyname = modws2_32.NewProc("getservbyname") + procntohs = modws2_32.NewProc("ntohs") + procgetprotobyname = modws2_32.NewProc("getprotobyname") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetACP = modkernel32.NewProc("GetACP") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") + procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + 
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procCopySid = modadvapi32.NewProc("CopySid") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") + procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procEqualSid = modadvapi32.NewProc("EqualSid") + procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") + procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") + procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") + procIsValidSid = modadvapi32.NewProc("IsValidSid") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") + procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procSetThreadToken = modadvapi32.NewProc("SetThreadToken") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") + procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") + procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") + procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") + procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") + procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") + procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") + procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") + procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") + procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") + procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") + procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") + procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") + procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") + procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") + procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") + procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") + 
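The function bodies that make up the rest of this file all share one generated shape: call syscall.Syscall/Syscall6/Syscall9 with the cached proc address and argument count, compare the primary return value against the API's documented failure sentinel (0, FALSE, or InvalidHandle), surface GetLastError through errnoErr, and fall back to syscall.EINVAL when the call failed without setting a last error. Hand-writing one wrapper in the same style, for kernel32's Beep; this helper is purely illustrative and not part of the vendored file:

// +build windows

package main

import (
	"syscall"

	"golang.org/x/sys/windows"
)

var (
	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
	procBeep    = modkernel32.NewProc("Beep") // BOOL Beep(DWORD freq, DWORD ms)
)

// beep mirrors the generated-wrapper convention: a FALSE (zero) return
// signals failure, e1 carries GetLastError, and EINVAL stands in when the
// API failed without setting a last-error value.
func beep(freqHz, durationMs uint32) error {
	r1, _, e1 := syscall.Syscall(procBeep.Addr(), 2, uintptr(freqHz), uintptr(durationMs), 0)
	if r1 == 0 {
		if e1 != 0 {
			return e1
		}
		return syscall.EINVAL
	}
	return nil
}

func main() {
	_ = beep(750, 300) // 750 Hz for 300 ms; ignore failure on headless hosts
}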
procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") + procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") + procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") + procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") + procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") +) + +func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeregisterEventSource(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CloseServiceHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenService(mgr Handle, serviceName 
*uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteService(service Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, 
uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { + r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func GetLastError() (lasterr error) { + r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + if r0 != 0 { + lasterr = syscall.Errno(r0) + } + return +} + +func LoadLibrary(libname string) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibrary(_p0) +} + +func _LoadLibrary(libname *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibraryEx(_p0, zero, flags) +} + +func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + 
handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeLibrary(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(procname) + if err != nil { + return + } + return _GetProcAddress(module, _p0) +} + +func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + proc = uintptr(r0) + if proc == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVersion() (ver uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + ver = uint32(r0) + if ver == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { + var _p0 *uint16 + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ExitProcess(exitcode uint32) { + syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + return +} + +func IsWow64Process(handle Handle, isWow64 *bool) (err error) { + var _p0 uint32 + if *isWow64 { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + *isWow64 = _p0 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 
:= syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + newlowoffset = uint32(r0) + if newlowoffset == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CloseHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetStdHandle(stdhandle uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetStdHandle(stdhandle uint32, handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func findNextFile1(handle Handle, data *win32finddata1) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindClose(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + if e1 != 0 { 
+ err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetCurrentDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { + r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RemoveDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteFile(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MoveFile(from *uint16, to *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetComputerName(buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = 
syscall.EINVAL + } + } + return +} + +func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEndOfFile(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetSystemTimeAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func GetSystemTimePreciseAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CancelIo(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CancelIoEx(s Handle, o *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { + var _p0 uint32 + if inheritHandles { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), 
uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { + r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + if r1 <= 32 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { + r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func TerminateProcess(handle Handle, exitcode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetStartupInfo(startupInfo *StartupInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { + var _p0 uint32 + if bInheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { + r0, _, e1 := 
syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + event = uint32(r0) + if event == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { + var _p0 uint32 + if waitAll { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + event = uint32(r0) + if event == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileType(filehandle Handle) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { + r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetEnvironmentStrings() (envs *uint16, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + envs = (*uint16)(unsafe.Pointer(r0)) + if envs == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeEnvironmentStrings(envs *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + n = 
uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { + var _p0 uint32 + if inheritExisting { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DestroyEnvironmentBlock(block *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getTickCount64() (ms uint64) { + r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + ms = uint64(r0) + return +} + +func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileAttributes(name *uint16) (attrs uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + attrs = uint32(r0) + if attrs == INVALID_FILE_ATTRIBUTES { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFileAttributes(name *uint16, attrs uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCommandLine() (cmd *uint16) { + r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + cmd = (*uint16)(unsafe.Pointer(r0)) + return +} + +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + if argv == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LocalFree(hmem Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + handle = Handle(r0) + if handle != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), 
uintptr(mask), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FlushFileBuffers(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + addr = uintptr(r0) + if addr == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func UnmapViewOfFile(addr uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FlushViewOfFile(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualLock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualUnlock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) 
(value uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + value = uintptr(r0) + if value == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + var _p0 uint32 + if watchSubTree { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + store = Handle(r0) + if store == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { + r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), 
uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertCloseStore(store Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { + r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertFreeCertificateChain(ctx *CertChainContext) { + syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + return +} + +func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertFreeCertificateContext(ctx *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { + r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegCloseKey(key Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), 
uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func GetCurrentProcessId() (pid uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + pid = uint32(r0) + return +} + +func GetConsoleMode(console Handle, mode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetConsoleMode(console Handle, mode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, 
e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentThreadId() (id uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + id = uint32(r0) + return +} + +func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenEvent(desiredAccess uint32, 
inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ResetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func PulseEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if initialOwner { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReleaseMutex(mutex Handle) (err error) { + r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { + var _p0 uint32 + if alertable { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + ret = uint32(r0) + return +} + +func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AssignProcessToJobObject(job Handle, process Handle) (err error) { + r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + 
+func TerminateJobObject(job Handle, exitCode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetErrorMode(mode uint32) (ret uint32) { + r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + ret = uint32(r0) + return +} + +func ResumeThread(thread Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + ret = uint32(r0) + if ret == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetPriorityClass(process Handle, priorityClass uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetPriorityClass(process Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { + r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + ret = int(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessId(process Handle) (id uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + id = uint32(r0) + if id == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetProcessPriorityBoost(process Handle, disable bool) (err error) { + var _p0 uint32 + if disable { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { + r1, _, e1 
:= syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindVolumeClose(findVolume Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetDriveType(rootPathName *uint16) (driveType uint32) { + r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + driveType = uint32(r0) + return +} + +func GetLogicalDrives() (drivesBitMask uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + drivesBitMask = uint32(r0) + if drivesBitMask == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), 
uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { + r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, 
uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + ret = int32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ExitWindowsEx(flags uint32, reason uint32) (err error) { + r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { + var _p0 uint32 + if forceAppsClosed { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if rebootAfterShutdown { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { + r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + chars = int32(r0) + return +} + +func coCreateGuid(pguid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func CoTaskMemFree(address unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + return +} + +func rtlGetVersion(info *OsVersionInfoEx) (ret error) { + r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { + syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + return +} + +func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { + r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func WSACleanup() (err error) { + r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAIoctl(s 
Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { + r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { + r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func listen(s Handle, backlog int32) (err error) { + r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func shutdown(s Handle, how int32) (err error) { + r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Closesocket(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) 
+ } else { + err = syscall.EINVAL + } + } + return +} + +func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { + syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + return +} + +func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetHostByName(name string) (h *Hostent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetHostByName(_p0) +} + +func _GetHostByName(name *byte) (h *Hostent, err error) { + r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + h 
= (*Hostent)(unsafe.Pointer(r0)) + if h == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetServByName(name string, proto string) (s *Servent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(proto) + if err != nil { + return + } + return _GetServByName(_p0, _p1) +} + +func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { + r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + s = (*Servent)(unsafe.Pointer(r0)) + if s == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Ntohs(netshort uint16) (u uint16) { + r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + u = uint16(r0) + return +} + +func GetProtoByName(name string) (p *Protoent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetProtoByName(_p0) +} + +func _GetProtoByName(name *byte) (p *Protoent, err error) { + r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + p = (*Protoent)(unsafe.Pointer(r0)) + if p == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + var _p0 *uint16 + _p0, status = syscall.UTF16PtrFromString(name) + if status != nil { + return + } + return _DnsQuery(_p0, qtype, options, extra, qrs, pr) +} + +func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func DnsRecordListFree(rl *DNSRecord, freetype uint32) { + syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + return +} + +func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { + r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + same = r0 != 0 + return +} + +func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { + r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func FreeAddrInfoW(addrinfo *AddrinfoW) { + syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + return +} + +func GetIfEntry(pIfRow *MibIfRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func SetFileCompletionNotificationModes(handle Handle, flags uint8) 
(err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { + r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + n = int32(r0) + if n == -1 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) + return +} + +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetApiBufferFree(buf *byte) (neterr error) { + r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLengthSid(sid *SID) (len uint32) { + r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + len = uint32(r0) + return +} + +func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { + r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + isWellKnown = r0 != 0 + return +} + +func FreeSid(sid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, 
uintptr(unsafe.Pointer(sid)), 0, 0) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { + r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + isEqual = r0 != 0 + return +} + +func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { + r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) + return +} + +func getSidSubAuthorityCount(sid *SID) (count *uint8) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + count = (*uint8)(unsafe.Pointer(r0)) + return +} + +func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + subAuthority = (*uint32)(unsafe.Pointer(r0)) + return +} + +func isValidSid(sid *SID) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + isValid = r0 != 0 + return +} + +func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { + r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetThreadToken(thread *Handle, token Token) (err error) { + r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate 
*Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { + var _p0 uint32 + if resetToDefault { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) 
+ len = uint32(r0) + if len == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WTSQueryUserToken(session uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WTSFreeMemory(ptr uintptr) { + syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + return +} + +func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) { + syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + return +} + +func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) +} + +func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) +} + +func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, 
uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { + r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { + var _p0 uint32 + if *daclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if *daclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *daclPresent = _p0 != 0 + *daclDefaulted = _p1 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { + var _p0 uint32 + if *saclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if *saclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *saclPresent = _p0 != 0 + *saclDefaulted = _p1 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { + var _p0 uint32 + if *ownerDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + *ownerDefaulted = _p0 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } 
+ return +} + +func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { + var _p0 uint32 + if *groupDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + *groupDefaulted = _p0 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + len = uint32(r0) + return +} + +func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + isValid = r0 != 0 + return +} + +func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { + var _p0 uint32 + if daclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if daclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { + var _p0 uint32 + if saclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if saclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { + var _p0 uint32 + if ownerDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { + var _p0 uint32 + if groupDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + 
} else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { + syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go new file mode 100644 index 00000000..7b27e6b1 --- /dev/null +++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go @@ -0,0 +1,62 @@ +// Copyright 2013 Google Inc. 
All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package cloudsql exposes access to Google Cloud SQL databases. + +This package does not work in App Engine "flexible environment". + +This package is intended for MySQL drivers to make App Engine-specific +connections. Applications should use this package through database/sql: +Select a pure Go MySQL driver that supports this package, and use sql.Open +with protocol "cloudsql" and an address of the Cloud SQL instance. + +A Go MySQL driver that has been tested to work well with Cloud SQL +is the go-sql-driver: + import "database/sql" + import _ "github.com/go-sql-driver/mysql" + + db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname") + + +Another driver that works well with Cloud SQL is the mymysql driver: + import "database/sql" + import _ "github.com/ziutek/mymysql/godrv" + + db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password") + + +Using either of these drivers, you can perform a standard SQL query. +This example assumes there is a table named 'users' with +columns 'first_name' and 'last_name': + + rows, err := db.Query("SELECT first_name, last_name FROM users") + if err != nil { + log.Errorf(ctx, "db.Query: %v", err) + } + defer rows.Close() + + for rows.Next() { + var firstName string + var lastName string + if err := rows.Scan(&firstName, &lastName); err != nil { + log.Errorf(ctx, "rows.Scan: %v", err) + continue + } + log.Infof(ctx, "First: %v - Last: %v", firstName, lastName) + } + if err := rows.Err(); err != nil { + log.Errorf(ctx, "Row error: %v", err) + } +*/ +package cloudsql + +import ( + "net" +) + +// Dial connects to the named Cloud SQL instance. +func Dial(instance string) (net.Conn, error) { + return connect(instance) +} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go new file mode 100644 index 00000000..af62dba1 --- /dev/null +++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go @@ -0,0 +1,17 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package cloudsql + +import ( + "net" + + "appengine/cloudsql" +) + +func connect(instance string) (net.Conn, error) { + return cloudsql.Dial(instance) +} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go new file mode 100644 index 00000000..90fa7b31 --- /dev/null +++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go @@ -0,0 +1,16 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package cloudsql + +import ( + "errors" + "net" +) + +func connect(instance string) (net.Conn, error) { + return nil, errors.New(`cloudsql: not supported in App Engine "flexible environment"`) +} diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go new file mode 100644 index 00000000..b43746e6 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/client.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package health + +import ( + "context" + "fmt" + "io" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/status" +) + +const maxDelay = 120 * time.Second + +var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay} +var backoffFunc = func(ctx context.Context, retries int) bool { + d := backoffStrategy.Backoff(retries) + timer := time.NewTimer(d) + select { + case <-timer.C: + return true + case <-ctx.Done(): + timer.Stop() + return false + } +} + +func init() { + internal.HealthCheckFunc = clientHealthCheck +} + +const healthCheckMethod = "/grpc.health.v1.Health/Watch" + +// This function implements the client side of the health-checking protocol defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error { + tryCnt := 0 + +retryConnection: + for { + // Backs off if the connection has failed in some way without receiving a message in the previous retry. + if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) { + return nil + } + tryCnt++ + + if ctx.Err() != nil { + return nil + } + setConnectivityState(connectivity.Connecting) + rawS, err := newStream(healthCheckMethod) + if err != nil { + continue retryConnection + } + + s, ok := rawS.(grpc.ClientStream) + // Ideally, this should never happen. But if it does, the server is marked as healthy for load-balancing purposes. + if !ok { + setConnectivityState(connectivity.Ready) + return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) + } + + if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF { + // Stream should have been closed, so we can safely continue to create a new stream. + continue retryConnection + } + s.CloseSend() + + resp := new(healthpb.HealthCheckResponse) + for { + err = s.RecvMsg(resp) + + // Reports healthy for load-balancing purposes if the health check is not implemented in the server. + if status.Code(err) == codes.Unimplemented { + setConnectivityState(connectivity.Ready) + return err + } + + // Reports unhealthy if the server's Watch method returns an error other than UNIMPLEMENTED. + if err != nil { + setConnectivityState(connectivity.TransientFailure) + continue retryConnection + } + + // As a message has been received, removes the need for backoff for the next retry by resetting the try count.
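+ // For orientation, a hedged sketch of how an application might opt in to this
+ // client-side health checking: it assumes grpc-go's WithDefaultServiceConfig
+ // dial option and a blank import of this package (whose init above wires up
+ // clientHealthCheck); target is a placeholder, not part of this file:
+ //
+ //	import _ "google.golang.org/grpc/health"
+ //
+ //	conn, err := grpc.Dial(target, grpc.WithInsecure(),
+ //		grpc.WithDefaultServiceConfig(`{"healthCheckConfig": {"serviceName": ""}}`))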
+ tryCnt = 0 + if resp.Status == healthpb.HealthCheckResponse_SERVING { + setConnectivityState(connectivity.Ready) + } else { + setConnectivityState(connectivity.TransientFailure) + } + } + } +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000..c2f2c772 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,327 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/health/v1/health.proto + +package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 + HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", + 3: "SERVICE_UNKNOWN", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, + "SERVICE_UNKNOWN": 3, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_health_6b1a06aa67f91efd, []int{0} +} +func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b) +} +func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic) +} +func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckRequest.Merge(dst, src) +} +func (m *HealthCheckRequest) XXX_Size() int { + return xxx_messageInfo_HealthCheckRequest.Size(m) +} +func (m *HealthCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo + +func (m *HealthCheckRequest) GetService() string { + if m != nil { + return m.Service 
+ } + return "" +} + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_health_6b1a06aa67f91efd, []int{1} +} +func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b) +} +func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic) +} +func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckResponse.Merge(dst, src) +} +func (m *HealthCheckResponse) XXX_Size() int { + return xxx_messageInfo_HealthCheckResponse.Size(m) +} +func (m *HealthCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo + +func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if m != nil { + return m.Status + } + return HealthCheckResponse_UNKNOWN +} + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type HealthClient interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. 
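+ // A hedged sketch of consuming Watch from application code; conn is an
+ // assumed *grpc.ClientConn and the service name is illustrative:
+ //
+ //	client := NewHealthClient(conn)
+ //	stream, err := client.Watch(ctx, &HealthCheckRequest{Service: "my.pkg.Service"})
+ //	for err == nil {
+ //		var resp *HealthCheckResponse
+ //		if resp, err = stream.Recv(); err == nil {
+ //			log.Printf("serving status: %v", resp.Status)
+ //		}
+ //	}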
+ Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) + if err != nil { + return nil, err + } + x := &healthWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Health_WatchClient interface { + Recv() (*HealthCheckResponse, error) + grpc.ClientStream +} + +type healthWatchClient struct { + grpc.ClientStream +} + +func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + m := new(HealthCheckResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HealthServer is the server API for Health service. +type HealthServer interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. 
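+ // A hedged wiring sketch for the server side, using the health.Server
+ // implementation vendored alongside this package; names are illustrative:
+ //
+ //	s := grpc.NewServer()
+ //	hs := health.NewServer()
+ //	RegisterHealthServer(s, hs)
+ //	hs.SetServingStatus("my.pkg.Service", HealthCheckResponse_SERVING)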
+ Watch(*HealthCheckRequest, Health_WatchServer) error +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HealthCheckRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) +} + +type Health_WatchServer interface { + Send(*HealthCheckResponse) error + grpc.ServerStream +} + +type healthWatchServer struct { + grpc.ServerStream +} + +func (x *healthWatchServer) Send(m *HealthCheckResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Health_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/health/v1/health.proto", +} + +func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) } + +var fileDescriptor_health_6b1a06aa67f91efd = []byte{ + // 297 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2, + 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f, + 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82, + 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, + 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, + 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5, + 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d, + 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f, + 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8, + 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3, + 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac, + 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10, + 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc, + 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4, + 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, + 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 
0xee, 0x20, + 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, + 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/health/regenerate.sh b/vendor/google.golang.org/grpc/health/regenerate.sh new file mode 100644 index 00000000..b11eccb2 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/regenerate.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux -o pipefail + +TMP=$(mktemp -d) + +function finish { + rm -rf "$TMP" +} +trap finish EXIT + +pushd "$TMP" +mkdir -p grpc/health/v1 +curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto + +protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto +popd +rm -f grpc_health_v1/*.pb.go +cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/ + diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go new file mode 100644 index 00000000..c79f9d2a --- /dev/null +++ b/vendor/google.golang.org/grpc/health/server.go @@ -0,0 +1,165 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +// Package health provides a service that exposes a server's health and must be +// imported to enable support for client-side health checks. +package health + +import ( + "context" + "sync" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +// Server implements `service Health`. +type Server struct { + mu sync.Mutex + // If shutdown is true, every serving status is expected to be NOT_SERVING, and + // will stay NOT_SERVING. + shutdown bool + // statusMap stores the serving status of the services this Server monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus + updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus +} + +// NewServer returns a new Server.
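+// The returned Server starts with the empty service name "" registered as
+// SERVING, so an overall-server Check or Watch for Service: "" reports
+// SERVING until SetServingStatus or Shutdown changes it.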
+func NewServer() *Server {
+	return &Server{
+		statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING},
+		updates:   make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus),
+	}
+}
+
+// Check implements `service Health`.
+func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if servingStatus, ok := s.statusMap[in.Service]; ok {
+		return &healthpb.HealthCheckResponse{
+			Status: servingStatus,
+		}, nil
+	}
+	return nil, status.Error(codes.NotFound, "unknown service")
+}
+
+// Watch implements `service Health`.
+func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
+	service := in.Service
+	// The update channel is used to receive service status updates.
+	update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1)
+	s.mu.Lock()
+	// Put the initial status on the channel.
+	if servingStatus, ok := s.statusMap[service]; ok {
+		update <- servingStatus
+	} else {
+		update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN
+	}
+
+	// Register the update channel in the correct place in the updates map.
+	if _, ok := s.updates[service]; !ok {
+		s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus)
+	}
+	s.updates[service][stream] = update
+	defer func() {
+		s.mu.Lock()
+		delete(s.updates[service], stream)
+		s.mu.Unlock()
+	}()
+	s.mu.Unlock()
+
+	var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1
+	for {
+		select {
+		// Status updated. Send the up-to-date status to the client.
+		case servingStatus := <-update:
+			if lastSentStatus == servingStatus {
+				continue
+			}
+			lastSentStatus = servingStatus
+			err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus})
+			if err != nil {
+				return status.Error(codes.Canceled, "Stream has ended.")
+			}
+		// Context done. The deferred cleanup removes the update channel from
+		// the updates map.
+		case <-stream.Context().Done():
+			return status.Error(codes.Canceled, "Stream has ended.")
+		}
+	}
+}
+
+// SetServingStatus is called when the serving status of a service needs to be
+// reset, or when a new service entry is inserted into the statusMap.
+func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.shutdown {
+		grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
+		return
+	}
+
+	s.setServingStatusLocked(service, servingStatus)
+}
+
+func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
+	s.statusMap[service] = servingStatus
+	for _, update := range s.updates[service] {
+		// Clear any previous update that was not sent to the client from the
+		// channel. This can happen if the client is not reading and the server
+		// gets flow control limited.
+		select {
+		case <-update:
+		default:
+		}
+		// Put the most recent update on the channel.
+		update <- servingStatus
+	}
+}
+
+// Shutdown sets all serving status to NOT_SERVING, and configures the server to
+// ignore all future status changes.
+//
+// This changes the serving status for all services. To set the status of a
+// particular service, call SetServingStatus().
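+//
+// A hedged caller-side sketch of pairing Shutdown with Resume (the drain step
+// is hypothetical and not part of this package):
+//
+//	hs.Shutdown() // every Check/Watch now reports NOT_SERVING
+//	// ... drain or reject in-flight work ...
+//	hs.Resume()   // every status flips back to SERVING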
+func (s *Server) Shutdown() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.shutdown = true
+	for service := range s.statusMap {
+		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING)
+	}
+}
+
+// Resume sets all serving status to SERVING, and configures the server to
+// accept all future status changes.
+//
+// This changes the serving status for all services. To set the status of a
+// particular service, call SetServingStatus().
+func (s *Server) Resume() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.shutdown = false
+	for service := range s.statusMap {
+		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING)
+	}
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 8bf1f744..aca37eed 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,6 +1,18 @@
 # cloud.google.com/go v0.40.0
 cloud.google.com/go/compute/metadata
-# github.com/aws/aws-sdk-go v1.19.39
+# github.com/Jeffail/gabs v1.1.1
+github.com/Jeffail/gabs
+# github.com/NYTimes/gziphandler v1.1.1
+github.com/NYTimes/gziphandler
+# github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6
+github.com/StackExchange/wmi
+# github.com/armon/go-metrics v0.3.0
+github.com/armon/go-metrics
+# github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e
+github.com/armon/go-proxyproto
+# github.com/armon/go-radix v1.0.0
+github.com/armon/go-radix
+# github.com/aws/aws-sdk-go v1.25.37
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/awserr
 github.com/aws/aws-sdk-go/aws/awsutil
@@ -21,6 +33,7 @@ github.com/aws/aws-sdk-go/aws/session
 github.com/aws/aws-sdk-go/aws/signer/v4
 github.com/aws/aws-sdk-go/internal/ini
 github.com/aws/aws-sdk-go/internal/sdkio
+github.com/aws/aws-sdk-go/internal/sdkmath
 github.com/aws/aws-sdk-go/internal/sdkrand
 github.com/aws/aws-sdk-go/internal/sdkuri
 github.com/aws/aws-sdk-go/internal/shareddefaults
@@ -31,6 +44,11 @@ github.com/aws/aws-sdk-go/private/protocol/query/queryutil
 github.com/aws/aws-sdk-go/private/protocol/rest
 github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
 github.com/aws/aws-sdk-go/service/sts
+github.com/aws/aws-sdk-go/service/sts/stsiface
+# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973
+github.com/beorn7/perks/quantile
+# github.com/bgentry/speakeasy v0.1.0
+github.com/bgentry/speakeasy
 # github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f
 github.com/briankassouf/jose
 github.com/briankassouf/jose/crypto
@@ -40,6 +58,20 @@ github.com/briankassouf/jose/jwt
 github.com/cenkalti/backoff
 # github.com/davecgh/go-spew v1.1.1
 github.com/davecgh/go-spew/spew
+# github.com/elazarl/go-bindata-assetfs v1.0.0
+github.com/elazarl/go-bindata-assetfs
+# github.com/fatih/color v1.7.0
+github.com/fatih/color
+# github.com/go-ole/go-ole v1.2.1
+github.com/go-ole/go-ole
+github.com/go-ole/go-ole/oleutil
+# github.com/go-sql-driver/mysql v1.4.1
+github.com/go-sql-driver/mysql
+# github.com/go-test/deep v1.0.2
+github.com/go-test/deep
+# github.com/gogo/protobuf v1.2.1
+github.com/gogo/protobuf/io
+github.com/gogo/protobuf/proto
 # github.com/golang/protobuf v1.3.2
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/ptypes
@@ -54,15 +86,34 @@ github.com/hashicorp/errwrap
 github.com/hashicorp/go-cleanhttp
 # github.com/hashicorp/go-gcp-common v0.5.0
 github.com/hashicorp/go-gcp-common/gcputil
+# github.com/hashicorp/go-hclog v0.12.0
+github.com/hashicorp/go-hclog
+# github.com/hashicorp/go-immutable-radix v1.1.0
+github.com/hashicorp/go-immutable-radix
+# github.com/hashicorp/go-memdb v1.0.2
+github.com/hashicorp/go-memdb
+# github.com/hashicorp/go-msgpack v0.5.5 +github.com/hashicorp/go-msgpack/codec # github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-retryablehttp v0.5.4 +# github.com/hashicorp/go-plugin v1.0.1 +github.com/hashicorp/go-plugin +github.com/hashicorp/go-plugin/internal/plugin +# github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a +github.com/hashicorp/go-raftchunking +github.com/hashicorp/go-raftchunking/types +# github.com/hashicorp/go-retryablehttp v0.6.2 github.com/hashicorp/go-retryablehttp # github.com/hashicorp/go-rootcerts v1.0.1 github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-sockaddr +# github.com/hashicorp/go-uuid v1.0.2 +github.com/hashicorp/go-uuid +# github.com/hashicorp/go-version v1.2.0 +github.com/hashicorp/go-version # github.com/hashicorp/golang-lru v0.5.3 +github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl @@ -74,33 +125,169 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token -# github.com/hashicorp/vault/api v1.0.5-0.20190909201928-35325e2c3262 +# github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17 +github.com/hashicorp/raft +# github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab +github.com/hashicorp/raft-snapshot +# github.com/hashicorp/vault v1.3.3 +github.com/hashicorp/vault/audit +github.com/hashicorp/vault/builtin/plugin +github.com/hashicorp/vault/command/server +github.com/hashicorp/vault/helper/forwarding +github.com/hashicorp/vault/helper/hostutil +github.com/hashicorp/vault/helper/identity +github.com/hashicorp/vault/helper/identity/mfa +github.com/hashicorp/vault/helper/listenerutil +github.com/hashicorp/vault/helper/metricsutil +github.com/hashicorp/vault/helper/namespace +github.com/hashicorp/vault/helper/pgpkeys +github.com/hashicorp/vault/helper/proxyutil +github.com/hashicorp/vault/helper/reload +github.com/hashicorp/vault/helper/storagepacker +github.com/hashicorp/vault/helper/xor +github.com/hashicorp/vault/http +github.com/hashicorp/vault/physical/raft +github.com/hashicorp/vault/physical/raft/logstore +github.com/hashicorp/vault/plugins/database/mysql +github.com/hashicorp/vault/plugins/database/postgresql +github.com/hashicorp/vault/shamir +github.com/hashicorp/vault/vault +github.com/hashicorp/vault/vault/cluster +github.com/hashicorp/vault/vault/replication +github.com/hashicorp/vault/vault/seal +github.com/hashicorp/vault/vault/seal/shamir +# github.com/hashicorp/vault-plugin-secrets-kv v0.5.4 +github.com/hashicorp/vault-plugin-secrets-kv +# github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820 github.com/hashicorp/vault/api -# github.com/hashicorp/vault/sdk v0.1.14-0.20190909201848-e0fbf9b652e2 +# github.com/hashicorp/vault/sdk v0.1.14-0.20200305172021-03a3749f220d +github.com/hashicorp/vault/sdk/database/dbplugin +github.com/hashicorp/vault/sdk/database/helper/connutil +github.com/hashicorp/vault/sdk/database/helper/credsutil +github.com/hashicorp/vault/sdk/database/helper/dbutil +github.com/hashicorp/vault/sdk/framework +github.com/hashicorp/vault/sdk/helper/base62 +github.com/hashicorp/vault/sdk/helper/certutil github.com/hashicorp/vault/sdk/helper/compressutil github.com/hashicorp/vault/sdk/helper/consts +github.com/hashicorp/vault/sdk/helper/cryptoutil +github.com/hashicorp/vault/sdk/helper/dbtxn 
+github.com/hashicorp/vault/sdk/helper/entropy +github.com/hashicorp/vault/sdk/helper/errutil github.com/hashicorp/vault/sdk/helper/hclutil github.com/hashicorp/vault/sdk/helper/jsonutil +github.com/hashicorp/vault/sdk/helper/kdf +github.com/hashicorp/vault/sdk/helper/keysutil +github.com/hashicorp/vault/sdk/helper/license +github.com/hashicorp/vault/sdk/helper/locksutil +github.com/hashicorp/vault/sdk/helper/logging +github.com/hashicorp/vault/sdk/helper/mlock github.com/hashicorp/vault/sdk/helper/parseutil +github.com/hashicorp/vault/sdk/helper/pathmanager +github.com/hashicorp/vault/sdk/helper/pluginutil +github.com/hashicorp/vault/sdk/helper/policyutil +github.com/hashicorp/vault/sdk/helper/salt github.com/hashicorp/vault/sdk/helper/strutil +github.com/hashicorp/vault/sdk/helper/tlsutil +github.com/hashicorp/vault/sdk/helper/tokenutil +github.com/hashicorp/vault/sdk/helper/wrapping +github.com/hashicorp/vault/sdk/logical +github.com/hashicorp/vault/sdk/physical +github.com/hashicorp/vault/sdk/physical/inmem +github.com/hashicorp/vault/sdk/plugin +github.com/hashicorp/vault/sdk/plugin/pb +github.com/hashicorp/vault/sdk/version +# github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d +github.com/hashicorp/yamux +# github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f +github.com/jefferai/isbadcipher +# github.com/jefferai/jsonx v1.0.0 +github.com/jefferai/jsonx # github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af github.com/jmespath/go-jmespath +# github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f +github.com/keybase/go-crypto/brainpool +github.com/keybase/go-crypto/cast5 +github.com/keybase/go-crypto/curve25519 +github.com/keybase/go-crypto/ed25519 +github.com/keybase/go-crypto/ed25519/internal/edwards25519 +github.com/keybase/go-crypto/openpgp +github.com/keybase/go-crypto/openpgp/armor +github.com/keybase/go-crypto/openpgp/ecdh +github.com/keybase/go-crypto/openpgp/elgamal +github.com/keybase/go-crypto/openpgp/errors +github.com/keybase/go-crypto/openpgp/packet +github.com/keybase/go-crypto/openpgp/s2k +github.com/keybase/go-crypto/rsa +# github.com/lib/pq v1.2.0 +github.com/lib/pq +github.com/lib/pq/oid +github.com/lib/pq/scram +# github.com/mattn/go-colorable v0.1.4 +github.com/mattn/go-colorable +# github.com/mattn/go-isatty v0.0.10 +github.com/mattn/go-isatty +# github.com/matttproud/golang_protobuf_extensions v1.0.1 +github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/mitchellh/cli v1.0.0 +github.com/mitchellh/cli +# github.com/mitchellh/copystructure v1.0.0 +github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-homedir +# github.com/mitchellh/go-testing-interface v1.0.0 +github.com/mitchellh/go-testing-interface # github.com/mitchellh/mapstructure v1.1.2 github.com/mitchellh/mapstructure +# github.com/mitchellh/reflectwalk v1.0.1 +github.com/mitchellh/reflectwalk +# github.com/oklog/run v1.0.0 +github.com/oklog/run +# github.com/patrickmn/go-cache v2.1.0+incompatible +github.com/patrickmn/go-cache # github.com/pierrec/lz4 v2.2.6+incompatible github.com/pierrec/lz4 github.com/pierrec/lz4/internal/xxh32 # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib +# github.com/posener/complete v1.2.1 +github.com/posener/complete +github.com/posener/complete/cmd +github.com/posener/complete/cmd/install +github.com/posener/complete/match +# github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 
+github.com/prometheus/client_golang/prometheus +github.com/prometheus/client_golang/prometheus/internal +# github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f +github.com/prometheus/client_model/go +# github.com/prometheus/common v0.2.0 +github.com/prometheus/common/expfmt +github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +github.com/prometheus/common/model +# github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 +github.com/prometheus/procfs +github.com/prometheus/procfs/internal/util +github.com/prometheus/procfs/nfs +github.com/prometheus/procfs/xfs # github.com/ryanuber/go-glob v1.0.0 github.com/ryanuber/go-glob +# github.com/shirou/gopsutil v2.19.9+incompatible +github.com/shirou/gopsutil/cpu +github.com/shirou/gopsutil/disk +github.com/shirou/gopsutil/host +github.com/shirou/gopsutil/internal/common +github.com/shirou/gopsutil/mem +github.com/shirou/gopsutil/net +github.com/shirou/gopsutil/process +# github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 +github.com/shirou/w32 # github.com/stretchr/objx v0.2.0 github.com/stretchr/objx # github.com/stretchr/testify v1.3.0 github.com/stretchr/testify/assert github.com/stretchr/testify/mock +# go.etcd.io/bbolt v1.3.2 +go.etcd.io/bbolt # go.opencensus.io v0.21.0 go.opencensus.io go.opencensus.io/internal @@ -118,10 +305,22 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.uber.org/atomic v1.4.0 +go.uber.org/atomic # golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 +golang.org/x/crypto/blake2b +golang.org/x/crypto/chacha20poly1305 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 +golang.org/x/crypto/curve25519 golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 +golang.org/x/crypto/hkdf +golang.org/x/crypto/internal/chacha20 +golang.org/x/crypto/internal/subtle golang.org/x/crypto/pbkdf2 +golang.org/x/crypto/poly1305 +golang.org/x/crypto/ssh # golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -137,8 +336,10 @@ golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a +# golang.org/x/sys v0.0.0-20191008105621-543471e840be +golang.org/x/sys/cpu golang.org/x/sys/unix +golang.org/x/sys/windows # golang.org/x/text v0.3.2 golang.org/x/text/secure/bidirule golang.org/x/text/transform @@ -160,6 +361,7 @@ google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation # google.golang.org/appengine v1.6.0 google.golang.org/appengine +google.golang.org/appengine/cloudsql google.golang.org/appengine/internal google.golang.org/appengine/internal/app_identity google.golang.org/appengine/internal/base @@ -184,6 +386,8 @@ google.golang.org/grpc/credentials/internal google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog +google.golang.org/grpc/health +google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancerload